Use getters/setters to access ARM branch type
[deliverable/binutils-gdb.git] / bfd / elf32-arm.c
1 /* 32-bit ELF support for ARM
2 Copyright (C) 1998-2016 Free Software Foundation, Inc.
3
4 This file is part of BFD, the Binary File Descriptor library.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
19 MA 02110-1301, USA. */
20
21 #include "sysdep.h"
22 #include <limits.h>
23
24 #include "bfd.h"
25 #include "bfd_stdint.h"
26 #include "libiberty.h"
27 #include "libbfd.h"
28 #include "elf-bfd.h"
29 #include "elf-nacl.h"
30 #include "elf-vxworks.h"
31 #include "elf/arm.h"
32
33 /* Return the name of the relocation section associated with NAME
34    (".rel" or ".rela" prefixed).  HTAB is the bfd's
     elf32_arm_link_hash_table; its use_rel field selects REL vs RELA.  */
35 #define RELOC_SECTION(HTAB, NAME) \
36 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
37
38 /* Return size of a relocation entry (Elf32_External_Rel or
39    Elf32_External_Rela).  HTAB is the bfd's
     elf32_arm_link_hash_table; its use_rel field selects which.  */
40 #define RELOC_SIZE(HTAB) \
41 ((HTAB)->use_rel \
42 ? sizeof (Elf32_External_Rel) \
43 : sizeof (Elf32_External_Rela))
44
45 /* Return the function used to swap relocations in.  HTAB is the
46    bfd's elf32_arm_link_hash_table; use_rel selects the Rel vs Rela
     swapper.  */
47 #define SWAP_RELOC_IN(HTAB) \
48 ((HTAB)->use_rel \
49 ? bfd_elf32_swap_reloc_in \
50 : bfd_elf32_swap_reloca_in)
51
52 /* Return the function used to swap relocations out.  HTAB is the
53    bfd's elf32_arm_link_hash_table; use_rel selects the Rel vs Rela
     swapper.  */
54 #define SWAP_RELOC_OUT(HTAB) \
55 ((HTAB)->use_rel \
56 ? bfd_elf32_swap_reloc_out \
57 : bfd_elf32_swap_reloca_out)
58
   /* Reloc-info-to-howto conversion: only the Rel-style hook is
      provided (elf32_arm_info_to_howto); the other hook is left
      unset (0).  */
59 #define elf_info_to_howto 0
60 #define elf_info_to_howto_rel elf32_arm_info_to_howto
61
   /* ABI version numbers used for ARM ELF output.  */
62 #define ARM_ELF_ABI_VERSION 0
63 #define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
64
65 /* The Adjusted Place, as defined by AAELF: the place rounded down
      to a 4-byte (word) boundary.  */
66 #define Pa(X) ((X) & 0xfffffffc)
67
   /* Forward declaration; the definition appears later in this file.  */
68 static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
69 struct bfd_link_info *link_info,
70 asection *sec,
71 bfd_byte *contents);
72
73 /* Note: code such as elf32_arm_reloc_type_lookup expect to use e.g.
74 R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
75 in that slot. */
76
77 static reloc_howto_type elf32_arm_howto_table_1[] =
78 {
79 /* No relocation. */
80 HOWTO (R_ARM_NONE, /* type */
81 0, /* rightshift */
82 3, /* size (0 = byte, 1 = short, 2 = long) */
83 0, /* bitsize */
84 FALSE, /* pc_relative */
85 0, /* bitpos */
86 complain_overflow_dont,/* complain_on_overflow */
87 bfd_elf_generic_reloc, /* special_function */
88 "R_ARM_NONE", /* name */
89 FALSE, /* partial_inplace */
90 0, /* src_mask */
91 0, /* dst_mask */
92 FALSE), /* pcrel_offset */
93
94 HOWTO (R_ARM_PC24, /* type */
95 2, /* rightshift */
96 2, /* size (0 = byte, 1 = short, 2 = long) */
97 24, /* bitsize */
98 TRUE, /* pc_relative */
99 0, /* bitpos */
100 complain_overflow_signed,/* complain_on_overflow */
101 bfd_elf_generic_reloc, /* special_function */
102 "R_ARM_PC24", /* name */
103 FALSE, /* partial_inplace */
104 0x00ffffff, /* src_mask */
105 0x00ffffff, /* dst_mask */
106 TRUE), /* pcrel_offset */
107
108 /* 32 bit absolute */
109 HOWTO (R_ARM_ABS32, /* type */
110 0, /* rightshift */
111 2, /* size (0 = byte, 1 = short, 2 = long) */
112 32, /* bitsize */
113 FALSE, /* pc_relative */
114 0, /* bitpos */
115 complain_overflow_bitfield,/* complain_on_overflow */
116 bfd_elf_generic_reloc, /* special_function */
117 "R_ARM_ABS32", /* name */
118 FALSE, /* partial_inplace */
119 0xffffffff, /* src_mask */
120 0xffffffff, /* dst_mask */
121 FALSE), /* pcrel_offset */
122
123 /* standard 32bit pc-relative reloc */
124 HOWTO (R_ARM_REL32, /* type */
125 0, /* rightshift */
126 2, /* size (0 = byte, 1 = short, 2 = long) */
127 32, /* bitsize */
128 TRUE, /* pc_relative */
129 0, /* bitpos */
130 complain_overflow_bitfield,/* complain_on_overflow */
131 bfd_elf_generic_reloc, /* special_function */
132 "R_ARM_REL32", /* name */
133 FALSE, /* partial_inplace */
134 0xffffffff, /* src_mask */
135 0xffffffff, /* dst_mask */
136 TRUE), /* pcrel_offset */
137
138 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
139 HOWTO (R_ARM_LDR_PC_G0, /* type */
140 0, /* rightshift */
141 0, /* size (0 = byte, 1 = short, 2 = long) */
142 32, /* bitsize */
143 TRUE, /* pc_relative */
144 0, /* bitpos */
145 complain_overflow_dont,/* complain_on_overflow */
146 bfd_elf_generic_reloc, /* special_function */
147 "R_ARM_LDR_PC_G0", /* name */
148 FALSE, /* partial_inplace */
149 0xffffffff, /* src_mask */
150 0xffffffff, /* dst_mask */
151 TRUE), /* pcrel_offset */
152
153 /* 16 bit absolute */
154 HOWTO (R_ARM_ABS16, /* type */
155 0, /* rightshift */
156 1, /* size (0 = byte, 1 = short, 2 = long) */
157 16, /* bitsize */
158 FALSE, /* pc_relative */
159 0, /* bitpos */
160 complain_overflow_bitfield,/* complain_on_overflow */
161 bfd_elf_generic_reloc, /* special_function */
162 "R_ARM_ABS16", /* name */
163 FALSE, /* partial_inplace */
164 0x0000ffff, /* src_mask */
165 0x0000ffff, /* dst_mask */
166 FALSE), /* pcrel_offset */
167
168 /* 12 bit absolute */
169 HOWTO (R_ARM_ABS12, /* type */
170 0, /* rightshift */
171 2, /* size (0 = byte, 1 = short, 2 = long) */
172 12, /* bitsize */
173 FALSE, /* pc_relative */
174 0, /* bitpos */
175 complain_overflow_bitfield,/* complain_on_overflow */
176 bfd_elf_generic_reloc, /* special_function */
177 "R_ARM_ABS12", /* name */
178 FALSE, /* partial_inplace */
179 0x00000fff, /* src_mask */
180 0x00000fff, /* dst_mask */
181 FALSE), /* pcrel_offset */
182
183 HOWTO (R_ARM_THM_ABS5, /* type */
184 6, /* rightshift */
185 1, /* size (0 = byte, 1 = short, 2 = long) */
186 5, /* bitsize */
187 FALSE, /* pc_relative */
188 0, /* bitpos */
189 complain_overflow_bitfield,/* complain_on_overflow */
190 bfd_elf_generic_reloc, /* special_function */
191 "R_ARM_THM_ABS5", /* name */
192 FALSE, /* partial_inplace */
193 0x000007e0, /* src_mask */
194 0x000007e0, /* dst_mask */
195 FALSE), /* pcrel_offset */
196
197 /* 8 bit absolute */
198 HOWTO (R_ARM_ABS8, /* type */
199 0, /* rightshift */
200 0, /* size (0 = byte, 1 = short, 2 = long) */
201 8, /* bitsize */
202 FALSE, /* pc_relative */
203 0, /* bitpos */
204 complain_overflow_bitfield,/* complain_on_overflow */
205 bfd_elf_generic_reloc, /* special_function */
206 "R_ARM_ABS8", /* name */
207 FALSE, /* partial_inplace */
208 0x000000ff, /* src_mask */
209 0x000000ff, /* dst_mask */
210 FALSE), /* pcrel_offset */
211
212 HOWTO (R_ARM_SBREL32, /* type */
213 0, /* rightshift */
214 2, /* size (0 = byte, 1 = short, 2 = long) */
215 32, /* bitsize */
216 FALSE, /* pc_relative */
217 0, /* bitpos */
218 complain_overflow_dont,/* complain_on_overflow */
219 bfd_elf_generic_reloc, /* special_function */
220 "R_ARM_SBREL32", /* name */
221 FALSE, /* partial_inplace */
222 0xffffffff, /* src_mask */
223 0xffffffff, /* dst_mask */
224 FALSE), /* pcrel_offset */
225
226 HOWTO (R_ARM_THM_CALL, /* type */
227 1, /* rightshift */
228 2, /* size (0 = byte, 1 = short, 2 = long) */
229 24, /* bitsize */
230 TRUE, /* pc_relative */
231 0, /* bitpos */
232 complain_overflow_signed,/* complain_on_overflow */
233 bfd_elf_generic_reloc, /* special_function */
234 "R_ARM_THM_CALL", /* name */
235 FALSE, /* partial_inplace */
236 0x07ff2fff, /* src_mask */
237 0x07ff2fff, /* dst_mask */
238 TRUE), /* pcrel_offset */
239
240 HOWTO (R_ARM_THM_PC8, /* type */
241 1, /* rightshift */
242 1, /* size (0 = byte, 1 = short, 2 = long) */
243 8, /* bitsize */
244 TRUE, /* pc_relative */
245 0, /* bitpos */
246 complain_overflow_signed,/* complain_on_overflow */
247 bfd_elf_generic_reloc, /* special_function */
248 "R_ARM_THM_PC8", /* name */
249 FALSE, /* partial_inplace */
250 0x000000ff, /* src_mask */
251 0x000000ff, /* dst_mask */
252 TRUE), /* pcrel_offset */
253
254 HOWTO (R_ARM_BREL_ADJ, /* type */
255 1, /* rightshift */
256 1, /* size (0 = byte, 1 = short, 2 = long) */
257 32, /* bitsize */
258 FALSE, /* pc_relative */
259 0, /* bitpos */
260 complain_overflow_signed,/* complain_on_overflow */
261 bfd_elf_generic_reloc, /* special_function */
262 "R_ARM_BREL_ADJ", /* name */
263 FALSE, /* partial_inplace */
264 0xffffffff, /* src_mask */
265 0xffffffff, /* dst_mask */
266 FALSE), /* pcrel_offset */
267
268 HOWTO (R_ARM_TLS_DESC, /* type */
269 0, /* rightshift */
270 2, /* size (0 = byte, 1 = short, 2 = long) */
271 32, /* bitsize */
272 FALSE, /* pc_relative */
273 0, /* bitpos */
274 complain_overflow_bitfield,/* complain_on_overflow */
275 bfd_elf_generic_reloc, /* special_function */
276 "R_ARM_TLS_DESC", /* name */
277 FALSE, /* partial_inplace */
278 0xffffffff, /* src_mask */
279 0xffffffff, /* dst_mask */
280 FALSE), /* pcrel_offset */
281
282 HOWTO (R_ARM_THM_SWI8, /* type */
283 0, /* rightshift */
284 0, /* size (0 = byte, 1 = short, 2 = long) */
285 0, /* bitsize */
286 FALSE, /* pc_relative */
287 0, /* bitpos */
288 complain_overflow_signed,/* complain_on_overflow */
289 bfd_elf_generic_reloc, /* special_function */
290 "R_ARM_SWI8", /* name */
291 FALSE, /* partial_inplace */
292 0x00000000, /* src_mask */
293 0x00000000, /* dst_mask */
294 FALSE), /* pcrel_offset */
295
296 /* BLX instruction for the ARM. */
297 HOWTO (R_ARM_XPC25, /* type */
298 2, /* rightshift */
299 2, /* size (0 = byte, 1 = short, 2 = long) */
300 24, /* bitsize */
301 TRUE, /* pc_relative */
302 0, /* bitpos */
303 complain_overflow_signed,/* complain_on_overflow */
304 bfd_elf_generic_reloc, /* special_function */
305 "R_ARM_XPC25", /* name */
306 FALSE, /* partial_inplace */
307 0x00ffffff, /* src_mask */
308 0x00ffffff, /* dst_mask */
309 TRUE), /* pcrel_offset */
310
311 /* BLX instruction for the Thumb. */
312 HOWTO (R_ARM_THM_XPC22, /* type */
313 2, /* rightshift */
314 2, /* size (0 = byte, 1 = short, 2 = long) */
315 24, /* bitsize */
316 TRUE, /* pc_relative */
317 0, /* bitpos */
318 complain_overflow_signed,/* complain_on_overflow */
319 bfd_elf_generic_reloc, /* special_function */
320 "R_ARM_THM_XPC22", /* name */
321 FALSE, /* partial_inplace */
322 0x07ff2fff, /* src_mask */
323 0x07ff2fff, /* dst_mask */
324 TRUE), /* pcrel_offset */
325
326 /* Dynamic TLS relocations. */
327
328 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
329 0, /* rightshift */
330 2, /* size (0 = byte, 1 = short, 2 = long) */
331 32, /* bitsize */
332 FALSE, /* pc_relative */
333 0, /* bitpos */
334 complain_overflow_bitfield,/* complain_on_overflow */
335 bfd_elf_generic_reloc, /* special_function */
336 "R_ARM_TLS_DTPMOD32", /* name */
337 TRUE, /* partial_inplace */
338 0xffffffff, /* src_mask */
339 0xffffffff, /* dst_mask */
340 FALSE), /* pcrel_offset */
341
342 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
343 0, /* rightshift */
344 2, /* size (0 = byte, 1 = short, 2 = long) */
345 32, /* bitsize */
346 FALSE, /* pc_relative */
347 0, /* bitpos */
348 complain_overflow_bitfield,/* complain_on_overflow */
349 bfd_elf_generic_reloc, /* special_function */
350 "R_ARM_TLS_DTPOFF32", /* name */
351 TRUE, /* partial_inplace */
352 0xffffffff, /* src_mask */
353 0xffffffff, /* dst_mask */
354 FALSE), /* pcrel_offset */
355
356 HOWTO (R_ARM_TLS_TPOFF32, /* type */
357 0, /* rightshift */
358 2, /* size (0 = byte, 1 = short, 2 = long) */
359 32, /* bitsize */
360 FALSE, /* pc_relative */
361 0, /* bitpos */
362 complain_overflow_bitfield,/* complain_on_overflow */
363 bfd_elf_generic_reloc, /* special_function */
364 "R_ARM_TLS_TPOFF32", /* name */
365 TRUE, /* partial_inplace */
366 0xffffffff, /* src_mask */
367 0xffffffff, /* dst_mask */
368 FALSE), /* pcrel_offset */
369
370 /* Relocs used in ARM Linux */
371
372 HOWTO (R_ARM_COPY, /* type */
373 0, /* rightshift */
374 2, /* size (0 = byte, 1 = short, 2 = long) */
375 32, /* bitsize */
376 FALSE, /* pc_relative */
377 0, /* bitpos */
378 complain_overflow_bitfield,/* complain_on_overflow */
379 bfd_elf_generic_reloc, /* special_function */
380 "R_ARM_COPY", /* name */
381 TRUE, /* partial_inplace */
382 0xffffffff, /* src_mask */
383 0xffffffff, /* dst_mask */
384 FALSE), /* pcrel_offset */
385
386 HOWTO (R_ARM_GLOB_DAT, /* type */
387 0, /* rightshift */
388 2, /* size (0 = byte, 1 = short, 2 = long) */
389 32, /* bitsize */
390 FALSE, /* pc_relative */
391 0, /* bitpos */
392 complain_overflow_bitfield,/* complain_on_overflow */
393 bfd_elf_generic_reloc, /* special_function */
394 "R_ARM_GLOB_DAT", /* name */
395 TRUE, /* partial_inplace */
396 0xffffffff, /* src_mask */
397 0xffffffff, /* dst_mask */
398 FALSE), /* pcrel_offset */
399
400 HOWTO (R_ARM_JUMP_SLOT, /* type */
401 0, /* rightshift */
402 2, /* size (0 = byte, 1 = short, 2 = long) */
403 32, /* bitsize */
404 FALSE, /* pc_relative */
405 0, /* bitpos */
406 complain_overflow_bitfield,/* complain_on_overflow */
407 bfd_elf_generic_reloc, /* special_function */
408 "R_ARM_JUMP_SLOT", /* name */
409 TRUE, /* partial_inplace */
410 0xffffffff, /* src_mask */
411 0xffffffff, /* dst_mask */
412 FALSE), /* pcrel_offset */
413
414 HOWTO (R_ARM_RELATIVE, /* type */
415 0, /* rightshift */
416 2, /* size (0 = byte, 1 = short, 2 = long) */
417 32, /* bitsize */
418 FALSE, /* pc_relative */
419 0, /* bitpos */
420 complain_overflow_bitfield,/* complain_on_overflow */
421 bfd_elf_generic_reloc, /* special_function */
422 "R_ARM_RELATIVE", /* name */
423 TRUE, /* partial_inplace */
424 0xffffffff, /* src_mask */
425 0xffffffff, /* dst_mask */
426 FALSE), /* pcrel_offset */
427
428 HOWTO (R_ARM_GOTOFF32, /* type */
429 0, /* rightshift */
430 2, /* size (0 = byte, 1 = short, 2 = long) */
431 32, /* bitsize */
432 FALSE, /* pc_relative */
433 0, /* bitpos */
434 complain_overflow_bitfield,/* complain_on_overflow */
435 bfd_elf_generic_reloc, /* special_function */
436 "R_ARM_GOTOFF32", /* name */
437 TRUE, /* partial_inplace */
438 0xffffffff, /* src_mask */
439 0xffffffff, /* dst_mask */
440 FALSE), /* pcrel_offset */
441
442 HOWTO (R_ARM_GOTPC, /* type */
443 0, /* rightshift */
444 2, /* size (0 = byte, 1 = short, 2 = long) */
445 32, /* bitsize */
446 TRUE, /* pc_relative */
447 0, /* bitpos */
448 complain_overflow_bitfield,/* complain_on_overflow */
449 bfd_elf_generic_reloc, /* special_function */
450 "R_ARM_GOTPC", /* name */
451 TRUE, /* partial_inplace */
452 0xffffffff, /* src_mask */
453 0xffffffff, /* dst_mask */
454 TRUE), /* pcrel_offset */
455
456 HOWTO (R_ARM_GOT32, /* type */
457 0, /* rightshift */
458 2, /* size (0 = byte, 1 = short, 2 = long) */
459 32, /* bitsize */
460 FALSE, /* pc_relative */
461 0, /* bitpos */
462 complain_overflow_bitfield,/* complain_on_overflow */
463 bfd_elf_generic_reloc, /* special_function */
464 "R_ARM_GOT32", /* name */
465 TRUE, /* partial_inplace */
466 0xffffffff, /* src_mask */
467 0xffffffff, /* dst_mask */
468 FALSE), /* pcrel_offset */
469
470 HOWTO (R_ARM_PLT32, /* type */
471 2, /* rightshift */
472 2, /* size (0 = byte, 1 = short, 2 = long) */
473 24, /* bitsize */
474 TRUE, /* pc_relative */
475 0, /* bitpos */
476 complain_overflow_bitfield,/* complain_on_overflow */
477 bfd_elf_generic_reloc, /* special_function */
478 "R_ARM_PLT32", /* name */
479 FALSE, /* partial_inplace */
480 0x00ffffff, /* src_mask */
481 0x00ffffff, /* dst_mask */
482 TRUE), /* pcrel_offset */
483
484 HOWTO (R_ARM_CALL, /* type */
485 2, /* rightshift */
486 2, /* size (0 = byte, 1 = short, 2 = long) */
487 24, /* bitsize */
488 TRUE, /* pc_relative */
489 0, /* bitpos */
490 complain_overflow_signed,/* complain_on_overflow */
491 bfd_elf_generic_reloc, /* special_function */
492 "R_ARM_CALL", /* name */
493 FALSE, /* partial_inplace */
494 0x00ffffff, /* src_mask */
495 0x00ffffff, /* dst_mask */
496 TRUE), /* pcrel_offset */
497
498 HOWTO (R_ARM_JUMP24, /* type */
499 2, /* rightshift */
500 2, /* size (0 = byte, 1 = short, 2 = long) */
501 24, /* bitsize */
502 TRUE, /* pc_relative */
503 0, /* bitpos */
504 complain_overflow_signed,/* complain_on_overflow */
505 bfd_elf_generic_reloc, /* special_function */
506 "R_ARM_JUMP24", /* name */
507 FALSE, /* partial_inplace */
508 0x00ffffff, /* src_mask */
509 0x00ffffff, /* dst_mask */
510 TRUE), /* pcrel_offset */
511
512 HOWTO (R_ARM_THM_JUMP24, /* type */
513 1, /* rightshift */
514 2, /* size (0 = byte, 1 = short, 2 = long) */
515 24, /* bitsize */
516 TRUE, /* pc_relative */
517 0, /* bitpos */
518 complain_overflow_signed,/* complain_on_overflow */
519 bfd_elf_generic_reloc, /* special_function */
520 "R_ARM_THM_JUMP24", /* name */
521 FALSE, /* partial_inplace */
522 0x07ff2fff, /* src_mask */
523 0x07ff2fff, /* dst_mask */
524 TRUE), /* pcrel_offset */
525
526 HOWTO (R_ARM_BASE_ABS, /* type */
527 0, /* rightshift */
528 2, /* size (0 = byte, 1 = short, 2 = long) */
529 32, /* bitsize */
530 FALSE, /* pc_relative */
531 0, /* bitpos */
532 complain_overflow_dont,/* complain_on_overflow */
533 bfd_elf_generic_reloc, /* special_function */
534 "R_ARM_BASE_ABS", /* name */
535 FALSE, /* partial_inplace */
536 0xffffffff, /* src_mask */
537 0xffffffff, /* dst_mask */
538 FALSE), /* pcrel_offset */
539
540 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
541 0, /* rightshift */
542 2, /* size (0 = byte, 1 = short, 2 = long) */
543 12, /* bitsize */
544 TRUE, /* pc_relative */
545 0, /* bitpos */
546 complain_overflow_dont,/* complain_on_overflow */
547 bfd_elf_generic_reloc, /* special_function */
548 "R_ARM_ALU_PCREL_7_0", /* name */
549 FALSE, /* partial_inplace */
550 0x00000fff, /* src_mask */
551 0x00000fff, /* dst_mask */
552 TRUE), /* pcrel_offset */
553
554 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
555 0, /* rightshift */
556 2, /* size (0 = byte, 1 = short, 2 = long) */
557 12, /* bitsize */
558 TRUE, /* pc_relative */
559 8, /* bitpos */
560 complain_overflow_dont,/* complain_on_overflow */
561 bfd_elf_generic_reloc, /* special_function */
562 "R_ARM_ALU_PCREL_15_8",/* name */
563 FALSE, /* partial_inplace */
564 0x00000fff, /* src_mask */
565 0x00000fff, /* dst_mask */
566 TRUE), /* pcrel_offset */
567
568 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
569 0, /* rightshift */
570 2, /* size (0 = byte, 1 = short, 2 = long) */
571 12, /* bitsize */
572 TRUE, /* pc_relative */
573 16, /* bitpos */
574 complain_overflow_dont,/* complain_on_overflow */
575 bfd_elf_generic_reloc, /* special_function */
576 "R_ARM_ALU_PCREL_23_15",/* name */
577 FALSE, /* partial_inplace */
578 0x00000fff, /* src_mask */
579 0x00000fff, /* dst_mask */
580 TRUE), /* pcrel_offset */
581
582 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
583 0, /* rightshift */
584 2, /* size (0 = byte, 1 = short, 2 = long) */
585 12, /* bitsize */
586 FALSE, /* pc_relative */
587 0, /* bitpos */
588 complain_overflow_dont,/* complain_on_overflow */
589 bfd_elf_generic_reloc, /* special_function */
590 "R_ARM_LDR_SBREL_11_0",/* name */
591 FALSE, /* partial_inplace */
592 0x00000fff, /* src_mask */
593 0x00000fff, /* dst_mask */
594 FALSE), /* pcrel_offset */
595
596 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
597 0, /* rightshift */
598 2, /* size (0 = byte, 1 = short, 2 = long) */
599 8, /* bitsize */
600 FALSE, /* pc_relative */
601 12, /* bitpos */
602 complain_overflow_dont,/* complain_on_overflow */
603 bfd_elf_generic_reloc, /* special_function */
604 "R_ARM_ALU_SBREL_19_12",/* name */
605 FALSE, /* partial_inplace */
606 0x000ff000, /* src_mask */
607 0x000ff000, /* dst_mask */
608 FALSE), /* pcrel_offset */
609
610 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
611 0, /* rightshift */
612 2, /* size (0 = byte, 1 = short, 2 = long) */
613 8, /* bitsize */
614 FALSE, /* pc_relative */
615 20, /* bitpos */
616 complain_overflow_dont,/* complain_on_overflow */
617 bfd_elf_generic_reloc, /* special_function */
618 "R_ARM_ALU_SBREL_27_20",/* name */
619 FALSE, /* partial_inplace */
620 0x0ff00000, /* src_mask */
621 0x0ff00000, /* dst_mask */
622 FALSE), /* pcrel_offset */
623
624 HOWTO (R_ARM_TARGET1, /* type */
625 0, /* rightshift */
626 2, /* size (0 = byte, 1 = short, 2 = long) */
627 32, /* bitsize */
628 FALSE, /* pc_relative */
629 0, /* bitpos */
630 complain_overflow_dont,/* complain_on_overflow */
631 bfd_elf_generic_reloc, /* special_function */
632 "R_ARM_TARGET1", /* name */
633 FALSE, /* partial_inplace */
634 0xffffffff, /* src_mask */
635 0xffffffff, /* dst_mask */
636 FALSE), /* pcrel_offset */
637
638 HOWTO (R_ARM_ROSEGREL32, /* type */
639 0, /* rightshift */
640 2, /* size (0 = byte, 1 = short, 2 = long) */
641 32, /* bitsize */
642 FALSE, /* pc_relative */
643 0, /* bitpos */
644 complain_overflow_dont,/* complain_on_overflow */
645 bfd_elf_generic_reloc, /* special_function */
646 "R_ARM_ROSEGREL32", /* name */
647 FALSE, /* partial_inplace */
648 0xffffffff, /* src_mask */
649 0xffffffff, /* dst_mask */
650 FALSE), /* pcrel_offset */
651
652 HOWTO (R_ARM_V4BX, /* type */
653 0, /* rightshift */
654 2, /* size (0 = byte, 1 = short, 2 = long) */
655 32, /* bitsize */
656 FALSE, /* pc_relative */
657 0, /* bitpos */
658 complain_overflow_dont,/* complain_on_overflow */
659 bfd_elf_generic_reloc, /* special_function */
660 "R_ARM_V4BX", /* name */
661 FALSE, /* partial_inplace */
662 0xffffffff, /* src_mask */
663 0xffffffff, /* dst_mask */
664 FALSE), /* pcrel_offset */
665
666 HOWTO (R_ARM_TARGET2, /* type */
667 0, /* rightshift */
668 2, /* size (0 = byte, 1 = short, 2 = long) */
669 32, /* bitsize */
670 FALSE, /* pc_relative */
671 0, /* bitpos */
672 complain_overflow_signed,/* complain_on_overflow */
673 bfd_elf_generic_reloc, /* special_function */
674 "R_ARM_TARGET2", /* name */
675 FALSE, /* partial_inplace */
676 0xffffffff, /* src_mask */
677 0xffffffff, /* dst_mask */
678 TRUE), /* pcrel_offset */
679
680 HOWTO (R_ARM_PREL31, /* type */
681 0, /* rightshift */
682 2, /* size (0 = byte, 1 = short, 2 = long) */
683 31, /* bitsize */
684 TRUE, /* pc_relative */
685 0, /* bitpos */
686 complain_overflow_signed,/* complain_on_overflow */
687 bfd_elf_generic_reloc, /* special_function */
688 "R_ARM_PREL31", /* name */
689 FALSE, /* partial_inplace */
690 0x7fffffff, /* src_mask */
691 0x7fffffff, /* dst_mask */
692 TRUE), /* pcrel_offset */
693
694 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
695 0, /* rightshift */
696 2, /* size (0 = byte, 1 = short, 2 = long) */
697 16, /* bitsize */
698 FALSE, /* pc_relative */
699 0, /* bitpos */
700 complain_overflow_dont,/* complain_on_overflow */
701 bfd_elf_generic_reloc, /* special_function */
702 "R_ARM_MOVW_ABS_NC", /* name */
703 FALSE, /* partial_inplace */
704 0x000f0fff, /* src_mask */
705 0x000f0fff, /* dst_mask */
706 FALSE), /* pcrel_offset */
707
708 HOWTO (R_ARM_MOVT_ABS, /* type */
709 0, /* rightshift */
710 2, /* size (0 = byte, 1 = short, 2 = long) */
711 16, /* bitsize */
712 FALSE, /* pc_relative */
713 0, /* bitpos */
714 complain_overflow_bitfield,/* complain_on_overflow */
715 bfd_elf_generic_reloc, /* special_function */
716 "R_ARM_MOVT_ABS", /* name */
717 FALSE, /* partial_inplace */
718 0x000f0fff, /* src_mask */
719 0x000f0fff, /* dst_mask */
720 FALSE), /* pcrel_offset */
721
722 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
723 0, /* rightshift */
724 2, /* size (0 = byte, 1 = short, 2 = long) */
725 16, /* bitsize */
726 TRUE, /* pc_relative */
727 0, /* bitpos */
728 complain_overflow_dont,/* complain_on_overflow */
729 bfd_elf_generic_reloc, /* special_function */
730 "R_ARM_MOVW_PREL_NC", /* name */
731 FALSE, /* partial_inplace */
732 0x000f0fff, /* src_mask */
733 0x000f0fff, /* dst_mask */
734 TRUE), /* pcrel_offset */
735
736 HOWTO (R_ARM_MOVT_PREL, /* type */
737 0, /* rightshift */
738 2, /* size (0 = byte, 1 = short, 2 = long) */
739 16, /* bitsize */
740 TRUE, /* pc_relative */
741 0, /* bitpos */
742 complain_overflow_bitfield,/* complain_on_overflow */
743 bfd_elf_generic_reloc, /* special_function */
744 "R_ARM_MOVT_PREL", /* name */
745 FALSE, /* partial_inplace */
746 0x000f0fff, /* src_mask */
747 0x000f0fff, /* dst_mask */
748 TRUE), /* pcrel_offset */
749
750 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
751 0, /* rightshift */
752 2, /* size (0 = byte, 1 = short, 2 = long) */
753 16, /* bitsize */
754 FALSE, /* pc_relative */
755 0, /* bitpos */
756 complain_overflow_dont,/* complain_on_overflow */
757 bfd_elf_generic_reloc, /* special_function */
758 "R_ARM_THM_MOVW_ABS_NC",/* name */
759 FALSE, /* partial_inplace */
760 0x040f70ff, /* src_mask */
761 0x040f70ff, /* dst_mask */
762 FALSE), /* pcrel_offset */
763
764 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
765 0, /* rightshift */
766 2, /* size (0 = byte, 1 = short, 2 = long) */
767 16, /* bitsize */
768 FALSE, /* pc_relative */
769 0, /* bitpos */
770 complain_overflow_bitfield,/* complain_on_overflow */
771 bfd_elf_generic_reloc, /* special_function */
772 "R_ARM_THM_MOVT_ABS", /* name */
773 FALSE, /* partial_inplace */
774 0x040f70ff, /* src_mask */
775 0x040f70ff, /* dst_mask */
776 FALSE), /* pcrel_offset */
777
778 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
779 0, /* rightshift */
780 2, /* size (0 = byte, 1 = short, 2 = long) */
781 16, /* bitsize */
782 TRUE, /* pc_relative */
783 0, /* bitpos */
784 complain_overflow_dont,/* complain_on_overflow */
785 bfd_elf_generic_reloc, /* special_function */
786 "R_ARM_THM_MOVW_PREL_NC",/* name */
787 FALSE, /* partial_inplace */
788 0x040f70ff, /* src_mask */
789 0x040f70ff, /* dst_mask */
790 TRUE), /* pcrel_offset */
791
792 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
793 0, /* rightshift */
794 2, /* size (0 = byte, 1 = short, 2 = long) */
795 16, /* bitsize */
796 TRUE, /* pc_relative */
797 0, /* bitpos */
798 complain_overflow_bitfield,/* complain_on_overflow */
799 bfd_elf_generic_reloc, /* special_function */
800 "R_ARM_THM_MOVT_PREL", /* name */
801 FALSE, /* partial_inplace */
802 0x040f70ff, /* src_mask */
803 0x040f70ff, /* dst_mask */
804 TRUE), /* pcrel_offset */
805
806 HOWTO (R_ARM_THM_JUMP19, /* type */
807 1, /* rightshift */
808 2, /* size (0 = byte, 1 = short, 2 = long) */
809 19, /* bitsize */
810 TRUE, /* pc_relative */
811 0, /* bitpos */
812 complain_overflow_signed,/* complain_on_overflow */
813 bfd_elf_generic_reloc, /* special_function */
814 "R_ARM_THM_JUMP19", /* name */
815 FALSE, /* partial_inplace */
816 0x043f2fff, /* src_mask */
817 0x043f2fff, /* dst_mask */
818 TRUE), /* pcrel_offset */
819
820 HOWTO (R_ARM_THM_JUMP6, /* type */
821 1, /* rightshift */
822 1, /* size (0 = byte, 1 = short, 2 = long) */
823 6, /* bitsize */
824 TRUE, /* pc_relative */
825 0, /* bitpos */
826 complain_overflow_unsigned,/* complain_on_overflow */
827 bfd_elf_generic_reloc, /* special_function */
828 "R_ARM_THM_JUMP6", /* name */
829 FALSE, /* partial_inplace */
830 0x02f8, /* src_mask */
831 0x02f8, /* dst_mask */
832 TRUE), /* pcrel_offset */
833
834 /* These are declared as 13-bit signed relocations because we can
835 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
836 versa. */
837 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
838 0, /* rightshift */
839 2, /* size (0 = byte, 1 = short, 2 = long) */
840 13, /* bitsize */
841 TRUE, /* pc_relative */
842 0, /* bitpos */
843 complain_overflow_dont,/* complain_on_overflow */
844 bfd_elf_generic_reloc, /* special_function */
845 "R_ARM_THM_ALU_PREL_11_0",/* name */
846 FALSE, /* partial_inplace */
847 0xffffffff, /* src_mask */
848 0xffffffff, /* dst_mask */
849 TRUE), /* pcrel_offset */
850
851 HOWTO (R_ARM_THM_PC12, /* type */
852 0, /* rightshift */
853 2, /* size (0 = byte, 1 = short, 2 = long) */
854 13, /* bitsize */
855 TRUE, /* pc_relative */
856 0, /* bitpos */
857 complain_overflow_dont,/* complain_on_overflow */
858 bfd_elf_generic_reloc, /* special_function */
859 "R_ARM_THM_PC12", /* name */
860 FALSE, /* partial_inplace */
861 0xffffffff, /* src_mask */
862 0xffffffff, /* dst_mask */
863 TRUE), /* pcrel_offset */
864
865 HOWTO (R_ARM_ABS32_NOI, /* type */
866 0, /* rightshift */
867 2, /* size (0 = byte, 1 = short, 2 = long) */
868 32, /* bitsize */
869 FALSE, /* pc_relative */
870 0, /* bitpos */
871 complain_overflow_dont,/* complain_on_overflow */
872 bfd_elf_generic_reloc, /* special_function */
873 "R_ARM_ABS32_NOI", /* name */
874 FALSE, /* partial_inplace */
875 0xffffffff, /* src_mask */
876 0xffffffff, /* dst_mask */
877 FALSE), /* pcrel_offset */
878
879 HOWTO (R_ARM_REL32_NOI, /* type */
880 0, /* rightshift */
881 2, /* size (0 = byte, 1 = short, 2 = long) */
882 32, /* bitsize */
883 TRUE, /* pc_relative */
884 0, /* bitpos */
885 complain_overflow_dont,/* complain_on_overflow */
886 bfd_elf_generic_reloc, /* special_function */
887 "R_ARM_REL32_NOI", /* name */
888 FALSE, /* partial_inplace */
889 0xffffffff, /* src_mask */
890 0xffffffff, /* dst_mask */
891 FALSE), /* pcrel_offset */
892
893 /* Group relocations. */
894
895 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
896 0, /* rightshift */
897 2, /* size (0 = byte, 1 = short, 2 = long) */
898 32, /* bitsize */
899 TRUE, /* pc_relative */
900 0, /* bitpos */
901 complain_overflow_dont,/* complain_on_overflow */
902 bfd_elf_generic_reloc, /* special_function */
903 "R_ARM_ALU_PC_G0_NC", /* name */
904 FALSE, /* partial_inplace */
905 0xffffffff, /* src_mask */
906 0xffffffff, /* dst_mask */
907 TRUE), /* pcrel_offset */
908
909 HOWTO (R_ARM_ALU_PC_G0, /* type */
910 0, /* rightshift */
911 2, /* size (0 = byte, 1 = short, 2 = long) */
912 32, /* bitsize */
913 TRUE, /* pc_relative */
914 0, /* bitpos */
915 complain_overflow_dont,/* complain_on_overflow */
916 bfd_elf_generic_reloc, /* special_function */
917 "R_ARM_ALU_PC_G0", /* name */
918 FALSE, /* partial_inplace */
919 0xffffffff, /* src_mask */
920 0xffffffff, /* dst_mask */
921 TRUE), /* pcrel_offset */
922
923 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
924 0, /* rightshift */
925 2, /* size (0 = byte, 1 = short, 2 = long) */
926 32, /* bitsize */
927 TRUE, /* pc_relative */
928 0, /* bitpos */
929 complain_overflow_dont,/* complain_on_overflow */
930 bfd_elf_generic_reloc, /* special_function */
931 "R_ARM_ALU_PC_G1_NC", /* name */
932 FALSE, /* partial_inplace */
933 0xffffffff, /* src_mask */
934 0xffffffff, /* dst_mask */
935 TRUE), /* pcrel_offset */
936
937 HOWTO (R_ARM_ALU_PC_G1, /* type */
938 0, /* rightshift */
939 2, /* size (0 = byte, 1 = short, 2 = long) */
940 32, /* bitsize */
941 TRUE, /* pc_relative */
942 0, /* bitpos */
943 complain_overflow_dont,/* complain_on_overflow */
944 bfd_elf_generic_reloc, /* special_function */
945 "R_ARM_ALU_PC_G1", /* name */
946 FALSE, /* partial_inplace */
947 0xffffffff, /* src_mask */
948 0xffffffff, /* dst_mask */
949 TRUE), /* pcrel_offset */
950
951 HOWTO (R_ARM_ALU_PC_G2, /* type */
952 0, /* rightshift */
953 2, /* size (0 = byte, 1 = short, 2 = long) */
954 32, /* bitsize */
955 TRUE, /* pc_relative */
956 0, /* bitpos */
957 complain_overflow_dont,/* complain_on_overflow */
958 bfd_elf_generic_reloc, /* special_function */
959 "R_ARM_ALU_PC_G2", /* name */
960 FALSE, /* partial_inplace */
961 0xffffffff, /* src_mask */
962 0xffffffff, /* dst_mask */
963 TRUE), /* pcrel_offset */
964
965 HOWTO (R_ARM_LDR_PC_G1, /* type */
966 0, /* rightshift */
967 2, /* size (0 = byte, 1 = short, 2 = long) */
968 32, /* bitsize */
969 TRUE, /* pc_relative */
970 0, /* bitpos */
971 complain_overflow_dont,/* complain_on_overflow */
972 bfd_elf_generic_reloc, /* special_function */
973 "R_ARM_LDR_PC_G1", /* name */
974 FALSE, /* partial_inplace */
975 0xffffffff, /* src_mask */
976 0xffffffff, /* dst_mask */
977 TRUE), /* pcrel_offset */
978
979 HOWTO (R_ARM_LDR_PC_G2, /* type */
980 0, /* rightshift */
981 2, /* size (0 = byte, 1 = short, 2 = long) */
982 32, /* bitsize */
983 TRUE, /* pc_relative */
984 0, /* bitpos */
985 complain_overflow_dont,/* complain_on_overflow */
986 bfd_elf_generic_reloc, /* special_function */
987 "R_ARM_LDR_PC_G2", /* name */
988 FALSE, /* partial_inplace */
989 0xffffffff, /* src_mask */
990 0xffffffff, /* dst_mask */
991 TRUE), /* pcrel_offset */
992
993 HOWTO (R_ARM_LDRS_PC_G0, /* type */
994 0, /* rightshift */
995 2, /* size (0 = byte, 1 = short, 2 = long) */
996 32, /* bitsize */
997 TRUE, /* pc_relative */
998 0, /* bitpos */
999 complain_overflow_dont,/* complain_on_overflow */
1000 bfd_elf_generic_reloc, /* special_function */
1001 "R_ARM_LDRS_PC_G0", /* name */
1002 FALSE, /* partial_inplace */
1003 0xffffffff, /* src_mask */
1004 0xffffffff, /* dst_mask */
1005 TRUE), /* pcrel_offset */
1006
1007 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1008 0, /* rightshift */
1009 2, /* size (0 = byte, 1 = short, 2 = long) */
1010 32, /* bitsize */
1011 TRUE, /* pc_relative */
1012 0, /* bitpos */
1013 complain_overflow_dont,/* complain_on_overflow */
1014 bfd_elf_generic_reloc, /* special_function */
1015 "R_ARM_LDRS_PC_G1", /* name */
1016 FALSE, /* partial_inplace */
1017 0xffffffff, /* src_mask */
1018 0xffffffff, /* dst_mask */
1019 TRUE), /* pcrel_offset */
1020
1021 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1022 0, /* rightshift */
1023 2, /* size (0 = byte, 1 = short, 2 = long) */
1024 32, /* bitsize */
1025 TRUE, /* pc_relative */
1026 0, /* bitpos */
1027 complain_overflow_dont,/* complain_on_overflow */
1028 bfd_elf_generic_reloc, /* special_function */
1029 "R_ARM_LDRS_PC_G2", /* name */
1030 FALSE, /* partial_inplace */
1031 0xffffffff, /* src_mask */
1032 0xffffffff, /* dst_mask */
1033 TRUE), /* pcrel_offset */
1034
1035 HOWTO (R_ARM_LDC_PC_G0, /* type */
1036 0, /* rightshift */
1037 2, /* size (0 = byte, 1 = short, 2 = long) */
1038 32, /* bitsize */
1039 TRUE, /* pc_relative */
1040 0, /* bitpos */
1041 complain_overflow_dont,/* complain_on_overflow */
1042 bfd_elf_generic_reloc, /* special_function */
1043 "R_ARM_LDC_PC_G0", /* name */
1044 FALSE, /* partial_inplace */
1045 0xffffffff, /* src_mask */
1046 0xffffffff, /* dst_mask */
1047 TRUE), /* pcrel_offset */
1048
1049 HOWTO (R_ARM_LDC_PC_G1, /* type */
1050 0, /* rightshift */
1051 2, /* size (0 = byte, 1 = short, 2 = long) */
1052 32, /* bitsize */
1053 TRUE, /* pc_relative */
1054 0, /* bitpos */
1055 complain_overflow_dont,/* complain_on_overflow */
1056 bfd_elf_generic_reloc, /* special_function */
1057 "R_ARM_LDC_PC_G1", /* name */
1058 FALSE, /* partial_inplace */
1059 0xffffffff, /* src_mask */
1060 0xffffffff, /* dst_mask */
1061 TRUE), /* pcrel_offset */
1062
1063 HOWTO (R_ARM_LDC_PC_G2, /* type */
1064 0, /* rightshift */
1065 2, /* size (0 = byte, 1 = short, 2 = long) */
1066 32, /* bitsize */
1067 TRUE, /* pc_relative */
1068 0, /* bitpos */
1069 complain_overflow_dont,/* complain_on_overflow */
1070 bfd_elf_generic_reloc, /* special_function */
1071 "R_ARM_LDC_PC_G2", /* name */
1072 FALSE, /* partial_inplace */
1073 0xffffffff, /* src_mask */
1074 0xffffffff, /* dst_mask */
1075 TRUE), /* pcrel_offset */
1076
1077 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1078 0, /* rightshift */
1079 2, /* size (0 = byte, 1 = short, 2 = long) */
1080 32, /* bitsize */
1081 TRUE, /* pc_relative */
1082 0, /* bitpos */
1083 complain_overflow_dont,/* complain_on_overflow */
1084 bfd_elf_generic_reloc, /* special_function */
1085 "R_ARM_ALU_SB_G0_NC", /* name */
1086 FALSE, /* partial_inplace */
1087 0xffffffff, /* src_mask */
1088 0xffffffff, /* dst_mask */
1089 TRUE), /* pcrel_offset */
1090
1091 HOWTO (R_ARM_ALU_SB_G0, /* type */
1092 0, /* rightshift */
1093 2, /* size (0 = byte, 1 = short, 2 = long) */
1094 32, /* bitsize */
1095 TRUE, /* pc_relative */
1096 0, /* bitpos */
1097 complain_overflow_dont,/* complain_on_overflow */
1098 bfd_elf_generic_reloc, /* special_function */
1099 "R_ARM_ALU_SB_G0", /* name */
1100 FALSE, /* partial_inplace */
1101 0xffffffff, /* src_mask */
1102 0xffffffff, /* dst_mask */
1103 TRUE), /* pcrel_offset */
1104
1105 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1106 0, /* rightshift */
1107 2, /* size (0 = byte, 1 = short, 2 = long) */
1108 32, /* bitsize */
1109 TRUE, /* pc_relative */
1110 0, /* bitpos */
1111 complain_overflow_dont,/* complain_on_overflow */
1112 bfd_elf_generic_reloc, /* special_function */
1113 "R_ARM_ALU_SB_G1_NC", /* name */
1114 FALSE, /* partial_inplace */
1115 0xffffffff, /* src_mask */
1116 0xffffffff, /* dst_mask */
1117 TRUE), /* pcrel_offset */
1118
1119 HOWTO (R_ARM_ALU_SB_G1, /* type */
1120 0, /* rightshift */
1121 2, /* size (0 = byte, 1 = short, 2 = long) */
1122 32, /* bitsize */
1123 TRUE, /* pc_relative */
1124 0, /* bitpos */
1125 complain_overflow_dont,/* complain_on_overflow */
1126 bfd_elf_generic_reloc, /* special_function */
1127 "R_ARM_ALU_SB_G1", /* name */
1128 FALSE, /* partial_inplace */
1129 0xffffffff, /* src_mask */
1130 0xffffffff, /* dst_mask */
1131 TRUE), /* pcrel_offset */
1132
1133 HOWTO (R_ARM_ALU_SB_G2, /* type */
1134 0, /* rightshift */
1135 2, /* size (0 = byte, 1 = short, 2 = long) */
1136 32, /* bitsize */
1137 TRUE, /* pc_relative */
1138 0, /* bitpos */
1139 complain_overflow_dont,/* complain_on_overflow */
1140 bfd_elf_generic_reloc, /* special_function */
1141 "R_ARM_ALU_SB_G2", /* name */
1142 FALSE, /* partial_inplace */
1143 0xffffffff, /* src_mask */
1144 0xffffffff, /* dst_mask */
1145 TRUE), /* pcrel_offset */
1146
1147 HOWTO (R_ARM_LDR_SB_G0, /* type */
1148 0, /* rightshift */
1149 2, /* size (0 = byte, 1 = short, 2 = long) */
1150 32, /* bitsize */
1151 TRUE, /* pc_relative */
1152 0, /* bitpos */
1153 complain_overflow_dont,/* complain_on_overflow */
1154 bfd_elf_generic_reloc, /* special_function */
1155 "R_ARM_LDR_SB_G0", /* name */
1156 FALSE, /* partial_inplace */
1157 0xffffffff, /* src_mask */
1158 0xffffffff, /* dst_mask */
1159 TRUE), /* pcrel_offset */
1160
1161 HOWTO (R_ARM_LDR_SB_G1, /* type */
1162 0, /* rightshift */
1163 2, /* size (0 = byte, 1 = short, 2 = long) */
1164 32, /* bitsize */
1165 TRUE, /* pc_relative */
1166 0, /* bitpos */
1167 complain_overflow_dont,/* complain_on_overflow */
1168 bfd_elf_generic_reloc, /* special_function */
1169 "R_ARM_LDR_SB_G1", /* name */
1170 FALSE, /* partial_inplace */
1171 0xffffffff, /* src_mask */
1172 0xffffffff, /* dst_mask */
1173 TRUE), /* pcrel_offset */
1174
1175 HOWTO (R_ARM_LDR_SB_G2, /* type */
1176 0, /* rightshift */
1177 2, /* size (0 = byte, 1 = short, 2 = long) */
1178 32, /* bitsize */
1179 TRUE, /* pc_relative */
1180 0, /* bitpos */
1181 complain_overflow_dont,/* complain_on_overflow */
1182 bfd_elf_generic_reloc, /* special_function */
1183 "R_ARM_LDR_SB_G2", /* name */
1184 FALSE, /* partial_inplace */
1185 0xffffffff, /* src_mask */
1186 0xffffffff, /* dst_mask */
1187 TRUE), /* pcrel_offset */
1188
1189 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1190 0, /* rightshift */
1191 2, /* size (0 = byte, 1 = short, 2 = long) */
1192 32, /* bitsize */
1193 TRUE, /* pc_relative */
1194 0, /* bitpos */
1195 complain_overflow_dont,/* complain_on_overflow */
1196 bfd_elf_generic_reloc, /* special_function */
1197 "R_ARM_LDRS_SB_G0", /* name */
1198 FALSE, /* partial_inplace */
1199 0xffffffff, /* src_mask */
1200 0xffffffff, /* dst_mask */
1201 TRUE), /* pcrel_offset */
1202
1203 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1204 0, /* rightshift */
1205 2, /* size (0 = byte, 1 = short, 2 = long) */
1206 32, /* bitsize */
1207 TRUE, /* pc_relative */
1208 0, /* bitpos */
1209 complain_overflow_dont,/* complain_on_overflow */
1210 bfd_elf_generic_reloc, /* special_function */
1211 "R_ARM_LDRS_SB_G1", /* name */
1212 FALSE, /* partial_inplace */
1213 0xffffffff, /* src_mask */
1214 0xffffffff, /* dst_mask */
1215 TRUE), /* pcrel_offset */
1216
1217 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1218 0, /* rightshift */
1219 2, /* size (0 = byte, 1 = short, 2 = long) */
1220 32, /* bitsize */
1221 TRUE, /* pc_relative */
1222 0, /* bitpos */
1223 complain_overflow_dont,/* complain_on_overflow */
1224 bfd_elf_generic_reloc, /* special_function */
1225 "R_ARM_LDRS_SB_G2", /* name */
1226 FALSE, /* partial_inplace */
1227 0xffffffff, /* src_mask */
1228 0xffffffff, /* dst_mask */
1229 TRUE), /* pcrel_offset */
1230
1231 HOWTO (R_ARM_LDC_SB_G0, /* type */
1232 0, /* rightshift */
1233 2, /* size (0 = byte, 1 = short, 2 = long) */
1234 32, /* bitsize */
1235 TRUE, /* pc_relative */
1236 0, /* bitpos */
1237 complain_overflow_dont,/* complain_on_overflow */
1238 bfd_elf_generic_reloc, /* special_function */
1239 "R_ARM_LDC_SB_G0", /* name */
1240 FALSE, /* partial_inplace */
1241 0xffffffff, /* src_mask */
1242 0xffffffff, /* dst_mask */
1243 TRUE), /* pcrel_offset */
1244
1245 HOWTO (R_ARM_LDC_SB_G1, /* type */
1246 0, /* rightshift */
1247 2, /* size (0 = byte, 1 = short, 2 = long) */
1248 32, /* bitsize */
1249 TRUE, /* pc_relative */
1250 0, /* bitpos */
1251 complain_overflow_dont,/* complain_on_overflow */
1252 bfd_elf_generic_reloc, /* special_function */
1253 "R_ARM_LDC_SB_G1", /* name */
1254 FALSE, /* partial_inplace */
1255 0xffffffff, /* src_mask */
1256 0xffffffff, /* dst_mask */
1257 TRUE), /* pcrel_offset */
1258
1259 HOWTO (R_ARM_LDC_SB_G2, /* type */
1260 0, /* rightshift */
1261 2, /* size (0 = byte, 1 = short, 2 = long) */
1262 32, /* bitsize */
1263 TRUE, /* pc_relative */
1264 0, /* bitpos */
1265 complain_overflow_dont,/* complain_on_overflow */
1266 bfd_elf_generic_reloc, /* special_function */
1267 "R_ARM_LDC_SB_G2", /* name */
1268 FALSE, /* partial_inplace */
1269 0xffffffff, /* src_mask */
1270 0xffffffff, /* dst_mask */
1271 TRUE), /* pcrel_offset */
1272
1273 /* End of group relocations. */
1274
1275 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1276 0, /* rightshift */
1277 2, /* size (0 = byte, 1 = short, 2 = long) */
1278 16, /* bitsize */
1279 FALSE, /* pc_relative */
1280 0, /* bitpos */
1281 complain_overflow_dont,/* complain_on_overflow */
1282 bfd_elf_generic_reloc, /* special_function */
1283 "R_ARM_MOVW_BREL_NC", /* name */
1284 FALSE, /* partial_inplace */
1285 0x0000ffff, /* src_mask */
1286 0x0000ffff, /* dst_mask */
1287 FALSE), /* pcrel_offset */
1288
1289 HOWTO (R_ARM_MOVT_BREL, /* type */
1290 0, /* rightshift */
1291 2, /* size (0 = byte, 1 = short, 2 = long) */
1292 16, /* bitsize */
1293 FALSE, /* pc_relative */
1294 0, /* bitpos */
1295 complain_overflow_bitfield,/* complain_on_overflow */
1296 bfd_elf_generic_reloc, /* special_function */
1297 "R_ARM_MOVT_BREL", /* name */
1298 FALSE, /* partial_inplace */
1299 0x0000ffff, /* src_mask */
1300 0x0000ffff, /* dst_mask */
1301 FALSE), /* pcrel_offset */
1302
1303 HOWTO (R_ARM_MOVW_BREL, /* type */
1304 0, /* rightshift */
1305 2, /* size (0 = byte, 1 = short, 2 = long) */
1306 16, /* bitsize */
1307 FALSE, /* pc_relative */
1308 0, /* bitpos */
1309 complain_overflow_dont,/* complain_on_overflow */
1310 bfd_elf_generic_reloc, /* special_function */
1311 "R_ARM_MOVW_BREL", /* name */
1312 FALSE, /* partial_inplace */
1313 0x0000ffff, /* src_mask */
1314 0x0000ffff, /* dst_mask */
1315 FALSE), /* pcrel_offset */
1316
1317 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1318 0, /* rightshift */
1319 2, /* size (0 = byte, 1 = short, 2 = long) */
1320 16, /* bitsize */
1321 FALSE, /* pc_relative */
1322 0, /* bitpos */
1323 complain_overflow_dont,/* complain_on_overflow */
1324 bfd_elf_generic_reloc, /* special_function */
1325 "R_ARM_THM_MOVW_BREL_NC",/* name */
1326 FALSE, /* partial_inplace */
1327 0x040f70ff, /* src_mask */
1328 0x040f70ff, /* dst_mask */
1329 FALSE), /* pcrel_offset */
1330
1331 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1332 0, /* rightshift */
1333 2, /* size (0 = byte, 1 = short, 2 = long) */
1334 16, /* bitsize */
1335 FALSE, /* pc_relative */
1336 0, /* bitpos */
1337 complain_overflow_bitfield,/* complain_on_overflow */
1338 bfd_elf_generic_reloc, /* special_function */
1339 "R_ARM_THM_MOVT_BREL", /* name */
1340 FALSE, /* partial_inplace */
1341 0x040f70ff, /* src_mask */
1342 0x040f70ff, /* dst_mask */
1343 FALSE), /* pcrel_offset */
1344
1345 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1346 0, /* rightshift */
1347 2, /* size (0 = byte, 1 = short, 2 = long) */
1348 16, /* bitsize */
1349 FALSE, /* pc_relative */
1350 0, /* bitpos */
1351 complain_overflow_dont,/* complain_on_overflow */
1352 bfd_elf_generic_reloc, /* special_function */
1353 "R_ARM_THM_MOVW_BREL", /* name */
1354 FALSE, /* partial_inplace */
1355 0x040f70ff, /* src_mask */
1356 0x040f70ff, /* dst_mask */
1357 FALSE), /* pcrel_offset */
1358
1359 HOWTO (R_ARM_TLS_GOTDESC, /* type */
1360 0, /* rightshift */
1361 2, /* size (0 = byte, 1 = short, 2 = long) */
1362 32, /* bitsize */
1363 FALSE, /* pc_relative */
1364 0, /* bitpos */
1365 complain_overflow_bitfield,/* complain_on_overflow */
1366 NULL, /* special_function */
1367 "R_ARM_TLS_GOTDESC", /* name */
1368 TRUE, /* partial_inplace */
1369 0xffffffff, /* src_mask */
1370 0xffffffff, /* dst_mask */
1371 FALSE), /* pcrel_offset */
1372
1373 HOWTO (R_ARM_TLS_CALL, /* type */
1374 0, /* rightshift */
1375 2, /* size (0 = byte, 1 = short, 2 = long) */
1376 24, /* bitsize */
1377 FALSE, /* pc_relative */
1378 0, /* bitpos */
1379 complain_overflow_dont,/* complain_on_overflow */
1380 bfd_elf_generic_reloc, /* special_function */
1381 "R_ARM_TLS_CALL", /* name */
1382 FALSE, /* partial_inplace */
1383 0x00ffffff, /* src_mask */
1384 0x00ffffff, /* dst_mask */
1385 FALSE), /* pcrel_offset */
1386
1387 HOWTO (R_ARM_TLS_DESCSEQ, /* type */
1388 0, /* rightshift */
1389 2, /* size (0 = byte, 1 = short, 2 = long) */
1390 0, /* bitsize */
1391 FALSE, /* pc_relative */
1392 0, /* bitpos */
1393 complain_overflow_bitfield,/* complain_on_overflow */
1394 bfd_elf_generic_reloc, /* special_function */
1395 "R_ARM_TLS_DESCSEQ", /* name */
1396 FALSE, /* partial_inplace */
1397 0x00000000, /* src_mask */
1398 0x00000000, /* dst_mask */
1399 FALSE), /* pcrel_offset */
1400
1401 HOWTO (R_ARM_THM_TLS_CALL, /* type */
1402 0, /* rightshift */
1403 2, /* size (0 = byte, 1 = short, 2 = long) */
1404 24, /* bitsize */
1405 FALSE, /* pc_relative */
1406 0, /* bitpos */
1407 complain_overflow_dont,/* complain_on_overflow */
1408 bfd_elf_generic_reloc, /* special_function */
1409 "R_ARM_THM_TLS_CALL", /* name */
1410 FALSE, /* partial_inplace */
1411 0x07ff07ff, /* src_mask */
1412 0x07ff07ff, /* dst_mask */
1413 FALSE), /* pcrel_offset */
1414
1415 HOWTO (R_ARM_PLT32_ABS, /* type */
1416 0, /* rightshift */
1417 2, /* size (0 = byte, 1 = short, 2 = long) */
1418 32, /* bitsize */
1419 FALSE, /* pc_relative */
1420 0, /* bitpos */
1421 complain_overflow_dont,/* complain_on_overflow */
1422 bfd_elf_generic_reloc, /* special_function */
1423 "R_ARM_PLT32_ABS", /* name */
1424 FALSE, /* partial_inplace */
1425 0xffffffff, /* src_mask */
1426 0xffffffff, /* dst_mask */
1427 FALSE), /* pcrel_offset */
1428
1429 HOWTO (R_ARM_GOT_ABS, /* type */
1430 0, /* rightshift */
1431 2, /* size (0 = byte, 1 = short, 2 = long) */
1432 32, /* bitsize */
1433 FALSE, /* pc_relative */
1434 0, /* bitpos */
1435 complain_overflow_dont,/* complain_on_overflow */
1436 bfd_elf_generic_reloc, /* special_function */
1437 "R_ARM_GOT_ABS", /* name */
1438 FALSE, /* partial_inplace */
1439 0xffffffff, /* src_mask */
1440 0xffffffff, /* dst_mask */
1441 FALSE), /* pcrel_offset */
1442
1443 HOWTO (R_ARM_GOT_PREL, /* type */
1444 0, /* rightshift */
1445 2, /* size (0 = byte, 1 = short, 2 = long) */
1446 32, /* bitsize */
1447 TRUE, /* pc_relative */
1448 0, /* bitpos */
1449 complain_overflow_dont, /* complain_on_overflow */
1450 bfd_elf_generic_reloc, /* special_function */
1451 "R_ARM_GOT_PREL", /* name */
1452 FALSE, /* partial_inplace */
1453 0xffffffff, /* src_mask */
1454 0xffffffff, /* dst_mask */
1455 TRUE), /* pcrel_offset */
1456
1457 HOWTO (R_ARM_GOT_BREL12, /* type */
1458 0, /* rightshift */
1459 2, /* size (0 = byte, 1 = short, 2 = long) */
1460 12, /* bitsize */
1461 FALSE, /* pc_relative */
1462 0, /* bitpos */
1463 complain_overflow_bitfield,/* complain_on_overflow */
1464 bfd_elf_generic_reloc, /* special_function */
1465 "R_ARM_GOT_BREL12", /* name */
1466 FALSE, /* partial_inplace */
1467 0x00000fff, /* src_mask */
1468 0x00000fff, /* dst_mask */
1469 FALSE), /* pcrel_offset */
1470
1471 HOWTO (R_ARM_GOTOFF12, /* type */
1472 0, /* rightshift */
1473 2, /* size (0 = byte, 1 = short, 2 = long) */
1474 12, /* bitsize */
1475 FALSE, /* pc_relative */
1476 0, /* bitpos */
1477 complain_overflow_bitfield,/* complain_on_overflow */
1478 bfd_elf_generic_reloc, /* special_function */
1479 "R_ARM_GOTOFF12", /* name */
1480 FALSE, /* partial_inplace */
1481 0x00000fff, /* src_mask */
1482 0x00000fff, /* dst_mask */
1483 FALSE), /* pcrel_offset */
1484
1485 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1486
1487 /* GNU extension to record C++ vtable member usage */
1488 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1489 0, /* rightshift */
1490 2, /* size (0 = byte, 1 = short, 2 = long) */
1491 0, /* bitsize */
1492 FALSE, /* pc_relative */
1493 0, /* bitpos */
1494 complain_overflow_dont, /* complain_on_overflow */
1495 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1496 "R_ARM_GNU_VTENTRY", /* name */
1497 FALSE, /* partial_inplace */
1498 0, /* src_mask */
1499 0, /* dst_mask */
1500 FALSE), /* pcrel_offset */
1501
1502 /* GNU extension to record C++ vtable hierarchy */
1503 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1504 0, /* rightshift */
1505 2, /* size (0 = byte, 1 = short, 2 = long) */
1506 0, /* bitsize */
1507 FALSE, /* pc_relative */
1508 0, /* bitpos */
1509 complain_overflow_dont, /* complain_on_overflow */
1510 NULL, /* special_function */
1511 "R_ARM_GNU_VTINHERIT", /* name */
1512 FALSE, /* partial_inplace */
1513 0, /* src_mask */
1514 0, /* dst_mask */
1515 FALSE), /* pcrel_offset */
1516
1517 HOWTO (R_ARM_THM_JUMP11, /* type */
1518 1, /* rightshift */
1519 1, /* size (0 = byte, 1 = short, 2 = long) */
1520 11, /* bitsize */
1521 TRUE, /* pc_relative */
1522 0, /* bitpos */
1523 complain_overflow_signed, /* complain_on_overflow */
1524 bfd_elf_generic_reloc, /* special_function */
1525 "R_ARM_THM_JUMP11", /* name */
1526 FALSE, /* partial_inplace */
1527 0x000007ff, /* src_mask */
1528 0x000007ff, /* dst_mask */
1529 TRUE), /* pcrel_offset */
1530
1531 HOWTO (R_ARM_THM_JUMP8, /* type */
1532 1, /* rightshift */
1533 1, /* size (0 = byte, 1 = short, 2 = long) */
1534 8, /* bitsize */
1535 TRUE, /* pc_relative */
1536 0, /* bitpos */
1537 complain_overflow_signed, /* complain_on_overflow */
1538 bfd_elf_generic_reloc, /* special_function */
1539 "R_ARM_THM_JUMP8", /* name */
1540 FALSE, /* partial_inplace */
1541 0x000000ff, /* src_mask */
1542 0x000000ff, /* dst_mask */
1543 TRUE), /* pcrel_offset */
1544
1545 /* TLS relocations */
1546 HOWTO (R_ARM_TLS_GD32, /* type */
1547 0, /* rightshift */
1548 2, /* size (0 = byte, 1 = short, 2 = long) */
1549 32, /* bitsize */
1550 FALSE, /* pc_relative */
1551 0, /* bitpos */
1552 complain_overflow_bitfield,/* complain_on_overflow */
1553 NULL, /* special_function */
1554 "R_ARM_TLS_GD32", /* name */
1555 TRUE, /* partial_inplace */
1556 0xffffffff, /* src_mask */
1557 0xffffffff, /* dst_mask */
1558 FALSE), /* pcrel_offset */
1559
1560 HOWTO (R_ARM_TLS_LDM32, /* type */
1561 0, /* rightshift */
1562 2, /* size (0 = byte, 1 = short, 2 = long) */
1563 32, /* bitsize */
1564 FALSE, /* pc_relative */
1565 0, /* bitpos */
1566 complain_overflow_bitfield,/* complain_on_overflow */
1567 bfd_elf_generic_reloc, /* special_function */
1568 "R_ARM_TLS_LDM32", /* name */
1569 TRUE, /* partial_inplace */
1570 0xffffffff, /* src_mask */
1571 0xffffffff, /* dst_mask */
1572 FALSE), /* pcrel_offset */
1573
1574 HOWTO (R_ARM_TLS_LDO32, /* type */
1575 0, /* rightshift */
1576 2, /* size (0 = byte, 1 = short, 2 = long) */
1577 32, /* bitsize */
1578 FALSE, /* pc_relative */
1579 0, /* bitpos */
1580 complain_overflow_bitfield,/* complain_on_overflow */
1581 bfd_elf_generic_reloc, /* special_function */
1582 "R_ARM_TLS_LDO32", /* name */
1583 TRUE, /* partial_inplace */
1584 0xffffffff, /* src_mask */
1585 0xffffffff, /* dst_mask */
1586 FALSE), /* pcrel_offset */
1587
1588 HOWTO (R_ARM_TLS_IE32, /* type */
1589 0, /* rightshift */
1590 2, /* size (0 = byte, 1 = short, 2 = long) */
1591 32, /* bitsize */
1592 FALSE, /* pc_relative */
1593 0, /* bitpos */
1594 complain_overflow_bitfield,/* complain_on_overflow */
1595 NULL, /* special_function */
1596 "R_ARM_TLS_IE32", /* name */
1597 TRUE, /* partial_inplace */
1598 0xffffffff, /* src_mask */
1599 0xffffffff, /* dst_mask */
1600 FALSE), /* pcrel_offset */
1601
1602 HOWTO (R_ARM_TLS_LE32, /* type */
1603 0, /* rightshift */
1604 2, /* size (0 = byte, 1 = short, 2 = long) */
1605 32, /* bitsize */
1606 FALSE, /* pc_relative */
1607 0, /* bitpos */
1608 complain_overflow_bitfield,/* complain_on_overflow */
1609 NULL, /* special_function */
1610 "R_ARM_TLS_LE32", /* name */
1611 TRUE, /* partial_inplace */
1612 0xffffffff, /* src_mask */
1613 0xffffffff, /* dst_mask */
1614 FALSE), /* pcrel_offset */
1615
1616 HOWTO (R_ARM_TLS_LDO12, /* type */
1617 0, /* rightshift */
1618 2, /* size (0 = byte, 1 = short, 2 = long) */
1619 12, /* bitsize */
1620 FALSE, /* pc_relative */
1621 0, /* bitpos */
1622 complain_overflow_bitfield,/* complain_on_overflow */
1623 bfd_elf_generic_reloc, /* special_function */
1624 "R_ARM_TLS_LDO12", /* name */
1625 FALSE, /* partial_inplace */
1626 0x00000fff, /* src_mask */
1627 0x00000fff, /* dst_mask */
1628 FALSE), /* pcrel_offset */
1629
1630 HOWTO (R_ARM_TLS_LE12, /* type */
1631 0, /* rightshift */
1632 2, /* size (0 = byte, 1 = short, 2 = long) */
1633 12, /* bitsize */
1634 FALSE, /* pc_relative */
1635 0, /* bitpos */
1636 complain_overflow_bitfield,/* complain_on_overflow */
1637 bfd_elf_generic_reloc, /* special_function */
1638 "R_ARM_TLS_LE12", /* name */
1639 FALSE, /* partial_inplace */
1640 0x00000fff, /* src_mask */
1641 0x00000fff, /* dst_mask */
1642 FALSE), /* pcrel_offset */
1643
1644 HOWTO (R_ARM_TLS_IE12GP, /* type */
1645 0, /* rightshift */
1646 2, /* size (0 = byte, 1 = short, 2 = long) */
1647 12, /* bitsize */
1648 FALSE, /* pc_relative */
1649 0, /* bitpos */
1650 complain_overflow_bitfield,/* complain_on_overflow */
1651 bfd_elf_generic_reloc, /* special_function */
1652 "R_ARM_TLS_IE12GP", /* name */
1653 FALSE, /* partial_inplace */
1654 0x00000fff, /* src_mask */
1655 0x00000fff, /* dst_mask */
1656 FALSE), /* pcrel_offset */
1657
1658 /* 112-127 private relocations. */
1659 EMPTY_HOWTO (112),
1660 EMPTY_HOWTO (113),
1661 EMPTY_HOWTO (114),
1662 EMPTY_HOWTO (115),
1663 EMPTY_HOWTO (116),
1664 EMPTY_HOWTO (117),
1665 EMPTY_HOWTO (118),
1666 EMPTY_HOWTO (119),
1667 EMPTY_HOWTO (120),
1668 EMPTY_HOWTO (121),
1669 EMPTY_HOWTO (122),
1670 EMPTY_HOWTO (123),
1671 EMPTY_HOWTO (124),
1672 EMPTY_HOWTO (125),
1673 EMPTY_HOWTO (126),
1674 EMPTY_HOWTO (127),
1675
1676 /* R_ARM_ME_TOO, obsolete. */
1677 EMPTY_HOWTO (128),
1678
1679 HOWTO (R_ARM_THM_TLS_DESCSEQ, /* type */
1680 0, /* rightshift */
1681 1, /* size (0 = byte, 1 = short, 2 = long) */
1682 0, /* bitsize */
1683 FALSE, /* pc_relative */
1684 0, /* bitpos */
1685 complain_overflow_bitfield,/* complain_on_overflow */
1686 bfd_elf_generic_reloc, /* special_function */
1687 "R_ARM_THM_TLS_DESCSEQ",/* name */
1688 FALSE, /* partial_inplace */
1689 0x00000000, /* src_mask */
1690 0x00000000, /* dst_mask */
1691 FALSE), /* pcrel_offset */
1692 EMPTY_HOWTO (130),
1693 EMPTY_HOWTO (131),
1694 HOWTO (R_ARM_THM_ALU_ABS_G0_NC,/* type. */
1695 0, /* rightshift. */
1696 1, /* size (0 = byte, 1 = short, 2 = long). */
1697 16, /* bitsize. */
1698 FALSE, /* pc_relative. */
1699 0, /* bitpos. */
1700 complain_overflow_bitfield,/* complain_on_overflow. */
1701 bfd_elf_generic_reloc, /* special_function. */
1702 "R_ARM_THM_ALU_ABS_G0_NC",/* name. */
1703 FALSE, /* partial_inplace. */
1704 0x00000000, /* src_mask. */
1705 0x00000000, /* dst_mask. */
1706 FALSE), /* pcrel_offset. */
1707 HOWTO (R_ARM_THM_ALU_ABS_G1_NC,/* type. */
1708 0, /* rightshift. */
1709 1, /* size (0 = byte, 1 = short, 2 = long). */
1710 16, /* bitsize. */
1711 FALSE, /* pc_relative. */
1712 0, /* bitpos. */
1713 complain_overflow_bitfield,/* complain_on_overflow. */
1714 bfd_elf_generic_reloc, /* special_function. */
1715 "R_ARM_THM_ALU_ABS_G1_NC",/* name. */
1716 FALSE, /* partial_inplace. */
1717 0x00000000, /* src_mask. */
1718 0x00000000, /* dst_mask. */
1719 FALSE), /* pcrel_offset. */
1720 HOWTO (R_ARM_THM_ALU_ABS_G2_NC,/* type. */
1721 0, /* rightshift. */
1722 1, /* size (0 = byte, 1 = short, 2 = long). */
1723 16, /* bitsize. */
1724 FALSE, /* pc_relative. */
1725 0, /* bitpos. */
1726 complain_overflow_bitfield,/* complain_on_overflow. */
1727 bfd_elf_generic_reloc, /* special_function. */
1728 "R_ARM_THM_ALU_ABS_G2_NC",/* name. */
1729 FALSE, /* partial_inplace. */
1730 0x00000000, /* src_mask. */
1731 0x00000000, /* dst_mask. */
1732 FALSE), /* pcrel_offset. */
1733 HOWTO (R_ARM_THM_ALU_ABS_G3_NC,/* type. */
1734 0, /* rightshift. */
1735 1, /* size (0 = byte, 1 = short, 2 = long). */
1736 16, /* bitsize. */
1737 FALSE, /* pc_relative. */
1738 0, /* bitpos. */
1739 complain_overflow_bitfield,/* complain_on_overflow. */
1740 bfd_elf_generic_reloc, /* special_function. */
1741 "R_ARM_THM_ALU_ABS_G3_NC",/* name. */
1742 FALSE, /* partial_inplace. */
1743 0x00000000, /* src_mask. */
1744 0x00000000, /* dst_mask. */
1745 FALSE), /* pcrel_offset. */
1746 };
1747
/* Relocation types 160 onwards.  Currently holds only R_ARM_IRELATIVE
   (type 160); elf32_arm_howto_from_type indexes this table with
   r_type - R_ARM_IRELATIVE.  */
static reloc_howto_type elf32_arm_howto_table_2[1] =
{
  HOWTO (R_ARM_IRELATIVE,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_IRELATIVE",	/* name */
	 TRUE,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE)			/* pcrel_offset */
};
1765
/* 249-255 extended, currently unused, relocations.  This table holds
   the four contiguous types R_ARM_RREL32 .. R_ARM_RBASE and is indexed
   by r_type - R_ARM_RREL32 in elf32_arm_howto_from_type.  All entries
   are placeholders (zero size, zero masks) kept only so the types can
   be named.  */
static reloc_howto_type elf32_arm_howto_table_3[4] =
{
  HOWTO (R_ARM_RREL32,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RREL32",	/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RABS32,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RABS32",	/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RPC24,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RPC24",		/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RBASE,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RBASE",		/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE)			/* pcrel_offset */
};
1825
1826 static reloc_howto_type *
1827 elf32_arm_howto_from_type (unsigned int r_type)
1828 {
1829 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1830 return &elf32_arm_howto_table_1[r_type];
1831
1832 if (r_type == R_ARM_IRELATIVE)
1833 return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];
1834
1835 if (r_type >= R_ARM_RREL32
1836 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
1837 return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];
1838
1839 return NULL;
1840 }
1841
1842 static void
1843 elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, arelent * bfd_reloc,
1844 Elf_Internal_Rela * elf_reloc)
1845 {
1846 unsigned int r_type;
1847
1848 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1849 bfd_reloc->howto = elf32_arm_howto_from_type (r_type);
1850 }
1851
/* One entry of the BFD-generic to ELF relocation mapping used by
   elf32_arm_reloc_type_lookup below.  */
struct elf32_arm_reloc_map
{
  bfd_reloc_code_real_type bfd_reloc_val;	/* Generic BFD reloc code.  */
  unsigned char elf_reloc_val;			/* Matching R_ARM_* number
						   (all fit in 0..255).  */
};
1857
1858 /* All entries in this list must also be present in elf32_arm_howto_table. */
1859 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1860 {
1861 {BFD_RELOC_NONE, R_ARM_NONE},
1862 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1863 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1864 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1865 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1866 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1867 {BFD_RELOC_32, R_ARM_ABS32},
1868 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1869 {BFD_RELOC_8, R_ARM_ABS8},
1870 {BFD_RELOC_16, R_ARM_ABS16},
1871 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1872 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1873 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1874 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1875 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1876 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1877 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1878 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1879 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1880 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1881 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1882 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1883 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1884 {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL},
1885 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1886 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1887 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1888 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1889 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1890 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1891 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
1892 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1893 {BFD_RELOC_ARM_TLS_GOTDESC, R_ARM_TLS_GOTDESC},
1894 {BFD_RELOC_ARM_TLS_CALL, R_ARM_TLS_CALL},
1895 {BFD_RELOC_ARM_THM_TLS_CALL, R_ARM_THM_TLS_CALL},
1896 {BFD_RELOC_ARM_TLS_DESCSEQ, R_ARM_TLS_DESCSEQ},
1897 {BFD_RELOC_ARM_THM_TLS_DESCSEQ, R_ARM_THM_TLS_DESCSEQ},
1898 {BFD_RELOC_ARM_TLS_DESC, R_ARM_TLS_DESC},
1899 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
1900 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
1901 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
1902 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
1903 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
1904 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
1905 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
1906 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
1907 {BFD_RELOC_ARM_IRELATIVE, R_ARM_IRELATIVE},
1908 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
1909 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
1910 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
1911 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
1912 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
1913 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
1914 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
1915 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
1916 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
1917 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
1918 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
1919 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
1920 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
1921 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
1922 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
1923 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
1924 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
1925 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
1926 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
1927 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
1928 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
1929 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
1930 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
1931 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
1932 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
1933 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
1934 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
1935 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
1936 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
1937 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
1938 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
1939 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
1940 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
1941 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
1942 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
1943 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
1944 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
1945 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
1946 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX},
1947 {BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC, R_ARM_THM_ALU_ABS_G3_NC},
1948 {BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC, R_ARM_THM_ALU_ABS_G2_NC},
1949 {BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC, R_ARM_THM_ALU_ABS_G1_NC},
1950 {BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC, R_ARM_THM_ALU_ABS_G0_NC}
1951 };
1952
1953 static reloc_howto_type *
1954 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1955 bfd_reloc_code_real_type code)
1956 {
1957 unsigned int i;
1958
1959 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
1960 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
1961 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
1962
1963 return NULL;
1964 }
1965
1966 static reloc_howto_type *
1967 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1968 const char *r_name)
1969 {
1970 unsigned int i;
1971
1972 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
1973 if (elf32_arm_howto_table_1[i].name != NULL
1974 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
1975 return &elf32_arm_howto_table_1[i];
1976
1977 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
1978 if (elf32_arm_howto_table_2[i].name != NULL
1979 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
1980 return &elf32_arm_howto_table_2[i];
1981
1982 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++)
1983 if (elf32_arm_howto_table_3[i].name != NULL
1984 && strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0)
1985 return &elf32_arm_howto_table_3[i];
1986
1987 return NULL;
1988 }
1989
1990 /* Support for core dump NOTE sections. */
1991
/* Parse an NT_PRSTATUS core-dump note.  Record the fatal signal and
   LWP id in the core-file tdata, then create a ".reg/<pid>"
   pseudosection covering the general-purpose register block.  Only
   the 148-byte Linux/ARM 32-bit elf_prstatus layout is recognized;
   any other descriptor size is rejected.  */

static bfd_boolean
elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
{
  int offset;
  size_t size;

  switch (note->descsz)
    {
      default:
	return FALSE;

      case 148:		/* Linux/ARM 32-bit.  */
	/* pr_cursig */
	elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);

	/* pr_pid */
	elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);

	/* pr_reg: OFFSET/SIZE delimit the register dump within the
	   note descriptor.  */
	offset = 72;
	size = 72;

	break;
    }

  /* Make a ".reg/999" section.  */
  return _bfd_elfcore_make_pseudosection (abfd, ".reg",
					  size, note->descpos + offset);
}
2021
2022 static bfd_boolean
2023 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
2024 {
2025 switch (note->descsz)
2026 {
2027 default:
2028 return FALSE;
2029
2030 case 124: /* Linux/ARM elf_prpsinfo. */
2031 elf_tdata (abfd)->core->pid
2032 = bfd_get_32 (abfd, note->descdata + 12);
2033 elf_tdata (abfd)->core->program
2034 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
2035 elf_tdata (abfd)->core->command
2036 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
2037 }
2038
2039 /* Note that for some reason, a spurious space is tacked
2040 onto the end of the args in some (at least one anyway)
2041 implementations, so strip it off if it exists. */
2042 {
2043 char *command = elf_tdata (abfd)->core->command;
2044 int n = strlen (command);
2045
2046 if (0 < n && command[n - 1] == ' ')
2047 command[n - 1] = '\0';
2048 }
2049
2050 return TRUE;
2051 }
2052
/* Write an ARM core-dump note of type NOTE_TYPE into BUF (current
   size *BUFSIZ) and return the updated buffer, or NULL for note
   types we do not handle.  The variadic arguments depend on the
   note type:
     NT_PRPSINFO: const char *fname, const char *psargs.
     NT_PRSTATUS: long pid, int cursig, const void *gregs.
   The magic offsets and buffer sizes mirror the Linux/ARM
   elf_prpsinfo (124 bytes) and elf_prstatus (148 bytes) layouts
   also decoded by the grok functions above.  */

static char *
elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz,
				int note_type, ...)
{
  switch (note_type)
    {
    default:
      return NULL;

    case NT_PRPSINFO:
      {
	char data[124];
	va_list ap;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	/* NB: strncpy is deliberate here: pr_fname (16 bytes at
	   offset 28) and pr_psargs (80 bytes at offset 44) are
	   fixed-width fields that need not be NUL-terminated; the
	   memset above has already zero-filled them.  */
	strncpy (data + 28, va_arg (ap, const char *), 16);
	strncpy (data + 44, va_arg (ap, const char *), 80);
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz,
				   "CORE", note_type, data, sizeof (data));
      }

    case NT_PRSTATUS:
      {
	char data[148];
	va_list ap;
	long pid;
	int cursig;
	const void *greg;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	pid = va_arg (ap, long);
	/* pr_pid at offset 24.  */
	bfd_put_32 (abfd, pid, data + 24);
	cursig = va_arg (ap, int);
	/* pr_cursig at offset 12.  */
	bfd_put_16 (abfd, cursig, data + 12);
	greg = va_arg (ap, const void *);
	/* pr_reg: 72-byte register block at offset 72.  */
	memcpy (data + 72, greg, 72);
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz,
				   "CORE", note_type, data, sizeof (data));
      }
    }
}
2100
2101 #define TARGET_LITTLE_SYM arm_elf32_le_vec
2102 #define TARGET_LITTLE_NAME "elf32-littlearm"
2103 #define TARGET_BIG_SYM arm_elf32_be_vec
2104 #define TARGET_BIG_NAME "elf32-bigarm"
2105
2106 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
2107 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
2108 #define elf_backend_write_core_note elf32_arm_nabi_write_core_note
2109
2110 typedef unsigned long int insn32;
2111 typedef unsigned short int insn16;
2112
2113 /* In lieu of proper flags, assume all EABIv4 or later objects are
2114 interworkable. */
2115 #define INTERWORK_FLAG(abfd) \
2116 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
2117 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
2118 || ((abfd)->flags & BFD_LINKER_CREATED))
2119
2120 /* The linker script knows the section names for placement.
2121 The entry_names are used to do simple name mangling on the stubs.
2122 Given a function name, and its type, the stub can be found. The
2123 name can be changed. The only requirement is the %s be present. */
2124 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
2125 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
2126
2127 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
2128 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
2129
2130 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
2131 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
2132
2133 #define STM32L4XX_ERRATUM_VENEER_SECTION_NAME ".text.stm32l4xx_veneer"
2134 #define STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "__stm32l4xx_veneer_%x"
2135
2136 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
2137 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
2138
2139 #define STUB_ENTRY_NAME "__%s_veneer"
2140
2141 /* The name of the dynamic interpreter. This is put in the .interp
2142 section. */
2143 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
2144
/* ARM instruction sequence used as a TLS trampoline: add the offset
   in r0 to the return address, load the target from the resulting
   descriptor and branch to it.  */
static const unsigned long tls_trampoline [] =
{
  0xe08e0000,		/* add r0, lr, r0 */
  0xe5901004,		/* ldr r1, [r0,#4] */
  0xe12fff11,		/* bx  r1 */
};
2151
2152 static const unsigned long dl_tlsdesc_lazy_trampoline [] =
2153 {
2154 0xe52d2004, /* push {r2} */
2155 0xe59f200c, /* ldr r2, [pc, #3f - . - 8] */
2156 0xe59f100c, /* ldr r1, [pc, #4f - . - 8] */
2157 0xe79f2002, /* 1: ldr r2, [pc, r2] */
2158 0xe081100f, /* 2: add r1, pc */
2159 0xe12fff12, /* bx r2 */
2160 0x00000014, /* 3: .word _GLOBAL_OFFSET_TABLE_ - 1b - 8
2161 + dl_tlsdesc_lazy_resolver(GOT) */
2162 0x00000018, /* 4: .word _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
2163 };
2164
2165 #ifdef FOUR_WORD_PLT
2166
2167 /* The first entry in a procedure linkage table looks like
2168 this. It is set up so that any shared library function that is
2169 called before the relocation has been set up calls the dynamic
2170 linker first. */
2171 static const bfd_vma elf32_arm_plt0_entry [] =
2172 {
2173 0xe52de004, /* str lr, [sp, #-4]! */
2174 0xe59fe010, /* ldr lr, [pc, #16] */
2175 0xe08fe00e, /* add lr, pc, lr */
2176 0xe5bef008, /* ldr pc, [lr, #8]! */
2177 };
2178
2179 /* Subsequent entries in a procedure linkage table look like
2180 this. */
2181 static const bfd_vma elf32_arm_plt_entry [] =
2182 {
2183 0xe28fc600, /* add ip, pc, #NN */
2184 0xe28cca00, /* add ip, ip, #NN */
2185 0xe5bcf000, /* ldr pc, [ip, #NN]! */
2186 0x00000000, /* unused */
2187 };
2188
2189 #else /* not FOUR_WORD_PLT */
2190
2191 /* The first entry in a procedure linkage table looks like
2192 this. It is set up so that any shared library function that is
2193 called before the relocation has been set up calls the dynamic
2194 linker first. */
2195 static const bfd_vma elf32_arm_plt0_entry [] =
2196 {
2197 0xe52de004, /* str lr, [sp, #-4]! */
2198 0xe59fe004, /* ldr lr, [pc, #4] */
2199 0xe08fe00e, /* add lr, pc, lr */
2200 0xe5bef008, /* ldr pc, [lr, #8]! */
2201 0x00000000, /* &GOT[0] - . */
2202 };
2203
2204 /* By default subsequent entries in a procedure linkage table look like
2205 this. Offsets that don't fit into 28 bits will cause link error. */
2206 static const bfd_vma elf32_arm_plt_entry_short [] =
2207 {
2208 0xe28fc600, /* add ip, pc, #0xNN00000 */
2209 0xe28cca00, /* add ip, ip, #0xNN000 */
2210 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
2211 };
2212
2213 /* When explicitly asked, we'll use this "long" entry format
2214 which can cope with arbitrary displacements. */
2215 static const bfd_vma elf32_arm_plt_entry_long [] =
2216 {
2217 0xe28fc200, /* add ip, pc, #0xN0000000 */
2218 0xe28cc600, /* add ip, ip, #0xNN00000 */
2219 0xe28cca00, /* add ip, ip, #0xNN000 */
2220 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
2221 };
2222
2223 static bfd_boolean elf32_arm_use_long_plt_entry = FALSE;
2224
2225 #endif /* not FOUR_WORD_PLT */
2226
2227 /* The first entry in a procedure linkage table looks like this.
2228 It is set up so that any shared library function that is called before the
2229 relocation has been set up calls the dynamic linker first. */
2230 static const bfd_vma elf32_thumb2_plt0_entry [] =
2231 {
2232 /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2233 an instruction maybe encoded to one or two array elements. */
2234 0xf8dfb500, /* push {lr} */
2235 0x44fee008, /* ldr.w lr, [pc, #8] */
2236 /* add lr, pc */
2237 0xff08f85e, /* ldr.w pc, [lr, #8]! */
2238 0x00000000, /* &GOT[0] - . */
2239 };
2240
2241 /* Subsequent entries in a procedure linkage table for thumb only target
2242 look like this. */
2243 static const bfd_vma elf32_thumb2_plt_entry [] =
2244 {
2245 /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2246 an instruction maybe encoded to one or two array elements. */
2247 0x0c00f240, /* movw ip, #0xNNNN */
2248 0x0c00f2c0, /* movt ip, #0xNNNN */
2249 0xf8dc44fc, /* add ip, pc */
2250 0xbf00f000 /* ldr.w pc, [ip] */
2251 /* nop */
2252 };
2253
2254 /* The format of the first entry in the procedure linkage table
2255 for a VxWorks executable. */
2256 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
2257 {
2258 0xe52dc008, /* str ip,[sp,#-8]! */
2259 0xe59fc000, /* ldr ip,[pc] */
2260 0xe59cf008, /* ldr pc,[ip,#8] */
2261 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
2262 };
2263
2264 /* The format of subsequent entries in a VxWorks executable. */
2265 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
2266 {
2267 0xe59fc000, /* ldr ip,[pc] */
2268 0xe59cf000, /* ldr pc,[ip] */
2269 0x00000000, /* .long @got */
2270 0xe59fc000, /* ldr ip,[pc] */
2271 0xea000000, /* b _PLT */
2272 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2273 };
2274
2275 /* The format of entries in a VxWorks shared library. */
2276 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
2277 {
2278 0xe59fc000, /* ldr ip,[pc] */
2279 0xe79cf009, /* ldr pc,[ip,r9] */
2280 0x00000000, /* .long @got */
2281 0xe59fc000, /* ldr ip,[pc] */
2282 0xe599f008, /* ldr pc,[r9,#8] */
2283 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2284 };
2285
2286 /* An initial stub used if the PLT entry is referenced from Thumb code. */
2287 #define PLT_THUMB_STUB_SIZE 4
2288 static const bfd_vma elf32_arm_plt_thumb_stub [] =
2289 {
2290 0x4778, /* bx pc */
2291 0x46c0 /* nop */
2292 };
2293
2294 /* The entries in a PLT when using a DLL-based target with multiple
2295 address spaces. */
2296 static const bfd_vma elf32_arm_symbian_plt_entry [] =
2297 {
2298 0xe51ff004, /* ldr pc, [pc, #-4] */
2299 0x00000000, /* dcd R_ARM_GLOB_DAT(X) */
2300 };
2301
2302 /* The first entry in a procedure linkage table looks like
2303 this. It is set up so that any shared library function that is
2304 called before the relocation has been set up calls the dynamic
2305 linker first. */
2306 static const bfd_vma elf32_arm_nacl_plt0_entry [] =
2307 {
2308 /* First bundle: */
2309 0xe300c000, /* movw ip, #:lower16:&GOT[2]-.+8 */
2310 0xe340c000, /* movt ip, #:upper16:&GOT[2]-.+8 */
2311 0xe08cc00f, /* add ip, ip, pc */
2312 0xe52dc008, /* str ip, [sp, #-8]! */
2313 /* Second bundle: */
2314 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2315 0xe59cc000, /* ldr ip, [ip] */
2316 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2317 0xe12fff1c, /* bx ip */
2318 /* Third bundle: */
2319 0xe320f000, /* nop */
2320 0xe320f000, /* nop */
2321 0xe320f000, /* nop */
2322 /* .Lplt_tail: */
2323 0xe50dc004, /* str ip, [sp, #-4] */
2324 /* Fourth bundle: */
2325 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2326 0xe59cc000, /* ldr ip, [ip] */
2327 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2328 0xe12fff1c, /* bx ip */
2329 };
2330 #define ARM_NACL_PLT_TAIL_OFFSET (11 * 4)
2331
2332 /* Subsequent entries in a procedure linkage table look like this. */
2333 static const bfd_vma elf32_arm_nacl_plt_entry [] =
2334 {
2335 0xe300c000, /* movw ip, #:lower16:&GOT[n]-.+8 */
2336 0xe340c000, /* movt ip, #:upper16:&GOT[n]-.+8 */
2337 0xe08cc00f, /* add ip, ip, pc */
2338 0xea000000, /* b .Lplt_tail */
2339 };
2340
2341 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2342 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2343 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) -2 + 4)
2344 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2345 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2346 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
2347 #define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) -2) + 4)
2348 #define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4)
2349
2350 enum stub_insn_type
2351 {
2352 THUMB16_TYPE = 1,
2353 THUMB32_TYPE,
2354 ARM_TYPE,
2355 DATA_TYPE
2356 };
2357
2358 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2359 /* A bit of a hack. A Thumb conditional branch, in which the proper condition
2360 is inserted in arm_build_one_stub(). */
2361 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2362 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2363 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2364 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2365 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2366 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
2367
/* One template entry of a stub, as built by the THUMB16_INSN /
   THUMB32_INSN / ARM_INSN / DATA_WORD macros above: the encoded
   instruction or data word, how it is to be emitted, and an optional
   relocation (with addend) to apply when the stub is instantiated.  */
typedef struct
{
  bfd_vma data;			/* Encoded instruction or literal word.  */
  enum stub_insn_type type;	/* Encoding class of DATA.  */
  unsigned int r_type;		/* ELF relocation for this entry
				   (R_ARM_NONE if none).  */
  int reloc_addend;		/* Addend for that relocation.  */
} insn_sequence;
2375
2376 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2377 to reach the stub if necessary. */
2378 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2379 {
2380 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2381 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2382 };
2383
2384 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2385 available. */
2386 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2387 {
2388 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2389 ARM_INSN (0xe12fff1c), /* bx ip */
2390 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2391 };
2392
2393 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2394 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2395 {
2396 THUMB16_INSN (0xb401), /* push {r0} */
2397 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2398 THUMB16_INSN (0x4684), /* mov ip, r0 */
2399 THUMB16_INSN (0xbc01), /* pop {r0} */
2400 THUMB16_INSN (0x4760), /* bx ip */
2401 THUMB16_INSN (0xbf00), /* nop */
2402 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2403 };
2404
2405 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2406 allowed. */
2407 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2408 {
2409 THUMB16_INSN (0x4778), /* bx pc */
2410 THUMB16_INSN (0x46c0), /* nop */
2411 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2412 ARM_INSN (0xe12fff1c), /* bx ip */
2413 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2414 };
2415
2416 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2417 available. */
2418 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2419 {
2420 THUMB16_INSN (0x4778), /* bx pc */
2421 THUMB16_INSN (0x46c0), /* nop */
2422 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2423 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2424 };
2425
2426 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2427 one, when the destination is close enough. */
2428 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2429 {
2430 THUMB16_INSN (0x4778), /* bx pc */
2431 THUMB16_INSN (0x46c0), /* nop */
2432 ARM_REL_INSN (0xea000000, -8), /* b (X-8) */
2433 };
2434
2435 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2436 blx to reach the stub if necessary. */
2437 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2438 {
2439 ARM_INSN (0xe59fc000), /* ldr ip, [pc] */
2440 ARM_INSN (0xe08ff00c), /* add pc, pc, ip */
2441 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2442 };
2443
2444 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2445 blx to reach the stub if necessary. We can not add into pc;
2446 it is not guaranteed to mode switch (different in ARMv6 and
2447 ARMv7). */
2448 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2449 {
2450 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2451 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2452 ARM_INSN (0xe12fff1c), /* bx ip */
2453 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2454 };
2455
2456 /* V4T ARM -> ARM long branch stub, PIC. */
2457 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2458 {
2459 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2460 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2461 ARM_INSN (0xe12fff1c), /* bx ip */
2462 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2463 };
2464
2465 /* V4T Thumb -> ARM long branch stub, PIC. */
2466 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2467 {
2468 THUMB16_INSN (0x4778), /* bx pc */
2469 THUMB16_INSN (0x46c0), /* nop */
2470 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2471 ARM_INSN (0xe08cf00f), /* add pc, ip, pc */
2472 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X) */
2473 };
2474
2475 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2476 architectures. */
2477 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2478 {
2479 THUMB16_INSN (0xb401), /* push {r0} */
2480 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2481 THUMB16_INSN (0x46fc), /* mov ip, pc */
2482 THUMB16_INSN (0x4484), /* add ip, r0 */
2483 THUMB16_INSN (0xbc01), /* pop {r0} */
2484 THUMB16_INSN (0x4760), /* bx ip */
2485 DATA_WORD (0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X) */
2486 };
2487
2488 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2489 allowed. */
2490 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2491 {
2492 THUMB16_INSN (0x4778), /* bx pc */
2493 THUMB16_INSN (0x46c0), /* nop */
2494 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2495 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2496 ARM_INSN (0xe12fff1c), /* bx ip */
2497 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2498 };
2499
2500 /* Thumb2/ARM -> TLS trampoline. Lowest common denominator, which is a
2501 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2502 static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] =
2503 {
2504 ARM_INSN (0xe59f1000), /* ldr r1, [pc] */
2505 ARM_INSN (0xe08ff001), /* add pc, pc, r1 */
2506 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2507 };
2508
2509 /* V4T Thumb -> TLS trampoline. lowest common denominator, which is a
2510 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2511 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] =
2512 {
2513 THUMB16_INSN (0x4778), /* bx pc */
2514 THUMB16_INSN (0x46c0), /* nop */
2515 ARM_INSN (0xe59f1000), /* ldr r1, [pc, #0] */
2516 ARM_INSN (0xe081f00f), /* add pc, r1, pc */
2517 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X) */
2518 };
2519
2520 /* NaCl ARM -> ARM long branch stub. */
2521 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl[] =
2522 {
2523 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
2524 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
2525 ARM_INSN (0xe12fff1c), /* bx ip */
2526 ARM_INSN (0xe320f000), /* nop */
2527 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
2528 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2529 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2530 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2531 };
2532
2533 /* NaCl ARM -> ARM long branch stub, PIC. */
2534 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic[] =
2535 {
2536 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
2537 ARM_INSN (0xe08cc00f), /* add ip, ip, pc */
2538 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
2539 ARM_INSN (0xe12fff1c), /* bx ip */
2540 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
2541 DATA_WORD (0, R_ARM_REL32, 8), /* dcd R_ARM_REL32(X+8) */
2542 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2543 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2544 };
2545
2546
2547 /* Cortex-A8 erratum-workaround stubs. */
2548
2549 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2550 can't use a conditional branch to reach this stub). */
2551
2552 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2553 {
2554 THUMB16_BCOND_INSN (0xd001), /* b<cond>.n true. */
2555 THUMB32_B_INSN (0xf000b800, -4), /* b.w insn_after_original_branch. */
2556 THUMB32_B_INSN (0xf000b800, -4) /* true: b.w original_branch_dest. */
2557 };
2558
2559 /* Stub used for b.w and bl.w instructions. */
2560
2561 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2562 {
2563 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2564 };
2565
2566 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2567 {
2568 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2569 };
2570
2571 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2572 instruction (which switches to ARM mode) to point to this stub. Jump to the
2573 real destination using an ARM-mode branch. */
2574
2575 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2576 {
2577 ARM_REL_INSN (0xea000000, -8) /* b original_branch_dest. */
2578 };
2579
2580 /* For each section group there can be a specially created linker section
2581 to hold the stubs for that group. The name of the stub section is based
2582 upon the name of another section within that group with the suffix below
2583 applied.
2584
2585 PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
2586 create what appeared to be a linker stub section when it actually
2587 contained user code/data. For example, consider this fragment:
2588
2589 const char * stubborn_problems[] = { "np" };
2590
2591 If this is compiled with "-fPIC -fdata-sections" then gcc produces a
2592 section called:
2593
2594 .data.rel.local.stubborn_problems
2595
2596 This then causes problems in arm32_arm_build_stubs() as it triggers:
2597
2598 // Ignore non-stub sections.
2599 if (!strstr (stub_sec->name, STUB_SUFFIX))
2600 continue;
2601
2602 And so the section would be ignored instead of being processed. Hence
2603 the change in definition of STUB_SUFFIX to a name that cannot be a valid
2604 C identifier. */
2605 #define STUB_SUFFIX ".__stub"
2606
2607 /* One entry per long/short branch stub defined above. */
2608 #define DEF_STUBS \
2609 DEF_STUB(long_branch_any_any) \
2610 DEF_STUB(long_branch_v4t_arm_thumb) \
2611 DEF_STUB(long_branch_thumb_only) \
2612 DEF_STUB(long_branch_v4t_thumb_thumb) \
2613 DEF_STUB(long_branch_v4t_thumb_arm) \
2614 DEF_STUB(short_branch_v4t_thumb_arm) \
2615 DEF_STUB(long_branch_any_arm_pic) \
2616 DEF_STUB(long_branch_any_thumb_pic) \
2617 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
2618 DEF_STUB(long_branch_v4t_arm_thumb_pic) \
2619 DEF_STUB(long_branch_v4t_thumb_arm_pic) \
2620 DEF_STUB(long_branch_thumb_only_pic) \
2621 DEF_STUB(long_branch_any_tls_pic) \
2622 DEF_STUB(long_branch_v4t_thumb_tls_pic) \
2623 DEF_STUB(long_branch_arm_nacl) \
2624 DEF_STUB(long_branch_arm_nacl_pic) \
2625 DEF_STUB(a8_veneer_b_cond) \
2626 DEF_STUB(a8_veneer_b) \
2627 DEF_STUB(a8_veneer_bl) \
2628 DEF_STUB(a8_veneer_blx)
2629
2630 #define DEF_STUB(x) arm_stub_##x,
2631 enum elf32_arm_stub_type
2632 {
2633 arm_stub_none,
2634 DEF_STUBS
2635 };
2636 #undef DEF_STUB
2637
2638 /* Note the first a8_veneer type. */
2639 const unsigned arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond;
2640
2641 typedef struct
2642 {
2643 const insn_sequence* template_sequence;
2644 int template_size;
2645 } stub_def;
2646
2647 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2648 static const stub_def stub_definitions[] =
2649 {
2650 {NULL, 0},
2651 DEF_STUBS
2652 };
2653
2654 struct elf32_arm_stub_hash_entry
2655 {
2656 /* Base hash table entry structure. */
2657 struct bfd_hash_entry root;
2658
2659 /* The stub section. */
2660 asection *stub_sec;
2661
2662 /* Offset within stub_sec of the beginning of this stub. */
2663 bfd_vma stub_offset;
2664
2665 /* Given the symbol's value and its section we can determine its final
2666 value when building the stubs (so the stub knows where to jump). */
2667 bfd_vma target_value;
2668 asection *target_section;
2669
2670 /* Same as above but for the source of the branch to the stub. Used for
2671 Cortex-A8 erratum workaround to patch it to branch to the stub. As
2672 such, source section does not need to be recorded since Cortex-A8 erratum
2673 workaround stubs are only generated when both source and target are in the
2674 same section. */
2675 bfd_vma source_value;
2676
2677 /* The instruction which caused this stub to be generated (only valid for
2678 Cortex-A8 erratum workaround stubs at present). */
2679 unsigned long orig_insn;
2680
2681 /* The stub type. */
2682 enum elf32_arm_stub_type stub_type;
2683 /* Its encoding size in bytes. */
2684 int stub_size;
2685 /* Its template. */
2686 const insn_sequence *stub_template;
2687 /* The size of the template (number of entries). */
2688 int stub_template_size;
2689
2690 /* The symbol table entry, if any, that this was derived from. */
2691 struct elf32_arm_link_hash_entry *h;
2692
2693 /* Type of branch. */
2694 enum arm_st_branch_type branch_type;
2695
2696 /* Where this stub is being called from, or, in the case of combined
2697 stub sections, the first input section in the group. */
2698 asection *id_sec;
2699
2700 /* The name for the local symbol at the start of this stub. The
2701 stub name in the hash table has to be unique; this does not, so
2702 it can be friendlier. */
2703 char *output_name;
2704 };
2705
2706 /* Used to build a map of a section. This is required for mixed-endian
2707 code/data. */
2708
/* One entry in the map of a section's contents: the address at which
   a new region starts, and a character classifying that region.  */
typedef struct elf32_elf_section_map
{
  bfd_vma vma;			/* Start address of the region.  */
  char type;			/* Region class character -- presumably the
				   ARM mapping-symbol letter ('a', 't' or
				   'd'); confirm against the code that
				   fills in the map.  */
}
elf32_arm_section_map;
2715
2716 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2717
2718 typedef enum
2719 {
2720 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2721 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2722 VFP11_ERRATUM_ARM_VENEER,
2723 VFP11_ERRATUM_THUMB_VENEER
2724 }
2725 elf32_vfp11_erratum_type;
2726
2727 typedef struct elf32_vfp11_erratum_list
2728 {
2729 struct elf32_vfp11_erratum_list *next;
2730 bfd_vma vma;
2731 union
2732 {
2733 struct
2734 {
2735 struct elf32_vfp11_erratum_list *veneer;
2736 unsigned int vfp_insn;
2737 } b;
2738 struct
2739 {
2740 struct elf32_vfp11_erratum_list *branch;
2741 unsigned int id;
2742 } v;
2743 } u;
2744 elf32_vfp11_erratum_type type;
2745 }
2746 elf32_vfp11_erratum_list;
2747
2748 /* Information about a STM32L4XX erratum veneer, or a branch to such a
2749 veneer. */
2750 typedef enum
2751 {
2752 STM32L4XX_ERRATUM_BRANCH_TO_VENEER,
2753 STM32L4XX_ERRATUM_VENEER
2754 }
2755 elf32_stm32l4xx_erratum_type;
2756
2757 typedef struct elf32_stm32l4xx_erratum_list
2758 {
2759 struct elf32_stm32l4xx_erratum_list *next;
2760 bfd_vma vma;
2761 union
2762 {
2763 struct
2764 {
2765 struct elf32_stm32l4xx_erratum_list *veneer;
2766 unsigned int insn;
2767 } b;
2768 struct
2769 {
2770 struct elf32_stm32l4xx_erratum_list *branch;
2771 unsigned int id;
2772 } v;
2773 } u;
2774 elf32_stm32l4xx_erratum_type type;
2775 }
2776 elf32_stm32l4xx_erratum_list;
2777
2778 typedef enum
2779 {
2780 DELETE_EXIDX_ENTRY,
2781 INSERT_EXIDX_CANTUNWIND_AT_END
2782 }
2783 arm_unwind_edit_type;
2784
2785 /* A (sorted) list of edits to apply to an unwind table. */
2786 typedef struct arm_unwind_table_edit
2787 {
2788 arm_unwind_edit_type type;
2789 /* Note: we sometimes want to insert an unwind entry corresponding to a
2790 section different from the one we're currently writing out, so record the
2791 (text) section this edit relates to here. */
2792 asection *linked_section;
2793 unsigned int index;
2794 struct arm_unwind_table_edit *next;
2795 }
2796 arm_unwind_table_edit;
2797
2798 typedef struct _arm_elf_section_data
2799 {
2800 /* Information about mapping symbols. */
2801 struct bfd_elf_section_data elf;
2802 unsigned int mapcount;
2803 unsigned int mapsize;
2804 elf32_arm_section_map *map;
2805 /* Information about CPU errata. */
2806 unsigned int erratumcount;
2807 elf32_vfp11_erratum_list *erratumlist;
2808 unsigned int stm32l4xx_erratumcount;
2809 elf32_stm32l4xx_erratum_list *stm32l4xx_erratumlist;
2810 unsigned int additional_reloc_count;
2811 /* Information about unwind tables. */
2812 union
2813 {
2814 /* Unwind info attached to a text section. */
2815 struct
2816 {
2817 asection *arm_exidx_sec;
2818 } text;
2819
2820 /* Unwind info attached to an .ARM.exidx section. */
2821 struct
2822 {
2823 arm_unwind_table_edit *unwind_edit_list;
2824 arm_unwind_table_edit *unwind_edit_tail;
2825 } exidx;
2826 } u;
2827 }
2828 _arm_elf_section_data;
2829
2830 #define elf32_arm_section_data(sec) \
2831 ((_arm_elf_section_data *) elf_section_data (sec))
2832
2833 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
2834 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
2835 so may be created multiple times: we use an array of these entries whilst
2836 relaxing which we can refresh easily, then create stubs for each potentially
2837 erratum-triggering instruction once we've settled on a solution. */
2838
struct a8_erratum_fix
{
  bfd *input_bfd;		/* BFD containing the affected branch.  */
  asection *section;		/* Section holding the branch.  */
  bfd_vma offset;		/* Offset of the branch within SECTION.  */
  bfd_vma target_offset;	/* Offset of the branch target.  */
  unsigned long orig_insn;	/* The original branch instruction.  */
  char *stub_name;		/* Name of the veneer stub to create.  */
  enum elf32_arm_stub_type stub_type;	/* Which a8_veneer_* stub to use.  */
  enum arm_st_branch_type branch_type;	/* ARM/Thumb flavour of the branch.  */
};
2850
2851 /* A table of relocs applied to branches which might trigger Cortex-A8
2852 erratum. */
2853
struct a8_erratum_reloc
{
  /* Source and destination addresses of the branch.  */
  bfd_vma from;
  bfd_vma destination;
  /* Hash table entry for the branch target, or NULL for a local
     symbol; SYM_NAME names the target either way.  */
  struct elf32_arm_link_hash_entry *hash;
  const char *sym_name;
  /* The relocation type applied to the branch.  */
  unsigned int r_type;
  enum arm_st_branch_type branch_type;
  /* TRUE if a stub unrelated to the Cortex-A8 erratum already covers
     this branch.  */
  bfd_boolean non_a8_stub;
};
2864
/* The size of the thread control block, in bytes.  */
#define TCB_SIZE 8
2867
/* ARM-specific information about a PLT entry, over and above the usual
   gotplt_union.  */
struct arm_plt_info
{
  /* We reference count Thumb references to a PLT entry separately,
     so that we can emit the Thumb trampoline only if needed.  */
  bfd_signed_vma thumb_refcount;

  /* Some references from Thumb code may be eliminated by BL->BLX
     conversion, so record them separately.  */
  bfd_signed_vma maybe_thumb_refcount;

  /* How many of the recorded PLT accesses were from non-call relocations.
     This information is useful when deciding whether anything takes the
     address of an STT_GNU_IFUNC PLT.  A value of 0 means that all
     non-call references to the function should resolve directly to the
     real runtime target.  */
  unsigned int noncall_refcount;

  /* Since PLT entries have variable size if the Thumb prologue is
     used, we need to record the index into .got.plt instead of
     recomputing it from the PLT offset.  */
  bfd_signed_vma got_offset;
};
2892
/* Information about an .iplt entry for a local STT_GNU_IFUNC symbol.
   Local symbols have no elf_link_hash_entry, so this structure carries
   the equivalent per-symbol state for them.  */
struct arm_local_iplt_info
{
  /* The information that is usually found in the generic ELF part of
     the hash table entry.  */
  union gotplt_union root;

  /* The information that is usually found in the ARM-specific part of
     the hash table entry.  */
  struct arm_plt_info arm;

  /* A list of all potential dynamic relocations against this symbol.  */
  struct elf_dyn_relocs *dyn_relocs;
};
2907
/* ARM-specific per-BFD data, extending the generic ELF tdata.  The
   local_* arrays are parallel arrays indexed by local symbol number;
   they are allocated together by elf32_arm_allocate_local_sym_info.  */
struct elf_arm_obj_tdata
{
  /* The generic tdata this structure extends; must be first.  */
  struct elf_obj_tdata root;

  /* tls_type for each local got entry.  */
  char *local_got_tls_type;

  /* GOTPLT entries for TLS descriptors.  */
  bfd_vma *local_tlsdesc_gotent;

  /* Information for local symbols that need entries in .iplt.  */
  struct arm_local_iplt_info **local_iplt;

  /* Zero to warn when linking objects with incompatible enum sizes.  */
  int no_enum_size_warning;

  /* Zero to warn when linking objects with incompatible wchar_t sizes.  */
  int no_wchar_size_warning;
};
2927
/* Fetch the ARM-specific tdata attached to BFD.  Only valid when
   is_arm_elf (bfd) holds.  */
#define elf_arm_tdata(bfd) \
  ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)

/* Per-local-symbol tls_type array for BFD.  */
#define elf32_arm_local_got_tls_type(bfd) \
  (elf_arm_tdata (bfd)->local_got_tls_type)

/* Per-local-symbol TLS descriptor GOT entry array for BFD.  */
#define elf32_arm_local_tlsdesc_gotent(bfd) \
  (elf_arm_tdata (bfd)->local_tlsdesc_gotent)

/* Per-local-symbol .iplt information array for BFD.  */
#define elf32_arm_local_iplt(bfd) \
  (elf_arm_tdata (bfd)->local_iplt)

/* Nonzero if BFD is an ELF bfd carrying ARM-specific tdata, i.e. one
   the accessors above may safely be applied to.  */
#define is_arm_elf(bfd) \
  (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
   && elf_tdata (bfd) != NULL \
   && elf_object_id (bfd) == ARM_ELF_DATA)
2944
2945 static bfd_boolean
2946 elf32_arm_mkobject (bfd *abfd)
2947 {
2948 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
2949 ARM_ELF_DATA);
2950 }
2951
/* Downcast a generic hash entry to the ARM-specific variant.  */
#define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))

/* Arm ELF linker hash entry.  */
struct elf32_arm_link_hash_entry
{
  /* The generic entry this structure extends; must be first.  */
  struct elf_link_hash_entry root;

  /* Track dynamic relocs copied for this symbol.  */
  struct elf_dyn_relocs *dyn_relocs;

  /* ARM-specific PLT information.  */
  struct arm_plt_info plt;

  /* Kinds of GOT entry the symbol needs; TLS_TYPE below is a mask of
     these bits.  */
#define GOT_UNKNOWN 0
#define GOT_NORMAL 1
#define GOT_TLS_GD 2
#define GOT_TLS_IE 4
#define GOT_TLS_GDESC 8
#define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
  unsigned int tls_type : 8;

  /* True if the symbol's PLT entry is in .iplt rather than .plt.  */
  unsigned int is_iplt : 1;

  /* Spare bits; keep the bitfields totalling 32 bits.  */
  unsigned int unused : 23;

  /* Offset of the GOTPLT entry reserved for the TLS descriptor,
     starting at the end of the jump table.  */
  bfd_vma tlsdesc_got;

  /* The symbol marking the real symbol location for exported thumb
     symbols with Arm stubs.  */
  struct elf_link_hash_entry *export_glue;

  /* A pointer to the most recently used stub hash entry against this
     symbol.  */
  struct elf32_arm_stub_hash_entry *stub_cache;
};
2990
/* Traverse an arm ELF linker hash table, calling FUNC (entry, INFO)
   on every entry.  */
#define elf32_arm_link_hash_traverse(table, func, info)			\
  (elf_link_hash_traverse						\
   (&(table)->root,							\
    (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func),	\
    (info)))

/* Get the ARM elf linker hash table from a link_info structure.
   Returns NULL if the hash table in INFO is not an ARM one.  */
#define elf32_arm_hash_table(info) \
  (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
  == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)

/* Typed wrapper around bfd_hash_lookup for the stub hash table.  */
#define arm_stub_hash_lookup(table, string, create, copy) \
  ((struct elf32_arm_stub_hash_entry *) \
   bfd_hash_lookup ((table), (string), (create), (copy)))
3006
/* Array element used to keep track of which stub sections have been
   created, and information on stub grouping.  One entry exists per
   input-section group.  */
struct map_stub
{
  /* This is the section to which stubs in the group will be
     attached.  */
  asection *link_sec;
  /* The stub section itself.  */
  asection *stub_sec;
};
3017
/* Size in bytes of the TLS-descriptor part of the jump table:
   presumably one 4-byte slot per R_ARM_TLS_DESC relocation allocated
   so far (see next_tls_desc_index) -- confirm at the users.  */
#define elf32_arm_compute_jump_table_size(htab) \
  ((htab)->next_tls_desc_index * 4)
3020
/* ARM ELF linker hash table.  */
struct elf32_arm_link_hash_table
{
  /* The main hash table.  Must be first.  */
  struct elf_link_hash_table root;

  /* The size in bytes of the section containing the Thumb-to-ARM glue.  */
  bfd_size_type thumb_glue_size;

  /* The size in bytes of the section containing the ARM-to-Thumb glue.  */
  bfd_size_type arm_glue_size;

  /* The size in bytes of section containing the ARMv4 BX veneers.  */
  bfd_size_type bx_glue_size;

  /* Offsets of ARMv4 BX veneers.  Bit1 set if present, and Bit0 set when
     veneer has been populated.  */
  bfd_vma bx_glue_offset[15];

  /* The size in bytes of the section containing glue for VFP11 erratum
     veneers.  */
  bfd_size_type vfp11_erratum_glue_size;

  /* The size in bytes of the section containing glue for STM32L4XX erratum
     veneers.  */
  bfd_size_type stm32l4xx_erratum_glue_size;

  /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum.  This
     holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
     elf32_arm_write_section().  */
  struct a8_erratum_fix *a8_erratum_fixes;
  unsigned int num_a8_erratum_fixes;

  /* An arbitrary input BFD chosen to hold the glue sections.  */
  bfd * bfd_of_glue_owner;

  /* Nonzero to output a BE8 image.  */
  int byteswap_code;

  /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
     Nonzero if R_ARM_TARGET1 means R_ARM_REL32.  */
  int target1_is_rel;

  /* The relocation to use for R_ARM_TARGET2 relocations.  */
  int target2_reloc;

  /* 0 = Ignore R_ARM_V4BX.
     1 = Convert BX to MOV PC.
     2 = Generate v4 interworking stubs.  */
  int fix_v4bx;

  /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum.  */
  int fix_cortex_a8;

  /* Whether we should fix the ARM1176 BLX immediate issue.  */
  int fix_arm1176;

  /* Nonzero if the ARM/Thumb BLX instructions are available for use.  */
  int use_blx;

  /* What sort of code sequences we should look for which may trigger the
     VFP11 denorm erratum.  */
  bfd_arm_vfp11_fix vfp11_fix;

  /* Global counter for the number of fixes we have emitted.  */
  int num_vfp11_fixes;

  /* What sort of code sequences we should look for which may trigger the
     STM32L4XX erratum.  */
  bfd_arm_stm32l4xx_fix stm32l4xx_fix;

  /* Global counter for the number of fixes we have emitted.  */
  int num_stm32l4xx_fixes;

  /* Nonzero to force PIC branch veneers.  */
  int pic_veneer;

  /* The number of bytes in the initial entry in the PLT.  */
  bfd_size_type plt_header_size;

  /* The number of bytes in the subsequent PLT entries.  */
  bfd_size_type plt_entry_size;

  /* True if the target system is VxWorks.  */
  int vxworks_p;

  /* True if the target system is Symbian OS.  */
  int symbian_p;

  /* True if the target system is Native Client.  */
  int nacl_p;

  /* True if the target uses REL relocations.  */
  int use_rel;

  /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt.  */
  bfd_vma next_tls_desc_index;

  /* How many R_ARM_TLS_DESC relocations were generated so far.  */
  bfd_vma num_tls_desc;

  /* Short-cuts to get to dynamic linker sections.  */
  asection *sdynbss;
  asection *srelbss;

  /* The (unloaded but important) VxWorks .rela.plt.unloaded section.  */
  asection *srelplt2;

  /* The offset into splt of the PLT entry for the TLS descriptor
     resolver.  Special values are 0, if not necessary (or not found
     to be necessary yet), and -1 if needed but not determined
     yet.  */
  bfd_vma dt_tlsdesc_plt;

  /* The offset into sgot of the GOT entry used by the PLT entry
     above.  */
  bfd_vma dt_tlsdesc_got;

  /* Offset in .plt section of tls_arm_trampoline.  */
  bfd_vma tls_trampoline;

  /* Data for R_ARM_TLS_LDM32 relocations.  */
  union
  {
    bfd_signed_vma refcount;
    bfd_vma offset;
  } tls_ldm_got;

  /* Small local sym cache.  */
  struct sym_cache sym_cache;

  /* For convenience in allocate_dynrelocs.  */
  bfd * obfd;

  /* The amount of space used by the reserved portion of the sgotplt
     section, plus whatever space is used by the jump slots.  */
  bfd_vma sgotplt_jump_table_size;

  /* The stub hash table.  */
  struct bfd_hash_table stub_hash_table;

  /* Linker stub bfd.  */
  bfd *stub_bfd;

  /* Linker call-backs.  */
  asection * (*add_stub_section) (const char *, asection *, asection *,
				  unsigned int);
  void (*layout_sections_again) (void);

  /* Array to keep track of which stub sections have been created, and
     information on stub grouping.  */
  struct map_stub *stub_group;

  /* Number of elements in stub_group.  */
  unsigned int top_id;

  /* Assorted information used by elf32_arm_size_stubs.  */
  unsigned int bfd_count;
  unsigned int top_index;
  asection **input_list;
};
3182
/* Return the number of trailing zero bits in MASK.  For nonzero MASK
   this matches __builtin_ctz; the portable fallback returns the bit
   width of MASK (32 on common targets) when MASK is zero.  */

static inline int
ctz (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_ctz (mask);
#else
  unsigned int count = 0;

  while (count < 8 * sizeof (mask) && (mask & 0x1) == 0)
    {
      mask >>= 1;
      count++;
    }
  return count;
#endif
}
3200
/* Return the number of set bits in MASK.  Matches __builtin_popcount;
   the portable fallback sums the low bit while shifting MASK down,
   stopping early once no set bits remain.  */

static inline int
popcount (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_popcount (mask);
#else
  unsigned int total;

  for (total = 0; mask != 0; mask >>= 1)
    total += mask & 0x1;
  return total;
#endif
}
3218
/* Create an entry in an ARM ELF linker hash table.  Follows the
   standard two-phase bfd hash protocol: allocate the subclass-sized
   entry if the caller has not, let the superclass initialise its part,
   then initialise the ARM-specific fields.  */

static struct bfd_hash_entry *
elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
			     struct bfd_hash_table * table,
			     const char * string)
{
  struct elf32_arm_link_hash_entry * ret =
    (struct elf32_arm_link_hash_entry *) entry;

  /* Allocate the structure if it has not already been allocated by a
     subclass.  */
  if (ret == NULL)
    ret = (struct elf32_arm_link_hash_entry *)
        bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
  if (ret == NULL)
    return (struct bfd_hash_entry *) ret;

  /* Call the allocation method of the superclass.  */
  ret = ((struct elf32_arm_link_hash_entry *)
	 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
				     table, string));
  if (ret != NULL)
    {
      /* No dynamic relocs recorded, GOT entry type not yet known, and
	 no GOT/PLT offsets assigned ((bfd_vma) -1 / -1 mean "none").  */
      ret->dyn_relocs = NULL;
      ret->tls_type = GOT_UNKNOWN;
      ret->tlsdesc_got = (bfd_vma) -1;
      ret->plt.thumb_refcount = 0;
      ret->plt.maybe_thumb_refcount = 0;
      ret->plt.noncall_refcount = 0;
      ret->plt.got_offset = -1;
      ret->is_iplt = FALSE;
      ret->export_glue = NULL;

      ret->stub_cache = NULL;
    }

  return (struct bfd_hash_entry *) ret;
}
3258
/* Ensure that we have allocated bookkeeping structures for ABFD's local
   symbols.  Returns FALSE on allocation failure.  */

static bfd_boolean
elf32_arm_allocate_local_sym_info (bfd *abfd)
{
  /* The GOT refcount array doubles as the "already allocated" flag.  */
  if (elf_local_got_refcounts (abfd) == NULL)
    {
      bfd_size_type num_syms;
      bfd_size_type size;
      char *data;

      /* One combined, zeroed allocation carved into four parallel
	 arrays, each indexed by local symbol number.  The carve-up
	 order below must stay in sync with the accessor macros
	 (elf32_arm_local_iplt etc.).  */
      num_syms = elf_tdata (abfd)->symtab_hdr.sh_info;
      size = num_syms * (sizeof (bfd_signed_vma)
			 + sizeof (struct arm_local_iplt_info *)
			 + sizeof (bfd_vma)
			 + sizeof (char));
      data = bfd_zalloc (abfd, size);
      if (data == NULL)
	return FALSE;

      /* GOT reference counts.  */
      elf_local_got_refcounts (abfd) = (bfd_signed_vma *) data;
      data += num_syms * sizeof (bfd_signed_vma);

      /* Per-symbol .iplt information, created lazily.  */
      elf32_arm_local_iplt (abfd) = (struct arm_local_iplt_info **) data;
      data += num_syms * sizeof (struct arm_local_iplt_info *);

      /* TLS descriptor GOT offsets.  */
      elf32_arm_local_tlsdesc_gotent (abfd) = (bfd_vma *) data;
      data += num_syms * sizeof (bfd_vma);

      /* Per-symbol GOT tls_type bytes.  */
      elf32_arm_local_got_tls_type (abfd) = data;
    }
  return TRUE;
}
3293
3294 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
3295 to input bfd ABFD. Create the information if it doesn't already exist.
3296 Return null if an allocation fails. */
3297
3298 static struct arm_local_iplt_info *
3299 elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx)
3300 {
3301 struct arm_local_iplt_info **ptr;
3302
3303 if (!elf32_arm_allocate_local_sym_info (abfd))
3304 return NULL;
3305
3306 BFD_ASSERT (r_symndx < elf_tdata (abfd)->symtab_hdr.sh_info);
3307 ptr = &elf32_arm_local_iplt (abfd)[r_symndx];
3308 if (*ptr == NULL)
3309 *ptr = bfd_zalloc (abfd, sizeof (**ptr));
3310 return *ptr;
3311 }
3312
3313 /* Try to obtain PLT information for the symbol with index R_SYMNDX
3314 in ABFD's symbol table. If the symbol is global, H points to its
3315 hash table entry, otherwise H is null.
3316
3317 Return true if the symbol does have PLT information. When returning
3318 true, point *ROOT_PLT at the target-independent reference count/offset
3319 union and *ARM_PLT at the ARM-specific information. */
3320
3321 static bfd_boolean
3322 elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_entry *h,
3323 unsigned long r_symndx, union gotplt_union **root_plt,
3324 struct arm_plt_info **arm_plt)
3325 {
3326 struct arm_local_iplt_info *local_iplt;
3327
3328 if (h != NULL)
3329 {
3330 *root_plt = &h->root.plt;
3331 *arm_plt = &h->plt;
3332 return TRUE;
3333 }
3334
3335 if (elf32_arm_local_iplt (abfd) == NULL)
3336 return FALSE;
3337
3338 local_iplt = elf32_arm_local_iplt (abfd)[r_symndx];
3339 if (local_iplt == NULL)
3340 return FALSE;
3341
3342 *root_plt = &local_iplt->root;
3343 *arm_plt = &local_iplt->arm;
3344 return TRUE;
3345 }
3346
3347 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
3348 before it. */
3349
3350 static bfd_boolean
3351 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info *info,
3352 struct arm_plt_info *arm_plt)
3353 {
3354 struct elf32_arm_link_hash_table *htab;
3355
3356 htab = elf32_arm_hash_table (info);
3357 return (arm_plt->thumb_refcount != 0
3358 || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0));
3359 }
3360
3361 /* Return a pointer to the head of the dynamic reloc list that should
3362 be used for local symbol ISYM, which is symbol number R_SYMNDX in
3363 ABFD's symbol table. Return null if an error occurs. */
3364
3365 static struct elf_dyn_relocs **
3366 elf32_arm_get_local_dynreloc_list (bfd *abfd, unsigned long r_symndx,
3367 Elf_Internal_Sym *isym)
3368 {
3369 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
3370 {
3371 struct arm_local_iplt_info *local_iplt;
3372
3373 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
3374 if (local_iplt == NULL)
3375 return NULL;
3376 return &local_iplt->dyn_relocs;
3377 }
3378 else
3379 {
3380 /* Track dynamic relocs needed for local syms too.
3381 We really need local syms available to do this
3382 easily. Oh well. */
3383 asection *s;
3384 void *vpp;
3385
3386 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
3387 if (s == NULL)
3388 abort ();
3389
3390 vpp = &elf_section_data (s)->local_dynrel;
3391 return (struct elf_dyn_relocs **) vpp;
3392 }
3393 }
3394
/* Initialize an entry in the stub hash table.  Uses the standard
   two-phase bfd hash protocol: allocate if the caller has not, chain
   to the superclass, then clear the stub-specific fields.  */

static struct bfd_hash_entry *
stub_hash_newfunc (struct bfd_hash_entry *entry,
		   struct bfd_hash_table *table,
		   const char *string)
{
  /* Allocate the structure if it has not already been allocated by a
     subclass.  */
  if (entry == NULL)
    {
      entry = (struct bfd_hash_entry *)
	  bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
      if (entry == NULL)
	return entry;
    }

  /* Call the allocation method of the superclass.  */
  entry = bfd_hash_newfunc (entry, table, string);
  if (entry != NULL)
    {
      struct elf32_arm_stub_hash_entry *eh;

      /* Initialize the local fields: no stub section/template chosen
	 yet, no source/target recorded, no owning symbol.  */
      eh = (struct elf32_arm_stub_hash_entry *) entry;
      eh->stub_sec = NULL;
      eh->stub_offset = 0;
      eh->source_value = 0;
      eh->target_value = 0;
      eh->target_section = NULL;
      eh->orig_insn = 0;
      eh->stub_type = arm_stub_none;
      eh->stub_size = 0;
      eh->stub_template = NULL;
      eh->stub_template_size = 0;
      eh->h = NULL;
      eh->id_sec = NULL;
      eh->output_name = NULL;
    }

  return entry;
}
3437
3438 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
3439 shortcuts to them in our hash table. */
3440
3441 static bfd_boolean
3442 create_got_section (bfd *dynobj, struct bfd_link_info *info)
3443 {
3444 struct elf32_arm_link_hash_table *htab;
3445
3446 htab = elf32_arm_hash_table (info);
3447 if (htab == NULL)
3448 return FALSE;
3449
3450 /* BPABI objects never have a GOT, or associated sections. */
3451 if (htab->symbian_p)
3452 return TRUE;
3453
3454 if (! _bfd_elf_create_got_section (dynobj, info))
3455 return FALSE;
3456
3457 return TRUE;
3458 }
3459
3460 /* Create the .iplt, .rel(a).iplt and .igot.plt sections. */
3461
3462 static bfd_boolean
3463 create_ifunc_sections (struct bfd_link_info *info)
3464 {
3465 struct elf32_arm_link_hash_table *htab;
3466 const struct elf_backend_data *bed;
3467 bfd *dynobj;
3468 asection *s;
3469 flagword flags;
3470
3471 htab = elf32_arm_hash_table (info);
3472 dynobj = htab->root.dynobj;
3473 bed = get_elf_backend_data (dynobj);
3474 flags = bed->dynamic_sec_flags;
3475
3476 if (htab->root.iplt == NULL)
3477 {
3478 s = bfd_make_section_anyway_with_flags (dynobj, ".iplt",
3479 flags | SEC_READONLY | SEC_CODE);
3480 if (s == NULL
3481 || !bfd_set_section_alignment (dynobj, s, bed->plt_alignment))
3482 return FALSE;
3483 htab->root.iplt = s;
3484 }
3485
3486 if (htab->root.irelplt == NULL)
3487 {
3488 s = bfd_make_section_anyway_with_flags (dynobj,
3489 RELOC_SECTION (htab, ".iplt"),
3490 flags | SEC_READONLY);
3491 if (s == NULL
3492 || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
3493 return FALSE;
3494 htab->root.irelplt = s;
3495 }
3496
3497 if (htab->root.igotplt == NULL)
3498 {
3499 s = bfd_make_section_anyway_with_flags (dynobj, ".igot.plt", flags);
3500 if (s == NULL
3501 || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
3502 return FALSE;
3503 htab->root.igotplt = s;
3504 }
3505 return TRUE;
3506 }
3507
3508 /* Determine if we're dealing with a Thumb only architecture. */
3509
3510 static bfd_boolean
3511 using_thumb_only (struct elf32_arm_link_hash_table *globals)
3512 {
3513 int arch;
3514 int profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3515 Tag_CPU_arch_profile);
3516
3517 if (profile)
3518 return profile == 'M';
3519
3520 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3521
3522 if (arch == TAG_CPU_ARCH_V6_M
3523 || arch == TAG_CPU_ARCH_V6S_M
3524 || arch == TAG_CPU_ARCH_V7E_M
3525 || arch == TAG_CPU_ARCH_V8M_BASE
3526 || arch == TAG_CPU_ARCH_V8M_MAIN)
3527 return TRUE;
3528
3529 return FALSE;
3530 }
3531
3532 /* Determine if we're dealing with a Thumb-2 object. */
3533
3534 static bfd_boolean
3535 using_thumb2 (struct elf32_arm_link_hash_table *globals)
3536 {
3537 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3538 Tag_CPU_arch);
3539 return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7;
3540 }
3541
/* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
   .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
   hash table.  Also chooses the PLT header/entry sizes appropriate for
   the target (VxWorks, Thumb-only, or default).  */

static bfd_boolean
elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  if (!htab->root.sgot && !create_got_section (dynobj, info))
    return FALSE;

  if (!_bfd_elf_create_dynamic_sections (dynobj, info))
    return FALSE;

  /* .rel(a).bss is only needed for copy relocs, which do not occur in
     PIC links.  */
  htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
  if (!bfd_link_pic (info))
    htab->srelbss = bfd_get_linker_section (dynobj,
					    RELOC_SECTION (htab, ".bss"));

  if (htab->vxworks_p)
    {
      if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
	return FALSE;

      /* VxWorks shared links have no PLT header.  */
      if (bfd_link_pic (info))
	{
	  htab->plt_header_size = 0;
	  htab->plt_entry_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
	}
      else
	{
	  htab->plt_header_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
	  htab->plt_entry_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
	}

      if (elf_elfheader (dynobj))
	elf_elfheader (dynobj)->e_ident[EI_CLASS] = ELFCLASS32;
    }
  else
    {
      /* PR ld/16017
	 Test for thumb only architectures.  Note - we cannot just call
	 using_thumb_only() as the attributes in the output bfd have not been
	 initialised at this point, so instead we use the input bfd.
	 The obfd field is temporarily redirected at DYNOBJ and restored
	 afterwards.  */
      bfd * saved_obfd = htab->obfd;

      htab->obfd = dynobj;
      if (using_thumb_only (htab))
	{
	  htab->plt_header_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
	  htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
	}
      htab->obfd = saved_obfd;
    }

  /* All of the sections created above are mandatory; failing to create
     any of them is a backend bug rather than a recoverable error.  */
  if (!htab->root.splt
      || !htab->root.srelplt
      || !htab->sdynbss
      || (!bfd_link_pic (info) && !htab->srelbss))
    abort ();

  return TRUE;
}
3613
/* Copy the extra info we tack onto an elf_link_hash_entry.  Called when
   symbol IND is made to point at symbol DIR; IND's ARM-specific state
   (dyn_relocs, PLT refcounts, GOT type) is merged into DIR's.  */

static void
elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
				struct elf_link_hash_entry *dir,
				struct elf_link_hash_entry *ind)
{
  struct elf32_arm_link_hash_entry *edir, *eind;

  edir = (struct elf32_arm_link_hash_entry *) dir;
  eind = (struct elf32_arm_link_hash_entry *) ind;

  if (eind->dyn_relocs != NULL)
    {
      if (edir->dyn_relocs != NULL)
	{
	  struct elf_dyn_relocs **pp;
	  struct elf_dyn_relocs *p;

	  /* Add reloc counts against the indirect sym to the direct sym
	     list.  Merge any entries against the same section.  */
	  for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
	    {
	      struct elf_dyn_relocs *q;

	      for (q = edir->dyn_relocs; q != NULL; q = q->next)
		if (q->sec == p->sec)
		  {
		    q->pc_count += p->pc_count;
		    q->count += p->count;
		    /* P merged into Q: unlink it from EIND's list.  */
		    *pp = p->next;
		    break;
		  }
	      if (q == NULL)
		pp = &p->next;
	    }
	  /* Append EDIR's old list to what remains of EIND's.  */
	  *pp = edir->dyn_relocs;
	}

      /* DIR takes ownership of the combined list.  */
      edir->dyn_relocs = eind->dyn_relocs;
      eind->dyn_relocs = NULL;
    }

  if (ind->root.type == bfd_link_hash_indirect)
    {
      /* Copy over PLT info.  */
      edir->plt.thumb_refcount += eind->plt.thumb_refcount;
      eind->plt.thumb_refcount = 0;
      edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount;
      eind->plt.maybe_thumb_refcount = 0;
      edir->plt.noncall_refcount += eind->plt.noncall_refcount;
      eind->plt.noncall_refcount = 0;

      /* We should only allocate a function to .iplt once the final
	 symbol information is known.  */
      BFD_ASSERT (!eind->is_iplt);

      /* If DIR has no GOT references of its own, it inherits IND's
	 GOT entry type.  */
      if (dir->got.refcount <= 0)
	{
	  edir->tls_type = eind->tls_type;
	  eind->tls_type = GOT_UNKNOWN;
	}
    }

  _bfd_elf_link_hash_copy_indirect (info, dir, ind);
}
3680
3681 /* Destroy an ARM elf linker hash table. */
3682
3683 static void
3684 elf32_arm_link_hash_table_free (bfd *obfd)
3685 {
3686 struct elf32_arm_link_hash_table *ret
3687 = (struct elf32_arm_link_hash_table *) obfd->link.hash;
3688
3689 bfd_hash_table_free (&ret->stub_hash_table);
3690 _bfd_elf_link_hash_table_free (obfd);
3691 }
3692
/* Create an ARM elf linker hash table.  Returns the generic root of
   the new table, or NULL on allocation failure (cleaning up any
   partially-constructed state).  */

static struct bfd_link_hash_table *
elf32_arm_link_hash_table_create (bfd *abfd)
{
  struct elf32_arm_link_hash_table *ret;
  bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);

  ret = (struct elf32_arm_link_hash_table *) bfd_zmalloc (amt);
  if (ret == NULL)
    return NULL;

  if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
				      elf32_arm_link_hash_newfunc,
				      sizeof (struct elf32_arm_link_hash_entry),
				      ARM_ELF_DATA))
    {
      free (ret);
      return NULL;
    }

  /* Default: no erratum workarounds until explicitly requested.  */
  ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
  ret->stm32l4xx_fix = BFD_ARM_STM32L4XX_FIX_NONE;
#ifdef FOUR_WORD_PLT
  ret->plt_header_size = 16;
  ret->plt_entry_size = 16;
#else
  ret->plt_header_size = 20;
  ret->plt_entry_size = elf32_arm_use_long_plt_entry ? 16 : 12;
#endif
  /* Default to REL relocations; presumably overridden for RELA-based
     targets elsewhere -- confirm at the target vecs.  */
  ret->use_rel = 1;
  ret->obfd = abfd;

  if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
			    sizeof (struct elf32_arm_stub_hash_entry)))
    {
      /* RET is now reachable from ABFD's link info, so release it via
	 the generic destructor rather than free().  */
      _bfd_elf_link_hash_table_free (abfd);
      return NULL;
    }
  ret->root.root.hash_table_free = elf32_arm_link_hash_table_free;

  return &ret->root.root;
}
3736
3737 /* Determine what kind of NOPs are available. */
3738
3739 static bfd_boolean
3740 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
3741 {
3742 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3743 Tag_CPU_arch);
3744 return arch == TAG_CPU_ARCH_V6T2
3745 || arch == TAG_CPU_ARCH_V6K
3746 || arch == TAG_CPU_ARCH_V7
3747 || arch == TAG_CPU_ARCH_V7E_M;
3748 }
3749
3750 static bfd_boolean
3751 arch_has_thumb2_nop (struct elf32_arm_link_hash_table *globals)
3752 {
3753 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3754 Tag_CPU_arch);
3755 return (arch == TAG_CPU_ARCH_V6T2 || arch == TAG_CPU_ARCH_V7
3756 || arch == TAG_CPU_ARCH_V7E_M);
3757 }
3758
3759 static bfd_boolean
3760 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
3761 {
3762 switch (stub_type)
3763 {
3764 case arm_stub_long_branch_thumb_only:
3765 case arm_stub_long_branch_v4t_thumb_arm:
3766 case arm_stub_short_branch_v4t_thumb_arm:
3767 case arm_stub_long_branch_v4t_thumb_arm_pic:
3768 case arm_stub_long_branch_v4t_thumb_tls_pic:
3769 case arm_stub_long_branch_thumb_only_pic:
3770 return TRUE;
3771 case arm_stub_none:
3772 BFD_FAIL ();
3773 return FALSE;
3774 break;
3775 default:
3776 return FALSE;
3777 }
3778 }
3779
3780 /* Determine the type of stub needed, if any, for a call. */
3781
3782 static enum elf32_arm_stub_type
3783 arm_type_of_stub (struct bfd_link_info *info,
3784 asection *input_sec,
3785 const Elf_Internal_Rela *rel,
3786 unsigned char st_type,
3787 enum arm_st_branch_type *actual_branch_type,
3788 struct elf32_arm_link_hash_entry *hash,
3789 bfd_vma destination,
3790 asection *sym_sec,
3791 bfd *input_bfd,
3792 const char *name)
3793 {
3794 bfd_vma location;
3795 bfd_signed_vma branch_offset;
3796 unsigned int r_type;
3797 struct elf32_arm_link_hash_table * globals;
3798 int thumb2;
3799 int thumb_only;
3800 enum elf32_arm_stub_type stub_type = arm_stub_none;
3801 int use_plt = 0;
3802 enum arm_st_branch_type branch_type = *actual_branch_type;
3803 union gotplt_union *root_plt;
3804 struct arm_plt_info *arm_plt;
3805
3806 if (branch_type == ST_BRANCH_LONG)
3807 return stub_type;
3808
3809 globals = elf32_arm_hash_table (info);
3810 if (globals == NULL)
3811 return stub_type;
3812
3813 thumb_only = using_thumb_only (globals);
3814
3815 thumb2 = using_thumb2 (globals);
3816
3817 /* Determine where the call point is. */
3818 location = (input_sec->output_offset
3819 + input_sec->output_section->vma
3820 + rel->r_offset);
3821
3822 r_type = ELF32_R_TYPE (rel->r_info);
3823
3824 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
3825 are considering a function call relocation. */
3826 if (thumb_only && (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
3827 || r_type == R_ARM_THM_JUMP19)
3828 && branch_type == ST_BRANCH_TO_ARM)
3829 branch_type = ST_BRANCH_TO_THUMB;
3830
3831 /* For TLS call relocs, it is the caller's responsibility to provide
3832 the address of the appropriate trampoline. */
3833 if (r_type != R_ARM_TLS_CALL
3834 && r_type != R_ARM_THM_TLS_CALL
3835 && elf32_arm_get_plt_info (input_bfd, hash, ELF32_R_SYM (rel->r_info),
3836 &root_plt, &arm_plt)
3837 && root_plt->offset != (bfd_vma) -1)
3838 {
3839 asection *splt;
3840
3841 if (hash == NULL || hash->is_iplt)
3842 splt = globals->root.iplt;
3843 else
3844 splt = globals->root.splt;
3845 if (splt != NULL)
3846 {
3847 use_plt = 1;
3848
3849 /* Note when dealing with PLT entries: the main PLT stub is in
3850 ARM mode, so if the branch is in Thumb mode, another
3851 Thumb->ARM stub will be inserted later just before the ARM
3852 PLT stub. We don't take this extra distance into account
3853 here, because if a long branch stub is needed, we'll add a
3854 Thumb->Arm one and branch directly to the ARM PLT entry
3855 because it avoids spreading offset corrections in several
3856 places. */
3857
3858 destination = (splt->output_section->vma
3859 + splt->output_offset
3860 + root_plt->offset);
3861 st_type = STT_FUNC;
3862 branch_type = ST_BRANCH_TO_ARM;
3863 }
3864 }
3865 /* Calls to STT_GNU_IFUNC symbols should go through a PLT. */
3866 BFD_ASSERT (st_type != STT_GNU_IFUNC);
3867
3868 branch_offset = (bfd_signed_vma)(destination - location);
3869
3870 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
3871 || r_type == R_ARM_THM_TLS_CALL || r_type == R_ARM_THM_JUMP19)
3872 {
3873 /* Handle cases where:
3874 - this call goes too far (different Thumb/Thumb2 max
3875 distance)
3876 - it's a Thumb->Arm call and blx is not available, or it's a
3877 Thumb->Arm branch (not bl). A stub is needed in this case,
3878 but only if this call is not through a PLT entry. Indeed,
3879 PLT stubs handle mode switching already.
3880 */
3881 if ((!thumb2
3882 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
3883 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
3884 || (thumb2
3885 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
3886 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
3887 || (thumb2
3888 && (branch_offset > THM2_MAX_FWD_COND_BRANCH_OFFSET
3889 || (branch_offset < THM2_MAX_BWD_COND_BRANCH_OFFSET))
3890 && (r_type == R_ARM_THM_JUMP19))
3891 || (branch_type == ST_BRANCH_TO_ARM
3892 && (((r_type == R_ARM_THM_CALL
3893 || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx)
3894 || (r_type == R_ARM_THM_JUMP24)
3895 || (r_type == R_ARM_THM_JUMP19))
3896 && !use_plt))
3897 {
3898 if (branch_type == ST_BRANCH_TO_THUMB)
3899 {
3900 /* Thumb to thumb. */
3901 if (!thumb_only)
3902 {
3903 stub_type = (bfd_link_pic (info) | globals->pic_veneer)
3904 /* PIC stubs. */
3905 ? ((globals->use_blx
3906 && (r_type == R_ARM_THM_CALL))
3907 /* V5T and above. Stub starts with ARM code, so
3908 we must be able to switch mode before
3909 reaching it, which is only possible for 'bl'
3910 (ie R_ARM_THM_CALL relocation). */
3911 ? arm_stub_long_branch_any_thumb_pic
3912 /* On V4T, use Thumb code only. */
3913 : arm_stub_long_branch_v4t_thumb_thumb_pic)
3914
3915 /* non-PIC stubs. */
3916 : ((globals->use_blx
3917 && (r_type == R_ARM_THM_CALL))
3918 /* V5T and above. */
3919 ? arm_stub_long_branch_any_any
3920 /* V4T. */
3921 : arm_stub_long_branch_v4t_thumb_thumb);
3922 }
3923 else
3924 {
3925 stub_type = (bfd_link_pic (info) | globals->pic_veneer)
3926 /* PIC stub. */
3927 ? arm_stub_long_branch_thumb_only_pic
3928 /* non-PIC stub. */
3929 : arm_stub_long_branch_thumb_only;
3930 }
3931 }
3932 else
3933 {
3934 /* Thumb to arm. */
3935 if (sym_sec != NULL
3936 && sym_sec->owner != NULL
3937 && !INTERWORK_FLAG (sym_sec->owner))
3938 {
3939 (*_bfd_error_handler)
3940 (_("%B(%s): warning: interworking not enabled.\n"
3941 " first occurrence: %B: Thumb call to ARM"),
3942 sym_sec->owner, input_bfd, name);
3943 }
3944
3945 stub_type =
3946 (bfd_link_pic (info) | globals->pic_veneer)
3947 /* PIC stubs. */
3948 ? (r_type == R_ARM_THM_TLS_CALL
3949 /* TLS PIC stubs. */
3950 ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic
3951 : arm_stub_long_branch_v4t_thumb_tls_pic)
3952 : ((globals->use_blx && r_type == R_ARM_THM_CALL)
3953 /* V5T PIC and above. */
3954 ? arm_stub_long_branch_any_arm_pic
3955 /* V4T PIC stub. */
3956 : arm_stub_long_branch_v4t_thumb_arm_pic))
3957
3958 /* non-PIC stubs. */
3959 : ((globals->use_blx && r_type == R_ARM_THM_CALL)
3960 /* V5T and above. */
3961 ? arm_stub_long_branch_any_any
3962 /* V4T. */
3963 : arm_stub_long_branch_v4t_thumb_arm);
3964
3965 /* Handle v4t short branches. */
3966 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
3967 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
3968 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
3969 stub_type = arm_stub_short_branch_v4t_thumb_arm;
3970 }
3971 }
3972 }
3973 else if (r_type == R_ARM_CALL
3974 || r_type == R_ARM_JUMP24
3975 || r_type == R_ARM_PLT32
3976 || r_type == R_ARM_TLS_CALL)
3977 {
3978 if (branch_type == ST_BRANCH_TO_THUMB)
3979 {
3980 /* Arm to thumb. */
3981
3982 if (sym_sec != NULL
3983 && sym_sec->owner != NULL
3984 && !INTERWORK_FLAG (sym_sec->owner))
3985 {
3986 (*_bfd_error_handler)
3987 (_("%B(%s): warning: interworking not enabled.\n"
3988 " first occurrence: %B: ARM call to Thumb"),
3989 sym_sec->owner, input_bfd, name);
3990 }
3991
3992 /* We have an extra 2-bytes reach because of
3993 the mode change (bit 24 (H) of BLX encoding). */
3994 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
3995 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
3996 || (r_type == R_ARM_CALL && !globals->use_blx)
3997 || (r_type == R_ARM_JUMP24)
3998 || (r_type == R_ARM_PLT32))
3999 {
4000 stub_type = (bfd_link_pic (info) | globals->pic_veneer)
4001 /* PIC stubs. */
4002 ? ((globals->use_blx)
4003 /* V5T and above. */
4004 ? arm_stub_long_branch_any_thumb_pic
4005 /* V4T stub. */
4006 : arm_stub_long_branch_v4t_arm_thumb_pic)
4007
4008 /* non-PIC stubs. */
4009 : ((globals->use_blx)
4010 /* V5T and above. */
4011 ? arm_stub_long_branch_any_any
4012 /* V4T. */
4013 : arm_stub_long_branch_v4t_arm_thumb);
4014 }
4015 }
4016 else
4017 {
4018 /* Arm to arm. */
4019 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
4020 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
4021 {
4022 stub_type =
4023 (bfd_link_pic (info) | globals->pic_veneer)
4024 /* PIC stubs. */
4025 ? (r_type == R_ARM_TLS_CALL
4026 /* TLS PIC Stub. */
4027 ? arm_stub_long_branch_any_tls_pic
4028 : (globals->nacl_p
4029 ? arm_stub_long_branch_arm_nacl_pic
4030 : arm_stub_long_branch_any_arm_pic))
4031 /* non-PIC stubs. */
4032 : (globals->nacl_p
4033 ? arm_stub_long_branch_arm_nacl
4034 : arm_stub_long_branch_any_any);
4035 }
4036 }
4037 }
4038
4039 /* If a stub is needed, record the actual destination type. */
4040 if (stub_type != arm_stub_none)
4041 *actual_branch_type = branch_type;
4042
4043 return stub_type;
4044 }
4045
4046 /* Build a name for an entry in the stub hash table. */
4047
4048 static char *
4049 elf32_arm_stub_name (const asection *input_section,
4050 const asection *sym_sec,
4051 const struct elf32_arm_link_hash_entry *hash,
4052 const Elf_Internal_Rela *rel,
4053 enum elf32_arm_stub_type stub_type)
4054 {
4055 char *stub_name;
4056 bfd_size_type len;
4057
4058 if (hash)
4059 {
4060 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
4061 stub_name = (char *) bfd_malloc (len);
4062 if (stub_name != NULL)
4063 sprintf (stub_name, "%08x_%s+%x_%d",
4064 input_section->id & 0xffffffff,
4065 hash->root.root.root.string,
4066 (int) rel->r_addend & 0xffffffff,
4067 (int) stub_type);
4068 }
4069 else
4070 {
4071 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
4072 stub_name = (char *) bfd_malloc (len);
4073 if (stub_name != NULL)
4074 sprintf (stub_name, "%08x_%x:%x+%x_%d",
4075 input_section->id & 0xffffffff,
4076 sym_sec->id & 0xffffffff,
4077 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
4078 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
4079 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
4080 (int) rel->r_addend & 0xffffffff,
4081 (int) stub_type);
4082 }
4083
4084 return stub_name;
4085 }
4086
4087 /* Look up an entry in the stub hash. Stub entries are cached because
4088 creating the stub name takes a bit of time. */
4089
4090 static struct elf32_arm_stub_hash_entry *
4091 elf32_arm_get_stub_entry (const asection *input_section,
4092 const asection *sym_sec,
4093 struct elf_link_hash_entry *hash,
4094 const Elf_Internal_Rela *rel,
4095 struct elf32_arm_link_hash_table *htab,
4096 enum elf32_arm_stub_type stub_type)
4097 {
4098 struct elf32_arm_stub_hash_entry *stub_entry;
4099 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
4100 const asection *id_sec;
4101
4102 if ((input_section->flags & SEC_CODE) == 0)
4103 return NULL;
4104
4105 /* If this input section is part of a group of sections sharing one
4106 stub section, then use the id of the first section in the group.
4107 Stub names need to include a section id, as there may well be
4108 more than one stub used to reach say, printf, and we need to
4109 distinguish between them. */
4110 id_sec = htab->stub_group[input_section->id].link_sec;
4111
4112 if (h != NULL && h->stub_cache != NULL
4113 && h->stub_cache->h == h
4114 && h->stub_cache->id_sec == id_sec
4115 && h->stub_cache->stub_type == stub_type)
4116 {
4117 stub_entry = h->stub_cache;
4118 }
4119 else
4120 {
4121 char *stub_name;
4122
4123 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
4124 if (stub_name == NULL)
4125 return NULL;
4126
4127 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
4128 stub_name, FALSE, FALSE);
4129 if (h != NULL)
4130 h->stub_cache = stub_entry;
4131
4132 free (stub_name);
4133 }
4134
4135 return stub_entry;
4136 }
4137
4138 /* Find or create a stub section. Returns a pointer to the stub section, and
4139 the section to which the stub section will be attached (in *LINK_SEC_P).
4140 LINK_SEC_P may be NULL. */
4141
4142 static asection *
4143 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
4144 struct elf32_arm_link_hash_table *htab)
4145 {
4146 asection *link_sec;
4147 asection *stub_sec;
4148 asection *out_sec;
4149
4150 link_sec = htab->stub_group[section->id].link_sec;
4151 BFD_ASSERT (link_sec != NULL);
4152 stub_sec = htab->stub_group[section->id].stub_sec;
4153
4154 if (stub_sec == NULL)
4155 {
4156 stub_sec = htab->stub_group[link_sec->id].stub_sec;
4157 if (stub_sec == NULL)
4158 {
4159 size_t namelen;
4160 bfd_size_type len;
4161 char *s_name;
4162
4163 namelen = strlen (link_sec->name);
4164 len = namelen + sizeof (STUB_SUFFIX);
4165 s_name = (char *) bfd_alloc (htab->stub_bfd, len);
4166 if (s_name == NULL)
4167 return NULL;
4168
4169 memcpy (s_name, link_sec->name, namelen);
4170 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
4171 out_sec = link_sec->output_section;
4172 stub_sec = (*htab->add_stub_section) (s_name, out_sec, link_sec,
4173 htab->nacl_p ? 4 : 3);
4174 if (stub_sec == NULL)
4175 return NULL;
4176 htab->stub_group[link_sec->id].stub_sec = stub_sec;
4177 }
4178 htab->stub_group[section->id].stub_sec = stub_sec;
4179 }
4180
4181 if (link_sec_p)
4182 *link_sec_p = link_sec;
4183
4184 return stub_sec;
4185 }
4186
4187 /* Add a new stub entry to the stub hash. Not all fields of the new
4188 stub entry are initialised. */
4189
4190 static struct elf32_arm_stub_hash_entry *
4191 elf32_arm_add_stub (const char *stub_name,
4192 asection *section,
4193 struct elf32_arm_link_hash_table *htab)
4194 {
4195 asection *link_sec;
4196 asection *stub_sec;
4197 struct elf32_arm_stub_hash_entry *stub_entry;
4198
4199 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab);
4200 if (stub_sec == NULL)
4201 return NULL;
4202
4203 /* Enter this entry into the linker stub hash table. */
4204 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4205 TRUE, FALSE);
4206 if (stub_entry == NULL)
4207 {
4208 if (section == NULL)
4209 section = stub_sec;
4210 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
4211 section->owner,
4212 stub_name);
4213 return NULL;
4214 }
4215
4216 stub_entry->stub_sec = stub_sec;
4217 stub_entry->stub_offset = 0;
4218 stub_entry->id_sec = link_sec;
4219
4220 return stub_entry;
4221 }
4222
4223 /* Store an Arm insn into an output section not processed by
4224 elf32_arm_write_section. */
4225
4226 static void
4227 put_arm_insn (struct elf32_arm_link_hash_table * htab,
4228 bfd * output_bfd, bfd_vma val, void * ptr)
4229 {
4230 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4231 bfd_putl32 (val, ptr);
4232 else
4233 bfd_putb32 (val, ptr);
4234 }
4235
4236 /* Store a 16-bit Thumb insn into an output section not processed by
4237 elf32_arm_write_section. */
4238
4239 static void
4240 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
4241 bfd * output_bfd, bfd_vma val, void * ptr)
4242 {
4243 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4244 bfd_putl16 (val, ptr);
4245 else
4246 bfd_putb16 (val, ptr);
4247 }
4248
4249 /* Store a Thumb2 insn into an output section not processed by
4250 elf32_arm_write_section. */
4251
4252 static void
4253 put_thumb2_insn (struct elf32_arm_link_hash_table * htab,
4254 bfd * output_bfd, bfd_vma val, bfd_byte * ptr)
4255 {
4256 /* T2 instructions are 16-bit streamed. */
4257 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4258 {
4259 bfd_putl16 ((val >> 16) & 0xffff, ptr);
4260 bfd_putl16 ((val & 0xffff), ptr + 2);
4261 }
4262 else
4263 {
4264 bfd_putb16 ((val >> 16) & 0xffff, ptr);
4265 bfd_putb16 ((val & 0xffff), ptr + 2);
4266 }
4267 }
4268
4269 /* If it's possible to change R_TYPE to a more efficient access
4270 model, return the new reloc type. */
4271
4272 static unsigned
4273 elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
4274 struct elf_link_hash_entry *h)
4275 {
4276 int is_local = (h == NULL);
4277
4278 if (bfd_link_pic (info)
4279 || (h && h->root.type == bfd_link_hash_undefweak))
4280 return r_type;
4281
4282 /* We do not support relaxations for Old TLS models. */
4283 switch (r_type)
4284 {
4285 case R_ARM_TLS_GOTDESC:
4286 case R_ARM_TLS_CALL:
4287 case R_ARM_THM_TLS_CALL:
4288 case R_ARM_TLS_DESCSEQ:
4289 case R_ARM_THM_TLS_DESCSEQ:
4290 return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
4291 }
4292
4293 return r_type;
4294 }
4295
4296 static bfd_reloc_status_type elf32_arm_final_link_relocate
4297 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
4298 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
4299 const char *, unsigned char, enum arm_st_branch_type,
4300 struct elf_link_hash_entry *, bfd_boolean *, char **);
4301
4302 static unsigned int
4303 arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
4304 {
4305 switch (stub_type)
4306 {
4307 case arm_stub_a8_veneer_b_cond:
4308 case arm_stub_a8_veneer_b:
4309 case arm_stub_a8_veneer_bl:
4310 return 2;
4311
4312 case arm_stub_long_branch_any_any:
4313 case arm_stub_long_branch_v4t_arm_thumb:
4314 case arm_stub_long_branch_thumb_only:
4315 case arm_stub_long_branch_v4t_thumb_thumb:
4316 case arm_stub_long_branch_v4t_thumb_arm:
4317 case arm_stub_short_branch_v4t_thumb_arm:
4318 case arm_stub_long_branch_any_arm_pic:
4319 case arm_stub_long_branch_any_thumb_pic:
4320 case arm_stub_long_branch_v4t_thumb_thumb_pic:
4321 case arm_stub_long_branch_v4t_arm_thumb_pic:
4322 case arm_stub_long_branch_v4t_thumb_arm_pic:
4323 case arm_stub_long_branch_thumb_only_pic:
4324 case arm_stub_long_branch_any_tls_pic:
4325 case arm_stub_long_branch_v4t_thumb_tls_pic:
4326 case arm_stub_a8_veneer_blx:
4327 return 4;
4328
4329 case arm_stub_long_branch_arm_nacl:
4330 case arm_stub_long_branch_arm_nacl_pic:
4331 return 16;
4332
4333 default:
4334 abort (); /* Should be unreachable. */
4335 }
4336 }
4337
/* Emit the code for one stub, described by the hash-table entry
   GEN_ENTRY, into its stub section, then apply the relocations the
   stub template requires.  Called via bfd_hash_traverse; IN_ARG is
   the bfd_link_info.  Returns FALSE only on internal failure; a
   deliberate early TRUE return is used to order Cortex-A8 fixes
   relative to other stubs.  */

static bfd_boolean
arm_build_one_stub (struct bfd_hash_entry *gen_entry,
		    void * in_arg)
{
#define MAXRELOCS 3
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct elf32_arm_link_hash_table *globals;
  struct bfd_link_info *info;
  asection *stub_sec;
  bfd *stub_bfd;
  bfd_byte *loc;
  bfd_vma sym_value;
  int template_size;
  int size;
  const insn_sequence *template_sequence;
  int i;
  /* Only the first two slots get explicit initializers; the third is
     zero-filled by aggregate initialization.  Accesses are bounded by
     nrelocs, so the unused slots are never read.  */
  int stub_reloc_idx[MAXRELOCS] = {-1, -1};
  int stub_reloc_offset[MAXRELOCS] = {0, 0};
  int nrelocs = 0;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  info = (struct bfd_link_info *) in_arg;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  stub_sec = stub_entry->stub_sec;

  /* Build 4-byte-aligned stubs on one traversal pass and 2-byte-aligned
     (Cortex-A8) stubs on the other, depending on the sign of
     fix_cortex_a8.  */
  if ((globals->fix_cortex_a8 < 0)
      != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
    /* We have to do less-strictly-aligned fixes last.  */
    return TRUE;

  /* Make a note of the offset within the stubs for this entry.  */
  stub_entry->stub_offset = stub_sec->size;
  loc = stub_sec->contents + stub_entry->stub_offset;

  stub_bfd = stub_sec->owner;

  /* This is the address of the stub destination.  */
  sym_value = (stub_entry->target_value
	       + stub_entry->target_section->output_offset
	       + stub_entry->target_section->output_section->vma);

  template_sequence = stub_entry->stub_template;
  template_size = stub_entry->stub_template_size;

  /* Emit each template element in turn, recording which elements will
     need a relocation applied below.  */
  size = 0;
  for (i = 0; i < template_size; i++)
    {
      switch (template_sequence[i].type)
	{
	case THUMB16_TYPE:
	  {
	    bfd_vma data = (bfd_vma) template_sequence[i].data;
	    if (template_sequence[i].reloc_addend != 0)
	      {
		/* We've borrowed the reloc_addend field to mean we should
		   insert a condition code into this (Thumb-1 branch)
		   instruction.  See THUMB16_BCOND_INSN.  */
		BFD_ASSERT ((data & 0xff00) == 0xd000);
		data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
	      }
	    bfd_put_16 (stub_bfd, data, loc + size);
	    size += 2;
	  }
	  break;

	case THUMB32_TYPE:
	  /* Thumb-2 insns are stored as two halfwords, most significant
	     halfword first.  */
	  bfd_put_16 (stub_bfd,
		      (template_sequence[i].data >> 16) & 0xffff,
		      loc + size);
	  bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
		      loc + size + 2);
	  if (template_sequence[i].r_type != R_ARM_NONE)
	    {
	      stub_reloc_idx[nrelocs] = i;
	      stub_reloc_offset[nrelocs++] = size;
	    }
	  size += 4;
	  break;

	case ARM_TYPE:
	  bfd_put_32 (stub_bfd, template_sequence[i].data,
		      loc + size);
	  /* Handle cases where the target is encoded within the
	     instruction.  */
	  if (template_sequence[i].r_type == R_ARM_JUMP24)
	    {
	      stub_reloc_idx[nrelocs] = i;
	      stub_reloc_offset[nrelocs++] = size;
	    }
	  size += 4;
	  break;

	case DATA_TYPE:
	  /* Literal data word; always relocated.  */
	  bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
	  stub_reloc_idx[nrelocs] = i;
	  stub_reloc_offset[nrelocs++] = size;
	  size += 4;
	  break;

	default:
	  BFD_FAIL ();
	  return FALSE;
	}
    }

  stub_sec->size += size;

  /* Stub size has already been computed in arm_size_one_stub.  Check
     consistency.  */
  BFD_ASSERT (size == stub_entry->stub_size);

  /* Destination is Thumb.  Force bit 0 to 1 to reflect this.  */
  if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
    sym_value |= 1;

  /* Assume there is at least one and at most MAXRELOCS entries to relocate
     in each stub.  */
  BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);

  /* Now apply the recorded relocations against the stub we just wrote.  */
  for (i = 0; i < nrelocs; i++)
    {
      Elf_Internal_Rela rel;
      bfd_boolean unresolved_reloc;
      char *error_message;
      bfd_vma points_to =
	sym_value + template_sequence[stub_reloc_idx[i]].reloc_addend;

      rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
      rel.r_info = ELF32_R_INFO (0,
				 template_sequence[stub_reloc_idx[i]].r_type);
      rel.r_addend = 0;

      if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
	/* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
	   template should refer back to the instruction after the original
	   branch.  We use target_section as Cortex-A8 erratum workaround stubs
	   are only generated when both source and target are in the same
	   section.  */
	points_to = stub_entry->target_section->output_section->vma
		    + stub_entry->target_section->output_offset
		    + stub_entry->source_value;

      elf32_arm_final_link_relocate (elf32_arm_howto_from_type
	  (template_sequence[stub_reloc_idx[i]].r_type),
	   stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
	   points_to, info, stub_entry->target_section, "", STT_FUNC,
	   stub_entry->branch_type,
	   (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
	   &error_message);
    }

  return TRUE;
#undef MAXRELOCS
}
4497
4498 /* Calculate the template, template size and instruction size for a stub.
4499 Return value is the instruction size. */
4500
4501 static unsigned int
4502 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
4503 const insn_sequence **stub_template,
4504 int *stub_template_size)
4505 {
4506 const insn_sequence *template_sequence = NULL;
4507 int template_size = 0, i;
4508 unsigned int size;
4509
4510 template_sequence = stub_definitions[stub_type].template_sequence;
4511 if (stub_template)
4512 *stub_template = template_sequence;
4513
4514 template_size = stub_definitions[stub_type].template_size;
4515 if (stub_template_size)
4516 *stub_template_size = template_size;
4517
4518 size = 0;
4519 for (i = 0; i < template_size; i++)
4520 {
4521 switch (template_sequence[i].type)
4522 {
4523 case THUMB16_TYPE:
4524 size += 2;
4525 break;
4526
4527 case ARM_TYPE:
4528 case THUMB32_TYPE:
4529 case DATA_TYPE:
4530 size += 4;
4531 break;
4532
4533 default:
4534 BFD_FAIL ();
4535 return 0;
4536 }
4537 }
4538
4539 return size;
4540 }
4541
4542 /* As above, but don't actually build the stub. Just bump offset so
4543 we know stub section sizes. */
4544
4545 static bfd_boolean
4546 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
4547 void *in_arg ATTRIBUTE_UNUSED)
4548 {
4549 struct elf32_arm_stub_hash_entry *stub_entry;
4550 const insn_sequence *template_sequence;
4551 int template_size, size;
4552
4553 /* Massage our args to the form they really have. */
4554 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
4555
4556 BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
4557 && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));
4558
4559 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
4560 &template_size);
4561
4562 stub_entry->stub_size = size;
4563 stub_entry->stub_template = template_sequence;
4564 stub_entry->stub_template_size = template_size;
4565
4566 size = (size + 7) & ~7;
4567 stub_entry->stub_sec->size += size;
4568
4569 return TRUE;
4570 }
4571
4572 /* External entry points for sizing and building linker stubs. */
4573
4574 /* Set up various things so that we can make a list of input sections
4575 for each output section included in the link. Returns -1 on error,
4576 0 when no stubs will be needed, and 1 on success. */
4577
4578 int
4579 elf32_arm_setup_section_lists (bfd *output_bfd,
4580 struct bfd_link_info *info)
4581 {
4582 bfd *input_bfd;
4583 unsigned int bfd_count;
4584 unsigned int top_id, top_index;
4585 asection *section;
4586 asection **input_list, **list;
4587 bfd_size_type amt;
4588 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4589
4590 if (htab == NULL)
4591 return 0;
4592 if (! is_elf_hash_table (htab))
4593 return 0;
4594
4595 /* Count the number of input BFDs and find the top input section id. */
4596 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
4597 input_bfd != NULL;
4598 input_bfd = input_bfd->link.next)
4599 {
4600 bfd_count += 1;
4601 for (section = input_bfd->sections;
4602 section != NULL;
4603 section = section->next)
4604 {
4605 if (top_id < section->id)
4606 top_id = section->id;
4607 }
4608 }
4609 htab->bfd_count = bfd_count;
4610
4611 amt = sizeof (struct map_stub) * (top_id + 1);
4612 htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
4613 if (htab->stub_group == NULL)
4614 return -1;
4615 htab->top_id = top_id;
4616
4617 /* We can't use output_bfd->section_count here to find the top output
4618 section index as some sections may have been removed, and
4619 _bfd_strip_section_from_output doesn't renumber the indices. */
4620 for (section = output_bfd->sections, top_index = 0;
4621 section != NULL;
4622 section = section->next)
4623 {
4624 if (top_index < section->index)
4625 top_index = section->index;
4626 }
4627
4628 htab->top_index = top_index;
4629 amt = sizeof (asection *) * (top_index + 1);
4630 input_list = (asection **) bfd_malloc (amt);
4631 htab->input_list = input_list;
4632 if (input_list == NULL)
4633 return -1;
4634
4635 /* For sections we aren't interested in, mark their entries with a
4636 value we can check later. */
4637 list = input_list + top_index;
4638 do
4639 *list = bfd_abs_section_ptr;
4640 while (list-- != input_list);
4641
4642 for (section = output_bfd->sections;
4643 section != NULL;
4644 section = section->next)
4645 {
4646 if ((section->flags & SEC_CODE) != 0)
4647 input_list[section->index] = NULL;
4648 }
4649
4650 return 1;
4651 }
4652
/* The linker repeatedly calls this function for each input section,
   in the order that input sections are linked into output sections.
   Build lists of input sections to determine groupings between which
   we may insert linker stubs.  The per-output-section list heads live
   in htab->input_list, set up by elf32_arm_setup_section_lists.  */

void
elf32_arm_next_input_section (struct bfd_link_info *info,
			      asection *isec)
{
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);

  if (htab == NULL)
    return;

  if (isec->output_section->index <= htab->top_index)
    {
      asection **list = htab->input_list + isec->output_section->index;

      /* *list is bfd_abs_section_ptr for output sections we are not
	 interested in (see elf32_arm_setup_section_lists); only code
	 sections are chained.  */
      if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
	{
	  /* Steal the link_sec pointer for our list.  */
#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
	  /* This happens to make the list in reverse order,
	     which we reverse later.  */
	  PREV_SEC (isec) = *list;
	  *list = isec;
	}
    }
}
4682
/* See whether we can group stub sections together.  Grouping stub
   sections may result in fewer stubs.  More importantly, we need to
   put all .init* and .fini* stubs at the end of the .init or
   .fini output sections respectively, because glibc splits the
   _init and _fini functions into multiple parts.  Putting a stub in
   the middle of a function is not a good idea.

   Walks the per-output-section lists built by
   elf32_arm_next_input_section (linked through the PREV_SEC macro
   defined there) and records, for every input section, the last
   section of its group in htab->stub_group[id].link_sec.  */

static void
group_sections (struct elf32_arm_link_hash_table *htab,
		bfd_size_type stub_group_size,
		bfd_boolean stubs_always_after_branch)
{
  asection **list = htab->input_list;

  do
    {
      asection *tail = *list;
      asection *head;

      /* Entries for non-code output sections were marked with
	 bfd_abs_section_ptr in elf32_arm_setup_section_lists.  */
      if (tail == bfd_abs_section_ptr)
	continue;

      /* Reverse the list: we must avoid placing stubs at the
	 beginning of the section because the beginning of the text
	 section may be required for an interrupt vector in bare metal
	 code.  */
#define NEXT_SEC PREV_SEC
      head = NULL;
      while (tail != NULL)
	{
	  /* Pop from tail.  */
	  asection *item = tail;
	  tail = PREV_SEC (item);

	  /* Push on head.  */
	  NEXT_SEC (item) = head;
	  head = item;
	}

      while (head != NULL)
	{
	  asection *curr;
	  asection *next;
	  bfd_vma stub_group_start = head->output_offset;
	  bfd_vma end_of_next;

	  /* Extend the group while the distance from the group start
	     to the end of the next section stays below
	     stub_group_size.  */
	  curr = head;
	  while (NEXT_SEC (curr) != NULL)
	    {
	      next = NEXT_SEC (curr);
	      end_of_next = next->output_offset + next->size;
	      if (end_of_next - stub_group_start >= stub_group_size)
		/* End of NEXT is too far from start, so stop.  */
		break;
	      /* Add NEXT to the group.  */
	      curr = next;
	    }

	  /* OK, the size from the start to the start of CURR is less
	     than stub_group_size and thus can be handled by one stub
	     section.  (Or the head section is itself larger than
	     stub_group_size, in which case we may be toast.)
	     We should really be keeping track of the total size of
	     stubs added here, as stubs contribute to the final output
	     section size.  */
	  do
	    {
	      next = NEXT_SEC (head);
	      /* Set up this stub group.  */
	      htab->stub_group[head->id].link_sec = curr;
	    }
	  while (head != curr && (head = next) != NULL);

	  /* But wait, there's more!  Input sections up to stub_group_size
	     bytes after the stub section can be handled by it too.  */
	  if (!stubs_always_after_branch)
	    {
	      stub_group_start = curr->output_offset + curr->size;

	      while (next != NULL)
		{
		  end_of_next = next->output_offset + next->size;
		  if (end_of_next - stub_group_start >= stub_group_size)
		    /* End of NEXT is too far from stubs, so stop.  */
		    break;
		  /* Add NEXT to the stub group.  */
		  head = next;
		  next = NEXT_SEC (head);
		  htab->stub_group[head->id].link_sec = curr;
		}
	    }
	  head = next;
	}
    }
  while (list++ != htab->input_list + htab->top_index);

  /* The raw input lists are no longer needed once grouping is done.  */
  free (htab->input_list);
#undef PREV_SEC
#undef NEXT_SEC
}
4783
4784 /* Comparison function for sorting/searching relocations relating to Cortex-A8
4785 erratum fix. */
4786
4787 static int
4788 a8_reloc_compare (const void *a, const void *b)
4789 {
4790 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
4791 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
4792
4793 if (ra->from < rb->from)
4794 return -1;
4795 else if (ra->from > rb->from)
4796 return 1;
4797 else
4798 return 0;
4799 }
4800
4801 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
4802 const char *, char **);
4803
4804 /* Helper function to scan code for sequences which might trigger the Cortex-A8
4805 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
4806 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
4807 otherwise. */
4808
4809 static bfd_boolean
4810 cortex_a8_erratum_scan (bfd *input_bfd,
4811 struct bfd_link_info *info,
4812 struct a8_erratum_fix **a8_fixes_p,
4813 unsigned int *num_a8_fixes_p,
4814 unsigned int *a8_fix_table_size_p,
4815 struct a8_erratum_reloc *a8_relocs,
4816 unsigned int num_a8_relocs,
4817 unsigned prev_num_a8_fixes,
4818 bfd_boolean *stub_changed_p)
4819 {
4820 asection *section;
4821 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4822 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
4823 unsigned int num_a8_fixes = *num_a8_fixes_p;
4824 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
4825
4826 if (htab == NULL)
4827 return FALSE;
4828
4829 for (section = input_bfd->sections;
4830 section != NULL;
4831 section = section->next)
4832 {
4833 bfd_byte *contents = NULL;
4834 struct _arm_elf_section_data *sec_data;
4835 unsigned int span;
4836 bfd_vma base_vma;
4837
4838 if (elf_section_type (section) != SHT_PROGBITS
4839 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
4840 || (section->flags & SEC_EXCLUDE) != 0
4841 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
4842 || (section->output_section == bfd_abs_section_ptr))
4843 continue;
4844
4845 base_vma = section->output_section->vma + section->output_offset;
4846
4847 if (elf_section_data (section)->this_hdr.contents != NULL)
4848 contents = elf_section_data (section)->this_hdr.contents;
4849 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
4850 return TRUE;
4851
4852 sec_data = elf32_arm_section_data (section);
4853
4854 for (span = 0; span < sec_data->mapcount; span++)
4855 {
4856 unsigned int span_start = sec_data->map[span].vma;
4857 unsigned int span_end = (span == sec_data->mapcount - 1)
4858 ? section->size : sec_data->map[span + 1].vma;
4859 unsigned int i;
4860 char span_type = sec_data->map[span].type;
4861 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
4862
4863 if (span_type != 't')
4864 continue;
4865
4866 /* Span is entirely within a single 4KB region: skip scanning. */
4867 if (((base_vma + span_start) & ~0xfff)
4868 == ((base_vma + span_end) & ~0xfff))
4869 continue;
4870
4871 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
4872
4873 * The opcode is BLX.W, BL.W, B.W, Bcc.W
4874 * The branch target is in the same 4KB region as the
4875 first half of the branch.
4876 * The instruction before the branch is a 32-bit
4877 length non-branch instruction. */
4878 for (i = span_start; i < span_end;)
4879 {
4880 unsigned int insn = bfd_getl16 (&contents[i]);
4881 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
4882 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
4883
4884 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
4885 insn_32bit = TRUE;
4886
4887 if (insn_32bit)
4888 {
4889 /* Load the rest of the insn (in manual-friendly order). */
4890 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
4891
4892 /* Encoding T4: B<c>.W. */
4893 is_b = (insn & 0xf800d000) == 0xf0009000;
4894 /* Encoding T1: BL<c>.W. */
4895 is_bl = (insn & 0xf800d000) == 0xf000d000;
4896 /* Encoding T2: BLX<c>.W. */
4897 is_blx = (insn & 0xf800d000) == 0xf000c000;
4898 /* Encoding T3: B<c>.W (not permitted in IT block). */
4899 is_bcc = (insn & 0xf800d000) == 0xf0008000
4900 && (insn & 0x07f00000) != 0x03800000;
4901 }
4902
4903 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
4904
4905 if (((base_vma + i) & 0xfff) == 0xffe
4906 && insn_32bit
4907 && is_32bit_branch
4908 && last_was_32bit
4909 && ! last_was_branch)
4910 {
4911 bfd_signed_vma offset = 0;
4912 bfd_boolean force_target_arm = FALSE;
4913 bfd_boolean force_target_thumb = FALSE;
4914 bfd_vma target;
4915 enum elf32_arm_stub_type stub_type = arm_stub_none;
4916 struct a8_erratum_reloc key, *found;
4917 bfd_boolean use_plt = FALSE;
4918
4919 key.from = base_vma + i;
4920 found = (struct a8_erratum_reloc *)
4921 bsearch (&key, a8_relocs, num_a8_relocs,
4922 sizeof (struct a8_erratum_reloc),
4923 &a8_reloc_compare);
4924
4925 if (found)
4926 {
4927 char *error_message = NULL;
4928 struct elf_link_hash_entry *entry;
4929
4930 /* We don't care about the error returned from this
4931 function, only if there is glue or not. */
4932 entry = find_thumb_glue (info, found->sym_name,
4933 &error_message);
4934
4935 if (entry)
4936 found->non_a8_stub = TRUE;
4937
4938 /* Keep a simpler condition, for the sake of clarity. */
4939 if (htab->root.splt != NULL && found->hash != NULL
4940 && found->hash->root.plt.offset != (bfd_vma) -1)
4941 use_plt = TRUE;
4942
4943 if (found->r_type == R_ARM_THM_CALL)
4944 {
4945 if (found->branch_type == ST_BRANCH_TO_ARM
4946 || use_plt)
4947 force_target_arm = TRUE;
4948 else
4949 force_target_thumb = TRUE;
4950 }
4951 }
4952
4953 /* Check if we have an offending branch instruction. */
4954
4955 if (found && found->non_a8_stub)
4956 /* We've already made a stub for this instruction, e.g.
4957 it's a long branch or a Thumb->ARM stub. Assume that
4958 stub will suffice to work around the A8 erratum (see
4959 setting of always_after_branch above). */
4960 ;
4961 else if (is_bcc)
4962 {
4963 offset = (insn & 0x7ff) << 1;
4964 offset |= (insn & 0x3f0000) >> 4;
4965 offset |= (insn & 0x2000) ? 0x40000 : 0;
4966 offset |= (insn & 0x800) ? 0x80000 : 0;
4967 offset |= (insn & 0x4000000) ? 0x100000 : 0;
4968 if (offset & 0x100000)
4969 offset |= ~ ((bfd_signed_vma) 0xfffff);
4970 stub_type = arm_stub_a8_veneer_b_cond;
4971 }
4972 else if (is_b || is_bl || is_blx)
4973 {
4974 int s = (insn & 0x4000000) != 0;
4975 int j1 = (insn & 0x2000) != 0;
4976 int j2 = (insn & 0x800) != 0;
4977 int i1 = !(j1 ^ s);
4978 int i2 = !(j2 ^ s);
4979
4980 offset = (insn & 0x7ff) << 1;
4981 offset |= (insn & 0x3ff0000) >> 4;
4982 offset |= i2 << 22;
4983 offset |= i1 << 23;
4984 offset |= s << 24;
4985 if (offset & 0x1000000)
4986 offset |= ~ ((bfd_signed_vma) 0xffffff);
4987
4988 if (is_blx)
4989 offset &= ~ ((bfd_signed_vma) 3);
4990
4991 stub_type = is_blx ? arm_stub_a8_veneer_blx :
4992 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
4993 }
4994
4995 if (stub_type != arm_stub_none)
4996 {
4997 bfd_vma pc_for_insn = base_vma + i + 4;
4998
4999 /* The original instruction is a BL, but the target is
5000 an ARM instruction. If we were not making a stub,
5001 the BL would have been converted to a BLX. Use the
5002 BLX stub instead in that case. */
5003 if (htab->use_blx && force_target_arm
5004 && stub_type == arm_stub_a8_veneer_bl)
5005 {
5006 stub_type = arm_stub_a8_veneer_blx;
5007 is_blx = TRUE;
5008 is_bl = FALSE;
5009 }
5010 /* Conversely, if the original instruction was
5011 BLX but the target is Thumb mode, use the BL
5012 stub. */
5013 else if (force_target_thumb
5014 && stub_type == arm_stub_a8_veneer_blx)
5015 {
5016 stub_type = arm_stub_a8_veneer_bl;
5017 is_blx = FALSE;
5018 is_bl = TRUE;
5019 }
5020
5021 if (is_blx)
5022 pc_for_insn &= ~ ((bfd_vma) 3);
5023
5024 /* If we found a relocation, use the proper destination,
5025 not the offset in the (unrelocated) instruction.
5026 Note this is always done if we switched the stub type
5027 above. */
5028 if (found)
5029 offset =
5030 (bfd_signed_vma) (found->destination - pc_for_insn);
5031
5032 /* If the stub will use a Thumb-mode branch to a
5033 PLT target, redirect it to the preceding Thumb
5034 entry point. */
5035 if (stub_type != arm_stub_a8_veneer_blx && use_plt)
5036 offset -= PLT_THUMB_STUB_SIZE;
5037
5038 target = pc_for_insn + offset;
5039
5040 /* The BLX stub is ARM-mode code. Adjust the offset to
5041 take the different PC value (+8 instead of +4) into
5042 account. */
5043 if (stub_type == arm_stub_a8_veneer_blx)
5044 offset += 4;
5045
5046 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
5047 {
5048 char *stub_name = NULL;
5049
5050 if (num_a8_fixes == a8_fix_table_size)
5051 {
5052 a8_fix_table_size *= 2;
5053 a8_fixes = (struct a8_erratum_fix *)
5054 bfd_realloc (a8_fixes,
5055 sizeof (struct a8_erratum_fix)
5056 * a8_fix_table_size);
5057 }
5058
5059 if (num_a8_fixes < prev_num_a8_fixes)
5060 {
5061 /* If we're doing a subsequent scan,
5062 check if we've found the same fix as
5063 before, and try and reuse the stub
5064 name. */
5065 stub_name = a8_fixes[num_a8_fixes].stub_name;
5066 if ((a8_fixes[num_a8_fixes].section != section)
5067 || (a8_fixes[num_a8_fixes].offset != i))
5068 {
5069 free (stub_name);
5070 stub_name = NULL;
5071 *stub_changed_p = TRUE;
5072 }
5073 }
5074
5075 if (!stub_name)
5076 {
5077 stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
5078 if (stub_name != NULL)
5079 sprintf (stub_name, "%x:%x", section->id, i);
5080 }
5081
5082 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
5083 a8_fixes[num_a8_fixes].section = section;
5084 a8_fixes[num_a8_fixes].offset = i;
5085 a8_fixes[num_a8_fixes].target_offset =
5086 target - base_vma;
5087 a8_fixes[num_a8_fixes].orig_insn = insn;
5088 a8_fixes[num_a8_fixes].stub_name = stub_name;
5089 a8_fixes[num_a8_fixes].stub_type = stub_type;
5090 a8_fixes[num_a8_fixes].branch_type =
5091 is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB;
5092
5093 num_a8_fixes++;
5094 }
5095 }
5096 }
5097
5098 i += insn_32bit ? 4 : 2;
5099 last_was_32bit = insn_32bit;
5100 last_was_branch = is_32bit_branch;
5101 }
5102 }
5103
5104 if (elf_section_data (section)->this_hdr.contents == NULL)
5105 free (contents);
5106 }
5107
5108 *a8_fixes_p = a8_fixes;
5109 *num_a8_fixes_p = num_a8_fixes;
5110 *a8_fix_table_size_p = a8_fix_table_size;
5111
5112 return FALSE;
5113 }
5114
/* Create or update a stub entry depending on whether the stub can already be
   found in HTAB.  The stub is identified by:
   - its type STUB_TYPE
   - its source branch (note that several can share the same stub) whose
     section and relocation (if any) are given by SECTION and IRELA
     respectively
   - its target symbol whose input section, hash, name, value and branch type
     are given in SYM_SEC, HASH, SYM_NAME, SYM_VALUE and BRANCH_TYPE
     respectively

   If found, the value of the stub's target symbol is updated from SYM_VALUE
   and *NEW_STUB is set to FALSE.  Otherwise, *NEW_STUB is set to
   TRUE and the stub entry is initialized.

   Returns whether the stub could be successfully created or updated, or FALSE
   if an error occured.  */

static bfd_boolean
elf32_arm_create_stub (struct elf32_arm_link_hash_table *htab,
		       enum elf32_arm_stub_type stub_type, asection *section,
		       Elf_Internal_Rela *irela, asection *sym_sec,
		       struct elf32_arm_link_hash_entry *hash, char *sym_name,
		       bfd_vma sym_value, enum arm_st_branch_type branch_type,
		       bfd_boolean *new_stub)
{
  const asection *id_sec;
  char *stub_name;
  struct elf32_arm_stub_hash_entry *stub_entry;
  unsigned int r_type;

  BFD_ASSERT (stub_type != arm_stub_none);
  *new_stub = FALSE;

  BFD_ASSERT (irela);
  BFD_ASSERT (section);

  /* Support for grouping stub sections.  Stubs are keyed on the group's
     representative section rather than on SECTION itself.  */
  id_sec = htab->stub_group[section->id].link_sec;

  /* Get the name of this stub.  The caller owns STUB_NAME until it is
     handed to elf32_arm_add_stub below; on every other path it must be
     freed here.  */
  stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash, irela, stub_type);
  if (!stub_name)
    return FALSE;

  stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name, FALSE,
				     FALSE);
  /* The proper stub has already been created, just update its value.  */
  if (stub_entry != NULL)
    {
      free (stub_name);
      stub_entry->target_value = sym_value;
      return TRUE;
    }

  stub_entry = elf32_arm_add_stub (stub_name, section, htab);
  if (stub_entry == NULL)
    {
      free (stub_name);
      return FALSE;
    }

  stub_entry->target_value = sym_value;
  stub_entry->target_section = sym_sec;
  stub_entry->stub_type = stub_type;
  stub_entry->h = hash;
  stub_entry->branch_type = branch_type;

  if (sym_name == NULL)
    sym_name = "unnamed";
  /* NOTE(review): the buffer is sized from THUMB2ARM_GLUE_ENTRY_NAME but is
     also used with ARM2THUMB_GLUE_ENTRY_NAME and STUB_ENTRY_NAME below --
     presumably THUMB2ARM_GLUE_ENTRY_NAME is the longest template; confirm
     this invariant if the templates ever change.  */
  stub_entry->output_name = (char *)
    bfd_alloc (htab->stub_bfd, sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
			       + strlen (sym_name));
  if (stub_entry->output_name == NULL)
    {
      free (stub_name);
      return FALSE;
    }

  /* For historical reasons, use the existing names for ARM-to-Thumb and
     Thumb-to-ARM stubs.  */
  r_type = ELF32_R_TYPE (irela->r_info);
  if ((r_type == (unsigned int) R_ARM_THM_CALL
       || r_type == (unsigned int) R_ARM_THM_JUMP24
       || r_type == (unsigned int) R_ARM_THM_JUMP19)
      && branch_type == ST_BRANCH_TO_ARM)
    sprintf (stub_entry->output_name, THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
  else if ((r_type == (unsigned int) R_ARM_CALL
	    || r_type == (unsigned int) R_ARM_JUMP24)
	   && branch_type == ST_BRANCH_TO_THUMB)
    sprintf (stub_entry->output_name, ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
  else
    sprintf (stub_entry->output_name, STUB_ENTRY_NAME, sym_name);

  *new_stub = TRUE;
  return TRUE;
}
5211
/* Determine and set the size of the stub section for a final link.

   The basic idea here is to examine all the relocations looking for
   PC-relative calls to a target that is unreachable with a "bl"
   instruction.  */

bfd_boolean
elf32_arm_size_stubs (bfd *output_bfd,
		      bfd *stub_bfd,
		      struct bfd_link_info *info,
		      bfd_signed_vma group_size,
		      asection * (*add_stub_section) (const char *, asection *,
						      asection *,
						      unsigned int),
		      void (*layout_sections_again) (void))
{
  bfd_size_type stub_group_size;
  bfd_boolean stubs_always_after_branch;
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
  struct a8_erratum_fix *a8_fixes = NULL;
  unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
  struct a8_erratum_reloc *a8_relocs = NULL;
  unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;

  if (htab == NULL)
    return FALSE;

  if (htab->fix_cortex_a8)
    {
      a8_fixes = (struct a8_erratum_fix *)
	  bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
      a8_relocs = (struct a8_erratum_reloc *)
	  bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
    }

  /* Propagate mach to stub bfd, because it may not have been
     finalized when we created stub_bfd.  */
  bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
		     bfd_get_mach (output_bfd));

  /* Stash our params away.  */
  htab->stub_bfd = stub_bfd;
  htab->add_stub_section = add_stub_section;
  htab->layout_sections_again = layout_sections_again;
  stubs_always_after_branch = group_size < 0;

  /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
     as the first half of a 32-bit branch straddling two 4K pages.  This is a
     crude way of enforcing that.  */
  if (htab->fix_cortex_a8)
    stubs_always_after_branch = 1;

  if (group_size < 0)
    stub_group_size = -group_size;
  else
    stub_group_size = group_size;

  if (stub_group_size == 1)
    {
      /* Default values.  */
      /* Thumb branch range is +-4MB has to be used as the default
	 maximum size (a given section can contain both ARM and Thumb
	 code, so the worst case has to be taken into account).

	 This value is 24K less than that, which allows for 2025
	 12-byte stubs.  If we exceed that, then we will fail to link.
	 The user will have to relink with an explicit group size
	 option.  */
      stub_group_size = 4170000;
    }

  group_sections (htab, stub_group_size, stubs_always_after_branch);

  /* If we're applying the cortex A8 fix, we need to determine the
     program header size now, because we cannot change it later --
     that could alter section placements.  Notice the A8 erratum fix
     ends up requiring the section addresses to remain unchanged
     modulo the page size.  That's something we cannot represent
     inside BFD, and we don't want to force the section alignment to
     be the page size.  */
  if (htab->fix_cortex_a8)
    (*htab->layout_sections_again) ();

  /* Iterate to a fixed point: each pass may add stubs, which changes
     section layout, which may in turn require further stubs.  The loop
     exits when a pass adds nothing new.  */
  while (1)
    {
      bfd *input_bfd;
      unsigned int bfd_indx;
      asection *stub_sec;
      bfd_boolean stub_changed = FALSE;
      unsigned prev_num_a8_fixes = num_a8_fixes;

      num_a8_fixes = 0;
      for (input_bfd = info->input_bfds, bfd_indx = 0;
	   input_bfd != NULL;
	   input_bfd = input_bfd->link.next, bfd_indx++)
	{
	  Elf_Internal_Shdr *symtab_hdr;
	  asection *section;
	  Elf_Internal_Sym *local_syms = NULL;

	  if (!is_arm_elf (input_bfd))
	    continue;

	  num_a8_relocs = 0;

	  /* We'll need the symbol table in a second.  */
	  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
	  if (symtab_hdr->sh_info == 0)
	    continue;

	  /* Walk over each section attached to the input bfd.  */
	  for (section = input_bfd->sections;
	       section != NULL;
	       section = section->next)
	    {
	      Elf_Internal_Rela *internal_relocs, *irelaend, *irela;

	      /* If there aren't any relocs, then there's nothing more
		 to do.  */
	      if ((section->flags & SEC_RELOC) == 0
		  || section->reloc_count == 0
		  || (section->flags & SEC_CODE) == 0)
		continue;

	      /* If this section is a link-once section that will be
		 discarded, then don't create any stubs.  */
	      if (section->output_section == NULL
		  || section->output_section->owner != output_bfd)
		continue;

	      /* Get the relocs.  */
	      internal_relocs
		= _bfd_elf_link_read_relocs (input_bfd, section, NULL,
					     NULL, info->keep_memory);
	      if (internal_relocs == NULL)
		goto error_ret_free_local;

	      /* Now examine each relocation.  */
	      irela = internal_relocs;
	      irelaend = irela + section->reloc_count;
	      for (; irela < irelaend; irela++)
		{
		  unsigned int r_type, r_indx;
		  enum elf32_arm_stub_type stub_type;
		  asection *sym_sec;
		  bfd_vma sym_value;
		  bfd_vma destination;
		  struct elf32_arm_link_hash_entry *hash;
		  const char *sym_name;
		  unsigned char st_type;
		  enum arm_st_branch_type branch_type;
		  bfd_boolean created_stub = FALSE;

		  r_type = ELF32_R_TYPE (irela->r_info);
		  r_indx = ELF32_R_SYM (irela->r_info);

		  if (r_type >= (unsigned int) R_ARM_max)
		    {
		      bfd_set_error (bfd_error_bad_value);
		    /* Shared error-exit labels: free the reloc buffer
		       and/or the symbol buffer only when they are not
		       cached on the section/symtab header.  */
		    error_ret_free_internal:
		      if (elf_section_data (section)->relocs == NULL)
			free (internal_relocs);
		    /* Fall through.  */
		    error_ret_free_local:
		      if (local_syms != NULL
			  && (symtab_hdr->contents
			      != (unsigned char *) local_syms))
			free (local_syms);
		      return FALSE;
		    }

		  hash = NULL;
		  if (r_indx >= symtab_hdr->sh_info)
		    hash = elf32_arm_hash_entry
		      (elf_sym_hashes (input_bfd)
		       [r_indx - symtab_hdr->sh_info]);

		  /* Only look for stubs on branch instructions, or
		     non-relaxed TLSCALL  */
		  if ((r_type != (unsigned int) R_ARM_CALL)
		      && (r_type != (unsigned int) R_ARM_THM_CALL)
		      && (r_type != (unsigned int) R_ARM_JUMP24)
		      && (r_type != (unsigned int) R_ARM_THM_JUMP19)
		      && (r_type != (unsigned int) R_ARM_THM_XPC22)
		      && (r_type != (unsigned int) R_ARM_THM_JUMP24)
		      && (r_type != (unsigned int) R_ARM_PLT32)
		      && !((r_type == (unsigned int) R_ARM_TLS_CALL
			    || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
			   && r_type == elf32_arm_tls_transition
			       (info, r_type, &hash->root)
			   && ((hash ? hash->tls_type
				: (elf32_arm_local_got_tls_type
				   (input_bfd)[r_indx]))
			       & GOT_TLS_GDESC) != 0))
		    continue;

		  /* Now determine the call target, its name, value,
		     section.  */
		  sym_sec = NULL;
		  sym_value = 0;
		  destination = 0;
		  sym_name = NULL;

		  if (r_type == (unsigned int) R_ARM_TLS_CALL
		      || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
		    {
		      /* A non-relaxed TLS call.  The target is the
			 plt-resident trampoline and nothing to do
			 with the symbol.  */
		      BFD_ASSERT (htab->tls_trampoline > 0);
		      sym_sec = htab->root.splt;
		      sym_value = htab->tls_trampoline;
		      hash = 0;
		      st_type = STT_FUNC;
		      branch_type = ST_BRANCH_TO_ARM;
		    }
		  else if (!hash)
		    {
		      /* It's a local symbol.  */
		      Elf_Internal_Sym *sym;

		      if (local_syms == NULL)
			{
			  local_syms
			    = (Elf_Internal_Sym *) symtab_hdr->contents;
			  if (local_syms == NULL)
			    local_syms
			      = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
						      symtab_hdr->sh_info, 0,
						      NULL, NULL, NULL);
			  if (local_syms == NULL)
			    goto error_ret_free_internal;
			}

		      sym = local_syms + r_indx;
		      if (sym->st_shndx == SHN_UNDEF)
			sym_sec = bfd_und_section_ptr;
		      else if (sym->st_shndx == SHN_ABS)
			sym_sec = bfd_abs_section_ptr;
		      else if (sym->st_shndx == SHN_COMMON)
			sym_sec = bfd_com_section_ptr;
		      else
			sym_sec =
			  bfd_section_from_elf_index (input_bfd, sym->st_shndx);

		      if (!sym_sec)
			/* This is an undefined symbol.  It can never
			   be resolved.  */
			continue;

		      if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
			sym_value = sym->st_value;
		      destination = (sym_value + irela->r_addend
				     + sym_sec->output_offset
				     + sym_sec->output_section->vma);
		      st_type = ELF_ST_TYPE (sym->st_info);
		      branch_type =
			ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
		      sym_name
			= bfd_elf_string_from_elf_section (input_bfd,
							   symtab_hdr->sh_link,
							   sym->st_name);
		    }
		  else
		    {
		      /* It's an external symbol.  */
		      while (hash->root.root.type == bfd_link_hash_indirect
			     || hash->root.root.type == bfd_link_hash_warning)
			hash = ((struct elf32_arm_link_hash_entry *)
				hash->root.root.u.i.link);

		      if (hash->root.root.type == bfd_link_hash_defined
			  || hash->root.root.type == bfd_link_hash_defweak)
			{
			  sym_sec = hash->root.root.u.def.section;
			  sym_value = hash->root.root.u.def.value;

			  struct elf32_arm_link_hash_table *globals =
			    elf32_arm_hash_table (info);

			  /* For a destination in a shared library,
			     use the PLT stub as target address to
			     decide whether a branch stub is
			     needed.  */
			  if (globals != NULL
			      && globals->root.splt != NULL
			      && hash != NULL
			      && hash->root.plt.offset != (bfd_vma) -1)
			    {
			      sym_sec = globals->root.splt;
			      sym_value = hash->root.plt.offset;
			      if (sym_sec->output_section != NULL)
				destination = (sym_value
					       + sym_sec->output_offset
					       + sym_sec->output_section->vma);
			    }
			  else if (sym_sec->output_section != NULL)
			    destination = (sym_value + irela->r_addend
					   + sym_sec->output_offset
					   + sym_sec->output_section->vma);
			}
		      else if ((hash->root.root.type == bfd_link_hash_undefined)
			       || (hash->root.root.type == bfd_link_hash_undefweak))
			{
			  /* For a shared library, use the PLT stub as
			     target address to decide whether a long
			     branch stub is needed.
			     For absolute code, they cannot be handled.  */
			  struct elf32_arm_link_hash_table *globals =
			    elf32_arm_hash_table (info);

			  if (globals != NULL
			      && globals->root.splt != NULL
			      && hash != NULL
			      && hash->root.plt.offset != (bfd_vma) -1)
			    {
			      sym_sec = globals->root.splt;
			      sym_value = hash->root.plt.offset;
			      if (sym_sec->output_section != NULL)
				destination = (sym_value
					       + sym_sec->output_offset
					       + sym_sec->output_section->vma);
			    }
			  else
			    continue;
			}
		      else
			{
			  bfd_set_error (bfd_error_bad_value);
			  goto error_ret_free_internal;
			}
		      st_type = hash->root.type;
		      branch_type =
			ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
		      sym_name = hash->root.root.root.string;
		    }

		  /* do { } while (0) gives a break-able scope for the
		     stub-creation steps below.  */
		  do
		    {
		      bfd_boolean new_stub;

		      /* Determine what (if any) linker stub is needed.  */
		      stub_type = arm_type_of_stub (info, section, irela,
						    st_type, &branch_type,
						    hash, destination, sym_sec,
						    input_bfd, sym_name);
		      if (stub_type == arm_stub_none)
			break;

		      /* We've either created a stub for this reloc already,
			 or we are about to.  */
		      created_stub =
			elf32_arm_create_stub (htab, stub_type, section, irela,
					       sym_sec, hash,
					       (char *) sym_name, sym_value,
					       branch_type, &new_stub);

		      if (!created_stub)
			goto error_ret_free_internal;
		      else if (!new_stub)
			break;
		      else
			stub_changed = TRUE;
		    }
		  while (0);

		  /* Look for relocations which might trigger Cortex-A8
		     erratum.  */
		  if (htab->fix_cortex_a8
		      && (r_type == (unsigned int) R_ARM_THM_JUMP24
			  || r_type == (unsigned int) R_ARM_THM_JUMP19
			  || r_type == (unsigned int) R_ARM_THM_CALL
			  || r_type == (unsigned int) R_ARM_THM_XPC22))
		    {
		      bfd_vma from = section->output_section->vma
				     + section->output_offset
				     + irela->r_offset;

		      if ((from & 0xfff) == 0xffe)
			{
			  /* Found a candidate.  Note we haven't checked the
			     destination is within 4K here: if we do so (and
			     don't create an entry in a8_relocs) we can't tell
			     that a branch should have been relocated when
			     scanning later.  */
			  if (num_a8_relocs == a8_reloc_table_size)
			    {
			      a8_reloc_table_size *= 2;
			      a8_relocs = (struct a8_erratum_reloc *)
				bfd_realloc (a8_relocs,
					     sizeof (struct a8_erratum_reloc)
					     * a8_reloc_table_size);
			    }

			  a8_relocs[num_a8_relocs].from = from;
			  a8_relocs[num_a8_relocs].destination = destination;
			  a8_relocs[num_a8_relocs].r_type = r_type;
			  a8_relocs[num_a8_relocs].branch_type = branch_type;
			  a8_relocs[num_a8_relocs].sym_name = sym_name;
			  a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
			  a8_relocs[num_a8_relocs].hash = hash;

			  num_a8_relocs++;
			}
		    }
		}

	      /* We're done with the internal relocs, free them.  */
	      if (elf_section_data (section)->relocs == NULL)
		free (internal_relocs);
	    }

	  if (htab->fix_cortex_a8)
	    {
	      /* Sort relocs which might apply to Cortex-A8 erratum.  */
	      qsort (a8_relocs, num_a8_relocs,
		     sizeof (struct a8_erratum_reloc),
		     &a8_reloc_compare);

	      /* Scan for branches which might trigger Cortex-A8 erratum.  */
	      if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
					  &num_a8_fixes, &a8_fix_table_size,
					  a8_relocs, num_a8_relocs,
					  prev_num_a8_fixes, &stub_changed)
		  != 0)
		goto error_ret_free_local;
	    }
	}

      if (prev_num_a8_fixes != num_a8_fixes)
	stub_changed = TRUE;

      if (!stub_changed)
	break;

      /* OK, we've added some stubs.  Find out the new size of the
	 stub sections.  */
      for (stub_sec = htab->stub_bfd->sections;
	   stub_sec != NULL;
	   stub_sec = stub_sec->next)
	{
	  /* Ignore non-stub sections.  */
	  if (!strstr (stub_sec->name, STUB_SUFFIX))
	    continue;

	  stub_sec->size = 0;
	}

      bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);

      /* Add Cortex-A8 erratum veneers to stub section sizes too.  */
      if (htab->fix_cortex_a8)
	for (i = 0; i < num_a8_fixes; i++)
	  {
	    stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
			 a8_fixes[i].section, htab);

	    if (stub_sec == NULL)
	      goto error_ret_free_local;

	    stub_sec->size
	      += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
					      NULL);
	  }


      /* Ask the linker to do its stuff.  */
      (*htab->layout_sections_again) ();
    }

  /* Add stubs for Cortex-A8 erratum fixes now.  */
  if (htab->fix_cortex_a8)
    {
      for (i = 0; i < num_a8_fixes; i++)
	{
	  struct elf32_arm_stub_hash_entry *stub_entry;
	  char *stub_name = a8_fixes[i].stub_name;
	  asection *section = a8_fixes[i].section;
	  unsigned int section_id = a8_fixes[i].section->id;
	  asection *link_sec = htab->stub_group[section_id].link_sec;
	  asection *stub_sec = htab->stub_group[section_id].stub_sec;
	  const insn_sequence *template_sequence;
	  int template_size, size = 0;

	  stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
					     TRUE, FALSE);
	  if (stub_entry == NULL)
	    {
	      /* NOTE(review): section->owner is a bfd *, yet the format
		 uses %s -- looks like %B (or a name string) was intended;
		 confirm against _bfd_error_handler's format support.  */
	      (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
				     section->owner,
				     stub_name);
	      return FALSE;
	    }

	  stub_entry->stub_sec = stub_sec;
	  stub_entry->stub_offset = 0;
	  stub_entry->id_sec = link_sec;
	  stub_entry->stub_type = a8_fixes[i].stub_type;
	  stub_entry->source_value = a8_fixes[i].offset;
	  stub_entry->target_section = a8_fixes[i].section;
	  stub_entry->target_value = a8_fixes[i].target_offset;
	  stub_entry->orig_insn = a8_fixes[i].orig_insn;
	  stub_entry->branch_type = a8_fixes[i].branch_type;

	  size = find_stub_size_and_template (a8_fixes[i].stub_type,
					      &template_sequence,
					      &template_size);

	  stub_entry->stub_size = size;
	  stub_entry->stub_template = template_sequence;
	  stub_entry->stub_template_size = template_size;
	}

      /* Stash the Cortex-A8 erratum fix array for use later in
	 elf32_arm_write_section().  */
      htab->a8_erratum_fixes = a8_fixes;
      htab->num_a8_erratum_fixes = num_a8_fixes;
    }
  else
    {
      htab->a8_erratum_fixes = NULL;
      htab->num_a8_erratum_fixes = 0;
    }
  return TRUE;
}
5737
5738 /* Build all the stubs associated with the current output file. The
5739 stubs are kept in a hash table attached to the main linker hash
5740 table. We also set up the .plt entries for statically linked PIC
5741 functions here. This function is called via arm_elf_finish in the
5742 linker. */
5743
5744 bfd_boolean
5745 elf32_arm_build_stubs (struct bfd_link_info *info)
5746 {
5747 asection *stub_sec;
5748 struct bfd_hash_table *table;
5749 struct elf32_arm_link_hash_table *htab;
5750
5751 htab = elf32_arm_hash_table (info);
5752 if (htab == NULL)
5753 return FALSE;
5754
5755 for (stub_sec = htab->stub_bfd->sections;
5756 stub_sec != NULL;
5757 stub_sec = stub_sec->next)
5758 {
5759 bfd_size_type size;
5760
5761 /* Ignore non-stub sections. */
5762 if (!strstr (stub_sec->name, STUB_SUFFIX))
5763 continue;
5764
5765 /* Allocate memory to hold the linker stubs. */
5766 size = stub_sec->size;
5767 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
5768 if (stub_sec->contents == NULL && size != 0)
5769 return FALSE;
5770 stub_sec->size = 0;
5771 }
5772
5773 /* Build the stubs as directed by the stub hash table. */
5774 table = &htab->stub_hash_table;
5775 bfd_hash_traverse (table, arm_build_one_stub, info);
5776 if (htab->fix_cortex_a8)
5777 {
5778 /* Place the cortex a8 stubs last. */
5779 htab->fix_cortex_a8 = -1;
5780 bfd_hash_traverse (table, arm_build_one_stub, info);
5781 }
5782
5783 return TRUE;
5784 }
5785
5786 /* Locate the Thumb encoded calling stub for NAME. */
5787
5788 static struct elf_link_hash_entry *
5789 find_thumb_glue (struct bfd_link_info *link_info,
5790 const char *name,
5791 char **error_message)
5792 {
5793 char *tmp_name;
5794 struct elf_link_hash_entry *hash;
5795 struct elf32_arm_link_hash_table *hash_table;
5796
5797 /* We need a pointer to the armelf specific hash table. */
5798 hash_table = elf32_arm_hash_table (link_info);
5799 if (hash_table == NULL)
5800 return NULL;
5801
5802 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5803 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
5804
5805 BFD_ASSERT (tmp_name);
5806
5807 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
5808
5809 hash = elf_link_hash_lookup
5810 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
5811
5812 if (hash == NULL
5813 && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
5814 tmp_name, name) == -1)
5815 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
5816
5817 free (tmp_name);
5818
5819 return hash;
5820 }
5821
5822 /* Locate the ARM encoded calling stub for NAME. */
5823
5824 static struct elf_link_hash_entry *
5825 find_arm_glue (struct bfd_link_info *link_info,
5826 const char *name,
5827 char **error_message)
5828 {
5829 char *tmp_name;
5830 struct elf_link_hash_entry *myh;
5831 struct elf32_arm_link_hash_table *hash_table;
5832
5833 /* We need a pointer to the elfarm specific hash table. */
5834 hash_table = elf32_arm_hash_table (link_info);
5835 if (hash_table == NULL)
5836 return NULL;
5837
5838 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5839 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
5840
5841 BFD_ASSERT (tmp_name);
5842
5843 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
5844
5845 myh = elf_link_hash_lookup
5846 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
5847
5848 if (myh == NULL
5849 && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
5850 tmp_name, name) == -1)
5851 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
5852
5853 free (tmp_name);
5854
5855 return myh;
5856 }
5857
/* ARM->Thumb glue (static images):

   .arm
   __func_from_arm:
   ldr r12, __func_addr
   bx  r12
   __func_addr:
   .word func    @ behave as if you saw a ARM_32 reloc.

   (v5t static images)
   .arm
   __func_from_arm:
   ldr pc, __func_addr
   __func_addr:
   .word func    @ behave as if you saw a ARM_32 reloc.

   (relocatable images)
   .arm
   __func_from_arm:
   ldr r12, __func_offset
   add r12, r12, pc
   bx  r12
   __func_offset:
   .word func - .   */

#define ARM2THUMB_STATIC_GLUE_SIZE 12
static const insn32 a2t1_ldr_insn = 0xe59fc000;		/* ldr r12, __func_addr */
static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;	/* bx r12 */
/* Placeholder data word for the target function's address; presumably
   patched when the glue is emitted -- see the .word in the comment above.  */
static const insn32 a2t3_func_addr_insn = 0x00000001;

#define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
static const insn32 a2t1v5_ldr_insn = 0xe51ff004;	/* ldr pc, __func_addr */
/* Placeholder data word for the target function's address (v5 variant).  */
static const insn32 a2t2v5_func_addr_insn = 0x00000001;

#define ARM2THUMB_PIC_GLUE_SIZE 16
static const insn32 a2t1p_ldr_insn = 0xe59fc004;	/* ldr r12, __func_offset */
static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;	/* add r12, r12, pc */
static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;	/* bx r12 */

/* Thumb->ARM:				Thumb->(non-interworking aware) ARM

     .thumb				.thumb
     .align 2				.align 2
 __func_from_thumb:		    __func_from_thumb:
     bx pc				push {r6, lr}
     nop				ldr  r6, __func_addr
     .arm				mov  lr, pc
     b func				bx   r6
					.arm
				    ;; back_to_thumb
					ldmia r13! {r6, lr}
					bx    lr
				    __func_addr:
					.word	     func  */

#define THUMB2ARM_GLUE_SIZE 8
static const insn16 t2a1_bx_pc_insn = 0x4778;		/* bx pc */
static const insn16 t2a2_noop_insn = 0x46c0;		/* nop */
static const insn32 t2a3_b_insn = 0xea000000;		/* b func */

/* Sizes of the erratum veneers emitted elsewhere in this file.  */
#define VFP11_ERRATUM_VENEER_SIZE 8
#define STM32L4XX_ERRATUM_LDM_VENEER_SIZE 16
#define STM32L4XX_ERRATUM_VLDM_VENEER_SIZE 24

#define ARM_BX_VENEER_SIZE 12
static const insn32 armbx1_tst_insn = 0xe3100001;	/* tst rN, #1 */
static const insn32 armbx2_moveq_insn = 0x01a0f000;	/* moveq pc, rN */
static const insn32 armbx3_bx_insn = 0xe12fff10;	/* bx rN */
5926
5927 #ifndef ELFARM_NABI_C_INCLUDED
5928 static void
5929 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
5930 {
5931 asection * s;
5932 bfd_byte * contents;
5933
5934 if (size == 0)
5935 {
5936 /* Do not include empty glue sections in the output. */
5937 if (abfd != NULL)
5938 {
5939 s = bfd_get_linker_section (abfd, name);
5940 if (s != NULL)
5941 s->flags |= SEC_EXCLUDE;
5942 }
5943 return;
5944 }
5945
5946 BFD_ASSERT (abfd != NULL);
5947
5948 s = bfd_get_linker_section (abfd, name);
5949 BFD_ASSERT (s != NULL);
5950
5951 contents = (bfd_byte *) bfd_alloc (abfd, size);
5952
5953 BFD_ASSERT (s->size == size);
5954 s->contents = contents;
5955 }
5956
5957 bfd_boolean
5958 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
5959 {
5960 struct elf32_arm_link_hash_table * globals;
5961
5962 globals = elf32_arm_hash_table (info);
5963 BFD_ASSERT (globals != NULL);
5964
5965 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5966 globals->arm_glue_size,
5967 ARM2THUMB_GLUE_SECTION_NAME);
5968
5969 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5970 globals->thumb_glue_size,
5971 THUMB2ARM_GLUE_SECTION_NAME);
5972
5973 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5974 globals->vfp11_erratum_glue_size,
5975 VFP11_ERRATUM_VENEER_SECTION_NAME);
5976
5977 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5978 globals->stm32l4xx_erratum_glue_size,
5979 STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
5980
5981 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5982 globals->bx_glue_size,
5983 ARM_BX_GLUE_SECTION_NAME);
5984
5985 return TRUE;
5986 }
5987
/* Allocate space and symbols for calling a Thumb function from Arm mode.
   Returns the symbol identifying the stub.  */

static struct elf_link_hash_entry *
record_arm_to_thumb_glue (struct bfd_link_info * link_info,
			  struct elf_link_hash_entry * h)
{
  const char * name = h->root.root.string;
  asection * s;
  char * tmp_name;
  struct elf_link_hash_entry * myh;
  struct bfd_link_hash_entry * bh;
  struct elf32_arm_link_hash_table * globals;
  bfd_vma val;
  bfd_size_type size;

  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  /* The stub lives in the ARM-to-Thumb glue section of the glue owner.  */
  s = bfd_get_linker_section
    (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  /* Derive the stub symbol's name from the target symbol's name.  */
  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
				  + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);

  myh = elf_link_hash_lookup
    (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);

  if (myh != NULL)
    {
      /* We've already seen this guy.  */
      free (tmp_name);
      return myh;
    }

  /* The only trick here is using hash_table->arm_glue_size as the value.
     Even though the section isn't allocated yet, this is where we will be
     putting it.  The +1 on the value marks that the stub has not been
     output yet - not that it is a Thumb function.  */
  bh = NULL;
  val = globals->arm_glue_size + 1;
  _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
				    tmp_name, BSF_GLOBAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  free (tmp_name);

  /* PIC links need the larger position-independent stub; BLX-capable
     targets can use the short static form.  */
  if (bfd_link_pic (link_info)
      || globals->root.is_relocatable_executable
      || globals->pic_veneer)
    size = ARM2THUMB_PIC_GLUE_SIZE;
  else if (globals->use_blx)
    size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
  else
    size = ARM2THUMB_STATIC_GLUE_SIZE;

  /* Reserve room; the contents buffer itself is allocated later by
     bfd_elf32_arm_allocate_interworking_sections.  */
  s->size += size;
  globals->arm_glue_size += size;

  return myh;
}
6060
/* Allocate space for ARMv4 BX veneers.  */

static void
record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
{
  asection * s;
  struct elf32_arm_link_hash_table *globals;
  char *tmp_name;
  struct elf_link_hash_entry *myh;
  struct bfd_link_hash_entry *bh;
  bfd_vma val;

  /* BX PC does not need a veneer.  */
  if (reg == 15)
    return;

  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  /* Check if this veneer has already been allocated.  */
  if (globals->bx_glue_offset[reg])
    return;

  s = bfd_get_linker_section
    (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  /* Add symbol for veneer.  */
  tmp_name = (char *)
    bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);

  /* The symbol cannot exist yet, since bx_glue_offset[reg] was zero.  */
  myh = elf_link_hash_lookup
    (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);

  BFD_ASSERT (myh == NULL);

  bh = NULL;
  val = globals->bx_glue_size;
  _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  /* Reserve room for this veneer.  The "| 2" tags the stored offset so a
     value of zero can still mean "not allocated" (see the early-out check
     above).  */
  s->size += ARM_BX_VENEER_SIZE;
  globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
  globals->bx_glue_size += ARM_BX_VENEER_SIZE;
}
6117
6118
6119 /* Add an entry to the code/data map for section SEC. */
6120
6121 static void
6122 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
6123 {
6124 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
6125 unsigned int newidx;
6126
6127 if (sec_data->map == NULL)
6128 {
6129 sec_data->map = (elf32_arm_section_map *)
6130 bfd_malloc (sizeof (elf32_arm_section_map));
6131 sec_data->mapcount = 0;
6132 sec_data->mapsize = 1;
6133 }
6134
6135 newidx = sec_data->mapcount++;
6136
6137 if (sec_data->mapcount > sec_data->mapsize)
6138 {
6139 sec_data->mapsize *= 2;
6140 sec_data->map = (elf32_arm_section_map *)
6141 bfd_realloc_or_free (sec_data->map, sec_data->mapsize
6142 * sizeof (elf32_arm_section_map));
6143 }
6144
6145 if (sec_data->map)
6146 {
6147 sec_data->map[newidx].vma = vma;
6148 sec_data->map[newidx].type = type;
6149 }
6150 }
6151
6152
6153 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
6154 veneers are handled for now. */
6155
6156 static bfd_vma
6157 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
6158 elf32_vfp11_erratum_list *branch,
6159 bfd *branch_bfd,
6160 asection *branch_sec,
6161 unsigned int offset)
6162 {
6163 asection *s;
6164 struct elf32_arm_link_hash_table *hash_table;
6165 char *tmp_name;
6166 struct elf_link_hash_entry *myh;
6167 struct bfd_link_hash_entry *bh;
6168 bfd_vma val;
6169 struct _arm_elf_section_data *sec_data;
6170 elf32_vfp11_erratum_list *newerr;
6171
6172 hash_table = elf32_arm_hash_table (link_info);
6173 BFD_ASSERT (hash_table != NULL);
6174 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
6175
6176 s = bfd_get_linker_section
6177 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
6178
6179 sec_data = elf32_arm_section_data (s);
6180
6181 BFD_ASSERT (s != NULL);
6182
6183 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
6184 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
6185
6186 BFD_ASSERT (tmp_name);
6187
6188 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
6189 hash_table->num_vfp11_fixes);
6190
6191 myh = elf_link_hash_lookup
6192 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
6193
6194 BFD_ASSERT (myh == NULL);
6195
6196 bh = NULL;
6197 val = hash_table->vfp11_erratum_glue_size;
6198 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
6199 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
6200 NULL, TRUE, FALSE, &bh);
6201
6202 myh = (struct elf_link_hash_entry *) bh;
6203 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6204 myh->forced_local = 1;
6205
6206 /* Link veneer back to calling location. */
6207 sec_data->erratumcount += 1;
6208 newerr = (elf32_vfp11_erratum_list *)
6209 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
6210
6211 newerr->type = VFP11_ERRATUM_ARM_VENEER;
6212 newerr->vma = -1;
6213 newerr->u.v.branch = branch;
6214 newerr->u.v.id = hash_table->num_vfp11_fixes;
6215 branch->u.b.veneer = newerr;
6216
6217 newerr->next = sec_data->erratumlist;
6218 sec_data->erratumlist = newerr;
6219
6220 /* A symbol for the return from the veneer. */
6221 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
6222 hash_table->num_vfp11_fixes);
6223
6224 myh = elf_link_hash_lookup
6225 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
6226
6227 if (myh != NULL)
6228 abort ();
6229
6230 bh = NULL;
6231 val = offset + 4;
6232 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
6233 branch_sec, val, NULL, TRUE, FALSE, &bh);
6234
6235 myh = (struct elf_link_hash_entry *) bh;
6236 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6237 myh->forced_local = 1;
6238
6239 free (tmp_name);
6240
6241 /* Generate a mapping symbol for the veneer section, and explicitly add an
6242 entry for that symbol to the code/data map for the section. */
6243 if (hash_table->vfp11_erratum_glue_size == 0)
6244 {
6245 bh = NULL;
6246 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
6247 ever requires this erratum fix. */
6248 _bfd_generic_link_add_one_symbol (link_info,
6249 hash_table->bfd_of_glue_owner, "$a",
6250 BSF_LOCAL, s, 0, NULL,
6251 TRUE, FALSE, &bh);
6252
6253 myh = (struct elf_link_hash_entry *) bh;
6254 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
6255 myh->forced_local = 1;
6256
6257 /* The elf32_arm_init_maps function only cares about symbols from input
6258 BFDs. We must make a note of this generated mapping symbol
6259 ourselves so that code byteswapping works properly in
6260 elf32_arm_write_section. */
6261 elf32_arm_section_map_add (s, 'a', 0);
6262 }
6263
6264 s->size += VFP11_ERRATUM_VENEER_SIZE;
6265 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
6266 hash_table->num_vfp11_fixes++;
6267
6268 /* The offset of the veneer. */
6269 return val;
6270 }
6271
/* Record information about a STM32L4XX STM erratum veneer.  Only THUMB-mode
   veneers need to be handled because used only in Cortex-M.  BRANCH is the
   erratum site in BRANCH_SEC (of BRANCH_BFD) at OFFSET; VENEER_SIZE is the
   room needed for this particular veneer.  Returns the veneer's offset
   within the glue section.  */

static bfd_vma
record_stm32l4xx_erratum_veneer (struct bfd_link_info *link_info,
				 elf32_stm32l4xx_erratum_list *branch,
				 bfd *branch_bfd,
				 asection *branch_sec,
				 unsigned int offset,
				 bfd_size_type veneer_size)
{
  asection *s;
  struct elf32_arm_link_hash_table *hash_table;
  char *tmp_name;
  struct elf_link_hash_entry *myh;
  struct bfd_link_hash_entry *bh;
  bfd_vma val;
  struct _arm_elf_section_data *sec_data;
  elf32_stm32l4xx_erratum_list *newerr;

  hash_table = elf32_arm_hash_table (link_info);
  BFD_ASSERT (hash_table != NULL);
  BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section
    (hash_table->bfd_of_glue_owner, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  sec_data = elf32_arm_section_data (s);

  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
				  (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
	   hash_table->num_stm32l4xx_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);

  BFD_ASSERT (myh == NULL);

  /* Add a symbol at the veneer's position in the glue section.  */
  bh = NULL;
  val = hash_table->stm32l4xx_erratum_glue_size;
  _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  /* Link veneer back to calling location.  */
  sec_data->stm32l4xx_erratumcount += 1;
  newerr = (elf32_stm32l4xx_erratum_list *)
    bfd_zmalloc (sizeof (elf32_stm32l4xx_erratum_list));

  newerr->type = STM32L4XX_ERRATUM_VENEER;
  newerr->vma = -1;
  newerr->u.v.branch = branch;
  newerr->u.v.id = hash_table->num_stm32l4xx_fixes;
  branch->u.b.veneer = newerr;

  newerr->next = sec_data->stm32l4xx_erratumlist;
  sec_data->stm32l4xx_erratumlist = newerr;

  /* A symbol for the return from the veneer.  */
  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
	   hash_table->num_stm32l4xx_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);

  if (myh != NULL)
    abort ();

  bh = NULL;
  val = offset + 4;
  _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
				    branch_sec, val, NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  free (tmp_name);

  /* Generate a mapping symbol for the veneer section, and explicitly add an
     entry for that symbol to the code/data map for the section.  */
  if (hash_table->stm32l4xx_erratum_glue_size == 0)
    {
      bh = NULL;
      /* Creates a THUMB symbol since there is no other choice.  */
      _bfd_generic_link_add_one_symbol (link_info,
					hash_table->bfd_of_glue_owner, "$t",
					BSF_LOCAL, s, 0, NULL,
					TRUE, FALSE, &bh);

      myh = (struct elf_link_hash_entry *) bh;
      myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
      myh->forced_local = 1;

      /* The elf32_arm_init_maps function only cares about symbols from input
	 BFDs.  We must make a note of this generated mapping symbol
	 ourselves so that code byteswapping works properly in
	 elf32_arm_write_section.  */
      elf32_arm_section_map_add (s, 't', 0);
    }

  s->size += veneer_size;
  hash_table->stm32l4xx_erratum_glue_size += veneer_size;
  hash_table->num_stm32l4xx_fixes++;

  /* The offset of the veneer.  */
  return val;
}
6390
6391 #define ARM_GLUE_SECTION_FLAGS \
6392 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
6393 | SEC_READONLY | SEC_LINKER_CREATED)
6394
6395 /* Create a fake section for use by the ARM backend of the linker. */
6396
6397 static bfd_boolean
6398 arm_make_glue_section (bfd * abfd, const char * name)
6399 {
6400 asection * sec;
6401
6402 sec = bfd_get_linker_section (abfd, name);
6403 if (sec != NULL)
6404 /* Already made. */
6405 return TRUE;
6406
6407 sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
6408
6409 if (sec == NULL
6410 || !bfd_set_section_alignment (abfd, sec, 2))
6411 return FALSE;
6412
6413 /* Set the gc mark to prevent the section from being removed by garbage
6414 collection, despite the fact that no relocs refer to this section. */
6415 sec->gc_mark = 1;
6416
6417 return TRUE;
6418 }
6419
/* Set size of .plt entries.  This function is called from the
   linker scripts in ld/emultempl/{armelf}.em.  Simply latches the global
   flag that selects the long PLT entry format.  */

void
bfd_elf32_arm_use_long_plt (void)
{
  elf32_arm_use_long_plt_entry = TRUE;
}
6428
6429 /* Add the glue sections to ABFD. This function is called from the
6430 linker scripts in ld/emultempl/{armelf}.em. */
6431
6432 bfd_boolean
6433 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
6434 struct bfd_link_info *info)
6435 {
6436 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
6437 bfd_boolean dostm32l4xx = globals
6438 && globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE;
6439 bfd_boolean addglue;
6440
6441 /* If we are only performing a partial
6442 link do not bother adding the glue. */
6443 if (bfd_link_relocatable (info))
6444 return TRUE;
6445
6446 addglue = arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
6447 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
6448 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
6449 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
6450
6451 if (!dostm32l4xx)
6452 return addglue;
6453
6454 return addglue
6455 && arm_make_glue_section (abfd, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
6456 }
6457
6458 /* Select a BFD to be used to hold the sections used by the glue code.
6459 This function is called from the linker scripts in ld/emultempl/
6460 {armelf/pe}.em. */
6461
6462 bfd_boolean
6463 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
6464 {
6465 struct elf32_arm_link_hash_table *globals;
6466
6467 /* If we are only performing a partial link
6468 do not bother getting a bfd to hold the glue. */
6469 if (bfd_link_relocatable (info))
6470 return TRUE;
6471
6472 /* Make sure we don't attach the glue sections to a dynamic object. */
6473 BFD_ASSERT (!(abfd->flags & DYNAMIC));
6474
6475 globals = elf32_arm_hash_table (info);
6476 BFD_ASSERT (globals != NULL);
6477
6478 if (globals->bfd_of_glue_owner != NULL)
6479 return TRUE;
6480
6481 /* Save the bfd for later use. */
6482 globals->bfd_of_glue_owner = abfd;
6483
6484 return TRUE;
6485 }
6486
6487 static void
6488 check_use_blx (struct elf32_arm_link_hash_table *globals)
6489 {
6490 int cpu_arch;
6491
6492 cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
6493 Tag_CPU_arch);
6494
6495 if (globals->fix_arm1176)
6496 {
6497 if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K)
6498 globals->use_blx = 1;
6499 }
6500 else
6501 {
6502 if (cpu_arch > TAG_CPU_ARCH_V4T)
6503 globals->use_blx = 1;
6504 }
6505 }
6506
/* Scan the relocations of every section of ABFD, before section sizes are
   finalised, and record any ARM->Thumb interworking glue (R_ARM_PC24 to a
   Thumb target) or ARMv4 BX veneers (R_ARM_V4BX with fix_v4bx >= 2) that
   the link will need.  Returns FALSE on error.  */

bfd_boolean
bfd_elf32_arm_process_before_allocation (bfd *abfd,
					 struct bfd_link_info *link_info)
{
  Elf_Internal_Shdr *symtab_hdr;
  Elf_Internal_Rela *internal_relocs = NULL;
  Elf_Internal_Rela *irel, *irelend;
  bfd_byte *contents = NULL;

  asection *sec;
  struct elf32_arm_link_hash_table *globals;

  /* If we are only performing a partial link do not bother
     to construct any glue.  */
  if (bfd_link_relocatable (link_info))
    return TRUE;

  /* Here we have a bfd that is to be included on the link.  We have a
     hook to do reloc rummaging, before section sizes are nailed down.  */
  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);

  check_use_blx (globals);

  if (globals->byteswap_code && !bfd_big_endian (abfd))
    {
      _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
			  abfd);
      return FALSE;
    }

  /* PR 5398: If we have not decided to include any loadable sections in
     the output then we will not have a glue owner bfd.  This is OK, it
     just means that there is nothing else for us to do here.  */
  if (globals->bfd_of_glue_owner == NULL)
    return TRUE;

  /* Rummage around all the relocs and map the glue vectors.  */
  sec = abfd->sections;

  if (sec == NULL)
    return TRUE;

  for (; sec != NULL; sec = sec->next)
    {
      if (sec->reloc_count == 0)
	continue;

      if ((sec->flags & SEC_EXCLUDE) != 0)
	continue;

      symtab_hdr = & elf_symtab_hdr (abfd);

      /* Load the relocs.  */
      internal_relocs
	= _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);

      if (internal_relocs == NULL)
	goto error_return;

      irelend = internal_relocs + sec->reloc_count;
      for (irel = internal_relocs; irel < irelend; irel++)
	{
	  long r_type;
	  unsigned long r_index;

	  struct elf_link_hash_entry *h;

	  r_type = ELF32_R_TYPE (irel->r_info);
	  r_index = ELF32_R_SYM (irel->r_info);

	  /* These are the only relocation types we care about.  */
	  if ( r_type != R_ARM_PC24
	      && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
	    continue;

	  /* Get the section contents if we haven't done so already.  */
	  if (contents == NULL)
	    {
	      /* Get cached copy if it exists.  */
	      if (elf_section_data (sec)->this_hdr.contents != NULL)
		contents = elf_section_data (sec)->this_hdr.contents;
	      else
		{
		  /* Go get them off disk.  */
		  if (! bfd_malloc_and_get_section (abfd, sec, &contents))
		    goto error_return;
		}
	    }

	  if (r_type == R_ARM_V4BX)
	    {
	      int reg;

	      /* The low nibble of the BX instruction holds the register
		 being branched through.  */
	      reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
	      record_arm_bx_glue (link_info, reg);
	      continue;
	    }

	  /* If the relocation is not against a symbol it cannot concern us.  */
	  h = NULL;

	  /* We don't care about local symbols.  */
	  if (r_index < symtab_hdr->sh_info)
	    continue;

	  /* This is an external symbol.  */
	  r_index -= symtab_hdr->sh_info;
	  h = (struct elf_link_hash_entry *)
	    elf_sym_hashes (abfd)[r_index];

	  /* If the relocation is against a static symbol it must be within
	     the current section and so cannot be a cross ARM/Thumb relocation.  */
	  if (h == NULL)
	    continue;

	  /* If the call will go through a PLT entry then we do not need
	     glue.  */
	  if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)
	    continue;

	  switch (r_type)
	    {
	    case R_ARM_PC24:
	      /* This one is a call from arm code.  We need to look up
		 the target of the call.  If it is a thumb target, we
		 insert glue.  */
	      if (ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
		  == ST_BRANCH_TO_THUMB)
		record_arm_to_thumb_glue (link_info, h);
	      break;

	    default:
	      abort ();
	    }
	}

      /* Only free CONTENTS / INTERNAL_RELOCS if they are ours, not the
	 cached copies owned by the section.  */
      if (contents != NULL
	  && elf_section_data (sec)->this_hdr.contents != contents)
	free (contents);
      contents = NULL;

      if (internal_relocs != NULL
	  && elf_section_data (sec)->relocs != internal_relocs)
	free (internal_relocs);
      internal_relocs = NULL;
    }

  return TRUE;

error_return:
  if (contents != NULL
      && elf_section_data (sec)->this_hdr.contents != contents)
    free (contents);
  if (internal_relocs != NULL
      && elf_section_data (sec)->relocs != internal_relocs)
    free (internal_relocs);

  return FALSE;
}
6667 #endif
6668
6669
6670 /* Initialise maps of ARM/Thumb/data for input BFDs. */
6671
6672 void
6673 bfd_elf32_arm_init_maps (bfd *abfd)
6674 {
6675 Elf_Internal_Sym *isymbuf;
6676 Elf_Internal_Shdr *hdr;
6677 unsigned int i, localsyms;
6678
6679 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
6680 if (! is_arm_elf (abfd))
6681 return;
6682
6683 if ((abfd->flags & DYNAMIC) != 0)
6684 return;
6685
6686 hdr = & elf_symtab_hdr (abfd);
6687 localsyms = hdr->sh_info;
6688
6689 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
6690 should contain the number of local symbols, which should come before any
6691 global symbols. Mapping symbols are always local. */
6692 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
6693 NULL);
6694
6695 /* No internal symbols read? Skip this BFD. */
6696 if (isymbuf == NULL)
6697 return;
6698
6699 for (i = 0; i < localsyms; i++)
6700 {
6701 Elf_Internal_Sym *isym = &isymbuf[i];
6702 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
6703 const char *name;
6704
6705 if (sec != NULL
6706 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
6707 {
6708 name = bfd_elf_string_from_elf_section (abfd,
6709 hdr->sh_link, isym->st_name);
6710
6711 if (bfd_is_arm_special_symbol_name (name,
6712 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
6713 elf32_arm_section_map_add (sec, name[1], isym->st_value);
6714 }
6715 }
6716 }
6717
6718
6719 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
6720 say what they wanted. */
6721
6722 void
6723 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
6724 {
6725 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6726 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
6727
6728 if (globals == NULL)
6729 return;
6730
6731 if (globals->fix_cortex_a8 == -1)
6732 {
6733 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
6734 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
6735 && (out_attr[Tag_CPU_arch_profile].i == 'A'
6736 || out_attr[Tag_CPU_arch_profile].i == 0))
6737 globals->fix_cortex_a8 = 1;
6738 else
6739 globals->fix_cortex_a8 = 0;
6740 }
6741 }
6742
6743
6744 void
6745 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
6746 {
6747 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6748 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
6749
6750 if (globals == NULL)
6751 return;
6752 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
6753 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
6754 {
6755 switch (globals->vfp11_fix)
6756 {
6757 case BFD_ARM_VFP11_FIX_DEFAULT:
6758 case BFD_ARM_VFP11_FIX_NONE:
6759 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
6760 break;
6761
6762 default:
6763 /* Give a warning, but do as the user requests anyway. */
6764 (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
6765 "workaround is not necessary for target architecture"), obfd);
6766 }
6767 }
6768 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
6769 /* For earlier architectures, we might need the workaround, but do not
6770 enable it by default. If users is running with broken hardware, they
6771 must enable the erratum fix explicitly. */
6772 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
6773 }
6774
6775 void
6776 bfd_elf32_arm_set_stm32l4xx_fix (bfd *obfd, struct bfd_link_info *link_info)
6777 {
6778 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6779 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
6780
6781 if (globals == NULL)
6782 return;
6783
6784 /* We assume only Cortex-M4 may require the fix. */
6785 if (out_attr[Tag_CPU_arch].i != TAG_CPU_ARCH_V7E_M
6786 || out_attr[Tag_CPU_arch_profile].i != 'M')
6787 {
6788 if (globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE)
6789 /* Give a warning, but do as the user requests anyway. */
6790 (*_bfd_error_handler)
6791 (_("%B: warning: selected STM32L4XX erratum "
6792 "workaround is not necessary for target architecture"), obfd);
6793 }
6794 }
6795
/* VFP11 execution pipe an instruction is classified into by
   bfd_arm_vfp11_insn_decode, for the denorm-erratum scan.  */
enum bfd_arm_vfp11_pipe
{
  VFP11_FMAC,	/* Multiply-accumulate pipe (also arithmetic/convert ops).  */
  VFP11_LS,	/* Load/store and register-transfer pipe.  */
  VFP11_DS,	/* Divide/square-root pipe.  */
  VFP11_BAD	/* Not an instruction the scan needs to model.  */
};
6803
6804 /* Return a VFP register number. This is encoded as RX:X for single-precision
6805 registers, or X:RX for double-precision registers, where RX is the group of
6806 four bits in the instruction encoding and X is the single extension bit.
6807 RX and X fields are specified using their lowest (starting) bit. The return
6808 value is:
6809
6810 0...31: single-precision registers s0...s31
6811 32...63: double-precision registers d0...d31.
6812
6813 Although X should be zero for VFP11 (encoding d0...d15 only), we might
6814 encounter VFP3 instructions, so we allow the full range for DP registers. */
6815
6816 static unsigned int
6817 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
6818 unsigned int x)
6819 {
6820 if (is_double)
6821 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
6822 else
6823 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
6824 }
6825
/* Set bits in *WMASK according to a register number REG as encoded by
   bfd_arm_vfp11_regno().  Single-precision registers get one bit; d0-d15
   get the pair of bits covering their two SP halves.  Ignore d16-d31.  */

static void
bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
{
  if (reg >= 48)
    /* d16-d31: outside the tracked set.  */
    return;

  if (reg >= 32)
    *wmask |= 3 << ((reg - 32) * 2);
  else
    *wmask |= 1 << reg;
}
6837
6838 /* Return TRUE if WMASK overwrites anything in REGS. */
6839
6840 static bfd_boolean
6841 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
6842 {
6843 int i;
6844
6845 for (i = 0; i < numregs; i++)
6846 {
6847 unsigned int reg = regs[i];
6848
6849 if (reg < 32 && (wmask & (1 << reg)) != 0)
6850 return TRUE;
6851
6852 reg -= 32;
6853
6854 if (reg >= 16)
6855 continue;
6856
6857 if ((wmask & (3 << (reg * 2))) != 0)
6858 return TRUE;
6859 }
6860
6861 return FALSE;
6862 }
6863
/* In this function, we're interested in two things: finding input registers
   for VFP data-processing instructions, and finding the set of registers which
   arbitrary VFP instructions may write to.  We use a 32-bit unsigned int to
   hold the written set, so FLDM etc. are easy to deal with (we're only
   interested in 32 SP registers or 16 dp registers, due to the VFP version
   implemented by the chip in question).  DP registers are marked by setting
   both SP registers in the write mask).

   INSN is the (ARM-encoded) instruction word.  Registers written are OR-ed
   into *DESTMASK; input registers of FMAC/DS-pipe instructions are stored
   in REGS with their count in *NUMREGS.  Returns the pipe the instruction
   issues to, or VFP11_BAD if it is not of interest.  */

static enum bfd_arm_vfp11_pipe
bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
			   int *numregs)
{
  enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
  /* Bits 8-11 are 0xb for double-precision encodings.  */
  bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;

  if ((insn & 0x0f000e10) == 0x0e000a00)  /* A data-processing insn.  */
    {
      unsigned int pqrs;
      unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);

      /* Gather the p, q, r, s opcode bits into a single selector.  */
      pqrs = ((insn & 0x00800000) >> 20)
	   | ((insn & 0x00300000) >> 19)
	   | ((insn & 0x00000040) >> 6);

      switch (pqrs)
	{
	case 0: /* fmac[sd].  */
	case 1: /* fnmac[sd].  */
	case 2: /* fmsc[sd].  */
	case 3: /* fnmsc[sd].  */
	  vpipe = VFP11_FMAC;
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  /* The accumulate forms also read Fd.  */
	  regs[0] = fd;
	  regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn.  */
	  regs[2] = fm;
	  *numregs = 3;
	  break;

	case 4: /* fmul[sd].  */
	case 5: /* fnmul[sd].  */
	case 6: /* fadd[sd].  */
	case 7: /* fsub[sd].  */
	  vpipe = VFP11_FMAC;
	  goto vfp_binop;

	case 8: /* fdiv[sd].  */
	  vpipe = VFP11_DS;
	  vfp_binop:
	  /* Plain two-input operations: inputs Fn and Fm, output Fd.  */
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn.  */
	  regs[1] = fm;
	  *numregs = 2;
	  break;

	case 15: /* extended opcode.  */
	  {
	    unsigned int extn = ((insn >> 15) & 0x1e)
			      | ((insn >> 7) & 1);

	    switch (extn)
	      {
	      case 0: /* fcpy[sd].  */
	      case 1: /* fabs[sd].  */
	      case 2: /* fneg[sd].  */
	      case 8: /* fcmp[sd].  */
	      case 9: /* fcmpe[sd].  */
	      case 10: /* fcmpz[sd].  */
	      case 11: /* fcmpez[sd].  */
	      case 16: /* fuito[sd].  */
	      case 17: /* fsito[sd].  */
	      case 24: /* ftoui[sd].  */
	      case 25: /* ftouiz[sd].  */
	      case 26: /* ftosi[sd].  */
	      case 27: /* ftosiz[sd].  */
		/* These instructions will not bounce due to underflow.  */
		*numregs = 0;
		vpipe = VFP11_FMAC;
		break;

	      case 3: /* fsqrt[sd].  */
		/* fsqrt cannot underflow, but it can (perhaps) overwrite
		   registers to cause the erratum in previous instructions.  */
		bfd_arm_vfp11_write_mask (destmask, fd);
		vpipe = VFP11_DS;
		break;

	      case 15: /* fcvt{ds,sd}.  */
		{
		  int rnum = 0;

		  bfd_arm_vfp11_write_mask (destmask, fd);

		  /* Only FCVTSD can underflow.  */
		  if ((insn & 0x100) != 0)
		    regs[rnum++] = fm;

		  *numregs = rnum;

		  vpipe = VFP11_FMAC;
		}
		break;

	      default:
		return VFP11_BAD;
	      }
	  }
	  break;

	default:
	  return VFP11_BAD;
	}
    }
  /* Two-register transfer.  */
  else if ((insn & 0x0fe00ed0) == 0x0c400a10)
    {
      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);

      /* Bit 20 clear means a transfer towards the VFP registers, which
	 therefore writes Fm (an SP pair when not double).  */
      if ((insn & 0x100000) == 0)
	{
	  if (is_double)
	    bfd_arm_vfp11_write_mask (destmask, fm);
	  else
	    {
	      bfd_arm_vfp11_write_mask (destmask, fm);
	      bfd_arm_vfp11_write_mask (destmask, fm + 1);
	    }
	}

      vpipe = VFP11_LS;
    }
  else if ((insn & 0x0e100e00) == 0x0c100a00)  /* A load insn.  */
    {
      int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
      unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);

      switch (puw)
	{
	case 0: /* Two-reg transfer.  We should catch these above.  */
	  abort ();

	case 2: /* fldm[sdx].  */
	case 3:
	case 5:
	  {
	    /* Multiple-load: the low byte encodes the transfer length
	       (in words, so halved for double registers).  */
	    unsigned int i, offset = insn & 0xff;

	    if (is_double)
	      offset >>= 1;

	    for (i = fd; i < fd + offset; i++)
	      bfd_arm_vfp11_write_mask (destmask, i);
	  }
	  break;

	case 4: /* fld[sd].  */
	case 6:
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  break;

	default:
	  return VFP11_BAD;
	}

      vpipe = VFP11_LS;
    }
  /* Single-register transfer.  Note L==0.  */
  else if ((insn & 0x0f100e10) == 0x0e000a10)
    {
      unsigned int opcode = (insn >> 21) & 7;
      unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);

      switch (opcode)
	{
	case 0: /* fmsr/fmdlr.  */
	case 1: /* fmdhr.  */
	  /* Mark fmdhr and fmdlr as writing to the whole of the DP
	     destination register.  I don't know if this is exactly right,
	     but it is the conservative choice.  */
	  bfd_arm_vfp11_write_mask (destmask, fn);
	  break;

	case 7: /* fmxr.  */
	  break;
	}

      vpipe = VFP11_LS;
    }

  return vpipe;
}
7055
7056
7057 static int elf32_arm_compare_mapping (const void * a, const void * b);
7058
7059
7060 /* Look for potentially-troublesome code sequences which might trigger the
7061 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
7062 (available from ARM) for details of the erratum. A short version is
7063 described in ld.texinfo. */
7064
7065 bfd_boolean
7066 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
7067 {
7068 asection *sec;
7069 bfd_byte *contents = NULL;
7070 int state = 0;
7071 int regs[3], numregs = 0;
7072 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
7073 int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
7074
7075 if (globals == NULL)
7076 return FALSE;
7077
7078 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
7079 The states transition as follows:
7080
7081 0 -> 1 (vector) or 0 -> 2 (scalar)
7082 A VFP FMAC-pipeline instruction has been seen. Fill
7083 regs[0]..regs[numregs-1] with its input operands. Remember this
7084 instruction in 'first_fmac'.
7085
7086 1 -> 2
7087 Any instruction, except for a VFP instruction which overwrites
7088 regs[*].
7089
7090 1 -> 3 [ -> 0 ] or
7091 2 -> 3 [ -> 0 ]
7092 A VFP instruction has been seen which overwrites any of regs[*].
7093 We must make a veneer! Reset state to 0 before examining next
7094 instruction.
7095
7096 2 -> 0
7097 If we fail to match anything in state 2, reset to state 0 and reset
7098 the instruction pointer to the instruction after 'first_fmac'.
7099
7100 If the VFP11 vector mode is in use, there must be at least two unrelated
7101 instructions between anti-dependent VFP11 instructions to properly avoid
7102 triggering the erratum, hence the use of the extra state 1. */
7103
7104 /* If we are only performing a partial link do not bother
7105 to construct any glue. */
7106 if (bfd_link_relocatable (link_info))
7107 return TRUE;
7108
7109 /* Skip if this bfd does not correspond to an ELF image. */
7110 if (! is_arm_elf (abfd))
7111 return TRUE;
7112
7113 /* We should have chosen a fix type by the time we get here. */
7114 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
7115
7116 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
7117 return TRUE;
7118
7119 /* Skip this BFD if it corresponds to an executable or dynamic object. */
7120 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
7121 return TRUE;
7122
7123 for (sec = abfd->sections; sec != NULL; sec = sec->next)
7124 {
7125 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
7126 struct _arm_elf_section_data *sec_data;
7127
7128 /* If we don't have executable progbits, we're not interested in this
7129 section. Also skip if section is to be excluded. */
7130 if (elf_section_type (sec) != SHT_PROGBITS
7131 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
7132 || (sec->flags & SEC_EXCLUDE) != 0
7133 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
7134 || sec->output_section == bfd_abs_section_ptr
7135 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
7136 continue;
7137
7138 sec_data = elf32_arm_section_data (sec);
7139
7140 if (sec_data->mapcount == 0)
7141 continue;
7142
7143 if (elf_section_data (sec)->this_hdr.contents != NULL)
7144 contents = elf_section_data (sec)->this_hdr.contents;
7145 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
7146 goto error_return;
7147
7148 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
7149 elf32_arm_compare_mapping);
7150
7151 for (span = 0; span < sec_data->mapcount; span++)
7152 {
7153 unsigned int span_start = sec_data->map[span].vma;
7154 unsigned int span_end = (span == sec_data->mapcount - 1)
7155 ? sec->size : sec_data->map[span + 1].vma;
7156 char span_type = sec_data->map[span].type;
7157
7158 /* FIXME: Only ARM mode is supported at present. We may need to
7159 support Thumb-2 mode also at some point. */
7160 if (span_type != 'a')
7161 continue;
7162
7163 for (i = span_start; i < span_end;)
7164 {
7165 unsigned int next_i = i + 4;
7166 unsigned int insn = bfd_big_endian (abfd)
7167 ? (contents[i] << 24)
7168 | (contents[i + 1] << 16)
7169 | (contents[i + 2] << 8)
7170 | contents[i + 3]
7171 : (contents[i + 3] << 24)
7172 | (contents[i + 2] << 16)
7173 | (contents[i + 1] << 8)
7174 | contents[i];
7175 unsigned int writemask = 0;
7176 enum bfd_arm_vfp11_pipe vpipe;
7177
7178 switch (state)
7179 {
7180 case 0:
7181 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
7182 &numregs);
7183 /* I'm assuming the VFP11 erratum can trigger with denorm
7184 operands on either the FMAC or the DS pipeline. This might
7185 lead to slightly overenthusiastic veneer insertion. */
7186 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
7187 {
7188 state = use_vector ? 1 : 2;
7189 first_fmac = i;
7190 veneer_of_insn = insn;
7191 }
7192 break;
7193
7194 case 1:
7195 {
7196 int other_regs[3], other_numregs;
7197 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
7198 other_regs,
7199 &other_numregs);
7200 if (vpipe != VFP11_BAD
7201 && bfd_arm_vfp11_antidependency (writemask, regs,
7202 numregs))
7203 state = 3;
7204 else
7205 state = 2;
7206 }
7207 break;
7208
7209 case 2:
7210 {
7211 int other_regs[3], other_numregs;
7212 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
7213 other_regs,
7214 &other_numregs);
7215 if (vpipe != VFP11_BAD
7216 && bfd_arm_vfp11_antidependency (writemask, regs,
7217 numregs))
7218 state = 3;
7219 else
7220 {
7221 state = 0;
7222 next_i = first_fmac + 4;
7223 }
7224 }
7225 break;
7226
7227 case 3:
7228 abort (); /* Should be unreachable. */
7229 }
7230
7231 if (state == 3)
7232 {
7233 elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *)
7234 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
7235
7236 elf32_arm_section_data (sec)->erratumcount += 1;
7237
7238 newerr->u.b.vfp_insn = veneer_of_insn;
7239
7240 switch (span_type)
7241 {
7242 case 'a':
7243 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
7244 break;
7245
7246 default:
7247 abort ();
7248 }
7249
7250 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
7251 first_fmac);
7252
7253 newerr->vma = -1;
7254
7255 newerr->next = sec_data->erratumlist;
7256 sec_data->erratumlist = newerr;
7257
7258 state = 0;
7259 }
7260
7261 i = next_i;
7262 }
7263 }
7264
7265 if (contents != NULL
7266 && elf_section_data (sec)->this_hdr.contents != contents)
7267 free (contents);
7268 contents = NULL;
7269 }
7270
7271 return TRUE;
7272
7273 error_return:
7274 if (contents != NULL
7275 && elf_section_data (sec)->this_hdr.contents != contents)
7276 free (contents);
7277
7278 return FALSE;
7279 }
7280
7281 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
7282 after sections have been laid out, using specially-named symbols. */
7283
7284 void
7285 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
7286 struct bfd_link_info *link_info)
7287 {
7288 asection *sec;
7289 struct elf32_arm_link_hash_table *globals;
7290 char *tmp_name;
7291
7292 if (bfd_link_relocatable (link_info))
7293 return;
7294
7295 /* Skip if this bfd does not correspond to an ELF image. */
7296 if (! is_arm_elf (abfd))
7297 return;
7298
7299 globals = elf32_arm_hash_table (link_info);
7300 if (globals == NULL)
7301 return;
7302
7303 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7304 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
7305
7306 for (sec = abfd->sections; sec != NULL; sec = sec->next)
7307 {
7308 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
7309 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
7310
7311 for (; errnode != NULL; errnode = errnode->next)
7312 {
7313 struct elf_link_hash_entry *myh;
7314 bfd_vma vma;
7315
7316 switch (errnode->type)
7317 {
7318 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
7319 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
7320 /* Find veneer symbol. */
7321 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
7322 errnode->u.b.veneer->u.v.id);
7323
7324 myh = elf_link_hash_lookup
7325 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7326
7327 if (myh == NULL)
7328 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
7329 "`%s'"), abfd, tmp_name);
7330
7331 vma = myh->root.u.def.section->output_section->vma
7332 + myh->root.u.def.section->output_offset
7333 + myh->root.u.def.value;
7334
7335 errnode->u.b.veneer->vma = vma;
7336 break;
7337
7338 case VFP11_ERRATUM_ARM_VENEER:
7339 case VFP11_ERRATUM_THUMB_VENEER:
7340 /* Find return location. */
7341 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
7342 errnode->u.v.id);
7343
7344 myh = elf_link_hash_lookup
7345 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7346
7347 if (myh == NULL)
7348 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
7349 "`%s'"), abfd, tmp_name);
7350
7351 vma = myh->root.u.def.section->output_section->vma
7352 + myh->root.u.def.section->output_offset
7353 + myh->root.u.def.value;
7354
7355 errnode->u.v.branch->vma = vma;
7356 break;
7357
7358 default:
7359 abort ();
7360 }
7361 }
7362 }
7363
7364 free (tmp_name);
7365 }
7366
7367 /* Find virtual-memory addresses for STM32L4XX erratum veneers and
7368 return locations after sections have been laid out, using
7369 specially-named symbols. */
7370
7371 void
7372 bfd_elf32_arm_stm32l4xx_fix_veneer_locations (bfd *abfd,
7373 struct bfd_link_info *link_info)
7374 {
7375 asection *sec;
7376 struct elf32_arm_link_hash_table *globals;
7377 char *tmp_name;
7378
7379 if (bfd_link_relocatable (link_info))
7380 return;
7381
7382 /* Skip if this bfd does not correspond to an ELF image. */
7383 if (! is_arm_elf (abfd))
7384 return;
7385
7386 globals = elf32_arm_hash_table (link_info);
7387 if (globals == NULL)
7388 return;
7389
7390 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7391 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
7392
7393 for (sec = abfd->sections; sec != NULL; sec = sec->next)
7394 {
7395 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
7396 elf32_stm32l4xx_erratum_list *errnode = sec_data->stm32l4xx_erratumlist;
7397
7398 for (; errnode != NULL; errnode = errnode->next)
7399 {
7400 struct elf_link_hash_entry *myh;
7401 bfd_vma vma;
7402
7403 switch (errnode->type)
7404 {
7405 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
7406 /* Find veneer symbol. */
7407 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
7408 errnode->u.b.veneer->u.v.id);
7409
7410 myh = elf_link_hash_lookup
7411 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7412
7413 if (myh == NULL)
7414 (*_bfd_error_handler) (_("%B: unable to find STM32L4XX veneer "
7415 "`%s'"), abfd, tmp_name);
7416
7417 vma = myh->root.u.def.section->output_section->vma
7418 + myh->root.u.def.section->output_offset
7419 + myh->root.u.def.value;
7420
7421 errnode->u.b.veneer->vma = vma;
7422 break;
7423
7424 case STM32L4XX_ERRATUM_VENEER:
7425 /* Find return location. */
7426 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
7427 errnode->u.v.id);
7428
7429 myh = elf_link_hash_lookup
7430 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7431
7432 if (myh == NULL)
7433 (*_bfd_error_handler) (_("%B: unable to find STM32L4XX veneer "
7434 "`%s'"), abfd, tmp_name);
7435
7436 vma = myh->root.u.def.section->output_section->vma
7437 + myh->root.u.def.section->output_offset
7438 + myh->root.u.def.value;
7439
7440 errnode->u.v.branch->vma = vma;
7441 break;
7442
7443 default:
7444 abort ();
7445 }
7446 }
7447 }
7448
7449 free (tmp_name);
7450 }
7451
7452 static inline bfd_boolean
7453 is_thumb2_ldmia (const insn32 insn)
7454 {
7455 /* Encoding T2: LDM<c>.W <Rn>{!},<registers>
7456 1110 - 1000 - 10W1 - rrrr - PM (0) l - llll - llll - llll. */
7457 return (insn & 0xffd02000) == 0xe8900000;
7458 }
7459
7460 static inline bfd_boolean
7461 is_thumb2_ldmdb (const insn32 insn)
7462 {
7463 /* Encoding T1: LDMDB<c> <Rn>{!},<registers>
7464 1110 - 1001 - 00W1 - rrrr - PM (0) l - llll - llll - llll. */
7465 return (insn & 0xffd02000) == 0xe9100000;
7466 }
7467
7468 static inline bfd_boolean
7469 is_thumb2_vldm (const insn32 insn)
7470 {
7471 /* A6.5 Extension register load or store instruction
7472 A7.7.229
7473 We look for SP 32-bit and DP 64-bit registers.
7474 Encoding T1 VLDM{mode}<c> <Rn>{!}, <list>
7475 <list> is consecutive 64-bit registers
7476 1110 - 110P - UDW1 - rrrr - vvvv - 1011 - iiii - iiii
7477 Encoding T2 VLDM{mode}<c> <Rn>{!}, <list>
7478 <list> is consecutive 32-bit registers
7479 1110 - 110P - UDW1 - rrrr - vvvv - 1010 - iiii - iiii
7480 if P==0 && U==1 && W==1 && Rn=1101 VPOP
7481 if PUW=010 || PUW=011 || PUW=101 VLDM. */
7482 return
7483 (((insn & 0xfe100f00) == 0xec100b00) ||
7484 ((insn & 0xfe100f00) == 0xec100a00))
7485 && /* (IA without !). */
7486 (((((insn << 7) >> 28) & 0xd) == 0x4)
7487 /* (IA with !), includes VPOP (when reg number is SP). */
7488 || ((((insn << 7) >> 28) & 0xd) == 0x5)
7489 /* (DB with !). */
7490 || ((((insn << 7) >> 28) & 0xd) == 0x9));
7491 }
7492
7493 /* STM STM32L4XX erratum : This function assumes that it receives an LDM or
7494 VLDM opcode and:
7495 - computes the number and the mode of memory accesses
7496 - decides if the replacement should be done:
7497 . replaces only if > 8-word accesses
7498 . or (testing purposes only) replaces all accesses. */
7499
7500 static bfd_boolean
7501 stm32l4xx_need_create_replacing_stub (const insn32 insn,
7502 bfd_arm_stm32l4xx_fix stm32l4xx_fix)
7503 {
7504 int nb_words = 0;
7505
7506 /* The field encoding the register list is the same for both LDMIA
7507 and LDMDB encodings. */
7508 if (is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn))
7509 nb_words = popcount (insn & 0x0000ffff);
7510 else if (is_thumb2_vldm (insn))
7511 nb_words = (insn & 0xff);
7512
7513 /* DEFAULT mode accounts for the real bug condition situation,
7514 ALL mode inserts stubs for each LDM/VLDM instruction (testing). */
7515 return
7516 (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_DEFAULT) ? nb_words > 8 :
7517 (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_ALL) ? TRUE : FALSE;
7518 }
7519
/* Look for potentially-troublesome code sequences which might trigger
   the STM STM32L4XX erratum.  Walks every executable span of ABFD,
   decodes Thumb-2 instructions, and records a veneer for each LDM/VLDM
   that needs replacing.  Returns FALSE only on failure to read section
   contents.  */

bfd_boolean
bfd_elf32_arm_stm32l4xx_erratum_scan (bfd *abfd,
				      struct bfd_link_info *link_info)
{
  asection *sec;
  bfd_byte *contents = NULL;
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);

  if (globals == NULL)
    return FALSE;

  /* If we are only performing a partial link do not bother
     to construct any glue.  */
  if (bfd_link_relocatable (link_info))
    return TRUE;

  /* Skip if this bfd does not correspond to an ELF image.  */
  if (! is_arm_elf (abfd))
    return TRUE;

  if (globals->stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_NONE)
    return TRUE;

  /* Skip this BFD if it corresponds to an executable or dynamic object.  */
  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
    return TRUE;

  for (sec = abfd->sections; sec != NULL; sec = sec->next)
    {
      unsigned int i, span;
      struct _arm_elf_section_data *sec_data;

      /* If we don't have executable progbits, we're not interested in this
	 section.  Also skip if section is to be excluded.  */
      if (elf_section_type (sec) != SHT_PROGBITS
	  || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
	  || (sec->flags & SEC_EXCLUDE) != 0
	  || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
	  || sec->output_section == bfd_abs_section_ptr
	  || strcmp (sec->name, STM32L4XX_ERRATUM_VENEER_SECTION_NAME) == 0)
	continue;

      sec_data = elf32_arm_section_data (sec);

      /* No mapping symbols means no code spans to scan.  */
      if (sec_data->mapcount == 0)
	continue;

      if (elf_section_data (sec)->this_hdr.contents != NULL)
	contents = elf_section_data (sec)->this_hdr.contents;
      else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
	goto error_return;

      /* Sort the mapping symbols so spans are visited in address order.  */
      qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
	     elf32_arm_compare_mapping);

      for (span = 0; span < sec_data->mapcount; span++)
	{
	  unsigned int span_start = sec_data->map[span].vma;
	  unsigned int span_end = (span == sec_data->mapcount - 1)
	    ? sec->size : sec_data->map[span + 1].vma;
	  char span_type = sec_data->map[span].type;
	  /* Number of instructions still covered by the current IT
	     block; zero when outside any IT block.  */
	  int itblock_current_pos = 0;

	  /* Only Thumb2 mode need be supported with this CM4 specific
	     code, we should not encounter any arm mode eg span_type
	     != 'a'.  */
	  if (span_type != 't')
	    continue;

	  for (i = span_start; i < span_end;)
	    {
	      unsigned int insn = bfd_get_16 (abfd, &contents[i]);
	      bfd_boolean insn_32bit = FALSE;
	      bfd_boolean is_ldm = FALSE;
	      bfd_boolean is_vldm = FALSE;
	      bfd_boolean is_not_last_in_it_block = FALSE;

	      /* The first 16-bits of all 32-bit thumb2 instructions start
		 with opcode[15..13]=0b111 and the encoded op1 can be anything
		 except opcode[12..11]!=0b00.
		 See 32-bit Thumb instruction encoding.  */
	      if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
		insn_32bit = TRUE;

	      /* Compute the predicate that tells if the instruction
		 is concerned by the IT block
		 - Creates an error if there is a ldm that is not
		   last in the IT block thus cannot be replaced
		 - Otherwise we can create a branch at the end of the
		   IT block, it will be controlled naturally by IT
		   with the proper pseudo-predicate
		 - So the only interesting predicate is the one that
		   tells that we are not on the last item of an IT
		   block.  */
	      if (itblock_current_pos != 0)
		  is_not_last_in_it_block = !!--itblock_current_pos;

	      if (insn_32bit)
		{
		  /* Load the rest of the insn (in manual-friendly order).  */
		  insn = (insn << 16) | bfd_get_16 (abfd, &contents[i + 2]);
		  is_ldm = is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn);
		  is_vldm = is_thumb2_vldm (insn);

		  /* Veneers are created for (v)ldm depending on
		     option flags and memory accesses conditions; but
		     if the instruction is not the last instruction of
		     an IT block, we cannot create a jump there, so we
		     bail out.  */
		  if ((is_ldm || is_vldm) &&
		      stm32l4xx_need_create_replacing_stub
		      (insn, globals->stm32l4xx_fix))
		    {
		      if (is_not_last_in_it_block)
			{
			  (*_bfd_error_handler)
			    /* Note - overlong line used here to allow for translation.  */
			    (_("\
%B(%A+0x%lx): error: multiple load detected in non-last IT block instruction : STM32L4XX veneer cannot be generated.\n"
			       "Use gcc option -mrestrict-it to generate only one instruction per IT block.\n"),
			     abfd, sec, (long)i);
			}
		      else
			{
			  elf32_stm32l4xx_erratum_list *newerr =
			    (elf32_stm32l4xx_erratum_list *)
			    bfd_zmalloc
			    (sizeof (elf32_stm32l4xx_erratum_list));

			  elf32_arm_section_data (sec)
			    ->stm32l4xx_erratumcount += 1;
			  newerr->u.b.insn = insn;
			  /* We create only thumb branches.  */
			  newerr->type =
			    STM32L4XX_ERRATUM_BRANCH_TO_VENEER;
			  record_stm32l4xx_erratum_veneer
			    (link_info, newerr, abfd, sec,
			     i,
			     is_ldm ?
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE:
			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
			  /* Filled in later by
			     bfd_elf32_arm_stm32l4xx_fix_veneer_locations.  */
			  newerr->vma = -1;
			  newerr->next = sec_data->stm32l4xx_erratumlist;
			  sec_data->stm32l4xx_erratumlist = newerr;
			}
		    }
		}
	      else
		{
		  /* A7.7.37 IT p208
		     IT blocks are only encoded in T1
		     Encoding T1: IT{x{y{z}}} <firstcond>
		     1 0 1 1 - 1 1 1 1 - firstcond - mask
		     if mask = '0000' then see 'related encodings'
		     We don't deal with UNPREDICTABLE, just ignore these.
		     There can be no nested IT blocks so an IT block
		     is naturally a new one for which it is worth
		     computing its size.  */
		  bfd_boolean is_newitblock = ((insn & 0xff00) == 0xbf00) &&
		    ((insn & 0x000f) != 0x0000);
		  /* If we have a new IT block we compute its size.  */
		  if (is_newitblock)
		    {
		      /* Compute the number of instructions controlled
			 by the IT block, it will be used to decide
			 whether we are inside an IT block or not.  */
		      unsigned int mask = insn & 0x000f;
		      itblock_current_pos = 4 - ctz (mask);
		    }
		}

	      i += insn_32bit ? 4 : 2;
	    }
	}

      if (contents != NULL
	  && elf_section_data (sec)->this_hdr.contents != contents)
	free (contents);
      contents = NULL;
    }

  return TRUE;

error_return:
  if (contents != NULL
      && elf_section_data (sec)->this_hdr.contents != contents)
    free (contents);

  return FALSE;
}
7713
7714 /* Set target relocation values needed during linking. */
7715
7716 void
7717 bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
7718 struct bfd_link_info *link_info,
7719 int target1_is_rel,
7720 char * target2_type,
7721 int fix_v4bx,
7722 int use_blx,
7723 bfd_arm_vfp11_fix vfp11_fix,
7724 bfd_arm_stm32l4xx_fix stm32l4xx_fix,
7725 int no_enum_warn, int no_wchar_warn,
7726 int pic_veneer, int fix_cortex_a8,
7727 int fix_arm1176)
7728 {
7729 struct elf32_arm_link_hash_table *globals;
7730
7731 globals = elf32_arm_hash_table (link_info);
7732 if (globals == NULL)
7733 return;
7734
7735 globals->target1_is_rel = target1_is_rel;
7736 if (strcmp (target2_type, "rel") == 0)
7737 globals->target2_reloc = R_ARM_REL32;
7738 else if (strcmp (target2_type, "abs") == 0)
7739 globals->target2_reloc = R_ARM_ABS32;
7740 else if (strcmp (target2_type, "got-rel") == 0)
7741 globals->target2_reloc = R_ARM_GOT_PREL;
7742 else
7743 {
7744 _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
7745 target2_type);
7746 }
7747 globals->fix_v4bx = fix_v4bx;
7748 globals->use_blx |= use_blx;
7749 globals->vfp11_fix = vfp11_fix;
7750 globals->stm32l4xx_fix = stm32l4xx_fix;
7751 globals->pic_veneer = pic_veneer;
7752 globals->fix_cortex_a8 = fix_cortex_a8;
7753 globals->fix_arm1176 = fix_arm1176;
7754
7755 BFD_ASSERT (is_arm_elf (output_bfd));
7756 elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
7757 elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
7758 }
7759
/* Replace the target offset of a Thumb bl or b.w instruction.
   ABFD supplies the byte order, OFFSET is the (even) signed branch
   displacement, and INSN points at the first halfword of the
   instruction to rewrite.  */

static void
insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
{
  bfd_vma upper;	/* First halfword of the instruction.  */
  bfd_vma lower;	/* Second halfword of the instruction.  */
  int reloc_sign;

  /* Branch displacements are halfword aligned.  */
  BFD_ASSERT ((offset & 1) == 0);

  upper = bfd_get_16 (abfd, insn);
  lower = bfd_get_16 (abfd, insn + 2);
  reloc_sign = (offset < 0) ? 1 : 0;
  /* Upper halfword: sign bit S (bit 10) and offset bits 21..12.  */
  upper = (upper & ~(bfd_vma) 0x7ff)
	  | ((offset >> 12) & 0x3ff)
	  | (reloc_sign << 10);
  /* Lower halfword: bits 13 and 11 hold offset bits 23 and 22,
     inverted and XORed with the sign, plus offset bits 11..1.
     The ~0x2fff mask preserves bit 12, which presumably
     distinguishes BL from B.W -- NOTE(review): confirm against the
     T32 branch encodings.  */
  lower = (lower & ~(bfd_vma) 0x2fff)
	  | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
	  | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
	  | ((offset >> 1) & 0x7ff);
  bfd_put_16 (abfd, upper, insn);
  bfd_put_16 (abfd, lower, insn + 2);
}
7784
/* Thumb code calling an ARM function.  Populate (on first use) the
   Thumb->ARM glue stub for NAME in the glue section and retarget the
   Thumb BL at HIT_DATA so it branches to the stub.  Returns TRUE on
   success, FALSE if the glue symbol is missing or interworking is
   disabled for the target's owning bfd.  */

static int
elf32_thumb_to_arm_stub (struct bfd_link_info * info,
			 const char * name,
			 bfd * input_bfd,
			 bfd * output_bfd,
			 asection * input_section,
			 bfd_byte * hit_data,
			 asection * sym_sec,
			 bfd_vma offset,
			 bfd_signed_vma addend,
			 bfd_vma val,
			 char **error_message)
{
  asection * s = 0;
  bfd_vma my_offset;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  /* Look up the glue symbol previously created for this target.  */
  myh = find_thumb_glue (info, name, error_message);
  if (myh == NULL)
    return FALSE;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  my_offset = myh->root.u.def.value;

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      THUMB2ARM_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  /* An odd value marks a stub whose contents have not been written
     yet; the low bit is cleared below once it has been populated.  */
  if ((my_offset & 0x01) == 0x01)
    {
      if (sym_sec != NULL
	  && sym_sec->owner != NULL
	  && !INTERWORK_FLAG (sym_sec->owner))
	{
	  (*_bfd_error_handler)
	    (_("%B(%s): warning: interworking not enabled.\n"
	       "  first occurrence: %B: Thumb call to ARM"),
	     sym_sec->owner, input_bfd, name);

	  return FALSE;
	}

      --my_offset;
      myh->root.u.def.value = my_offset;

      /* Stub body: bx pc / nop / b <target>.  */
      put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
		      s->contents + my_offset);

      put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
		      s->contents + my_offset + 2);

      ret_offset =
	/* Address of destination of the stub.  */
	((bfd_signed_vma) val)
	- ((bfd_signed_vma)
	   /* Offset from the start of the current section
	      to the start of the stubs.  */
	   (s->output_offset
	    /* Offset of the start of this stub from the start of the stubs.  */
	    + my_offset
	    /* Address of the start of the current section.  */
	    + s->output_section->vma)
	   /* The branch instruction is 4 bytes into the stub.  */
	   + 4
	   /* ARM branches work from the pc of the instruction + 8.  */
	   + 8);

      put_arm_insn (globals, output_bfd,
		    (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
		    s->contents + my_offset + 4);
    }

  BFD_ASSERT (my_offset <= globals->thumb_glue_size);

  /* Now go back and fix up the original BL insn to point to here.  */
  ret_offset =
    /* Address of where the stub is located.  */
    (s->output_section->vma + s->output_offset + my_offset)
    /* Address of where the BL is located.  */
    - (input_section->output_section->vma + input_section->output_offset
       + offset)
    /* Addend in the relocation.  */
    - addend
    /* Biassing for PC-relative addressing.  */
    - 8;

  insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);

  return TRUE;
}
7885
/* Populate an Arm to Thumb stub.  Returns the stub symbol, or NULL if
   the glue symbol for NAME cannot be found.  VAL is the address of the
   Thumb target; S is the glue section to write the stub into.  The
   stub shape depends on whether the output is position-independent and
   whether BLX is available.  */

static struct elf_link_hash_entry *
elf32_arm_create_thumb_stub (struct bfd_link_info * info,
			     const char * name,
			     bfd * input_bfd,
			     bfd * output_bfd,
			     asection * sym_sec,
			     bfd_vma val,
			     asection * s,
			     char ** error_message)
{
  bfd_vma my_offset;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  myh = find_arm_glue (info, name, error_message);
  if (myh == NULL)
    return NULL;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  my_offset = myh->root.u.def.value;

  /* An odd value marks a stub whose contents have not been written
     yet; the low bit is cleared below once it has been populated.  */
  if ((my_offset & 0x01) == 0x01)
    {
      if (sym_sec != NULL
	  && sym_sec->owner != NULL
	  && !INTERWORK_FLAG (sym_sec->owner))
	{
	  (*_bfd_error_handler)
	    (_("%B(%s): warning: interworking not enabled.\n"
	       "  first occurrence: %B: arm call to thumb"),
	     sym_sec->owner, input_bfd, name);
	}

      --my_offset;
      myh->root.u.def.value = my_offset;

      if (bfd_link_pic (info)
	  || globals->root.is_relocatable_executable
	  || globals->pic_veneer)
	{
	  /* For relocatable objects we can't use absolute addresses,
	     so construct the address from a relative offset.  */
	  /* TODO: If the offset is small it's probably worth
	     constructing the address with adds.  */
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
			s->contents + my_offset);
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
			s->contents + my_offset + 4);
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
			s->contents + my_offset + 8);
	  /* Adjust the offset by 4 for the position of the add,
	     and 8 for the pipeline offset.  */
	  ret_offset = (val - (s->output_offset
			       + s->output_section->vma
			       + my_offset + 12))
		       | 1;
	  bfd_put_32 (output_bfd, ret_offset,
		      s->contents + my_offset + 12);
	}
      else if (globals->use_blx)
	{
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
			s->contents + my_offset);

	  /* It's a thumb address.  Add the low order bit.  */
	  bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
		      s->contents + my_offset + 4);
	}
      else
	{
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
			s->contents + my_offset);

	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
			s->contents + my_offset + 4);

	  /* It's a thumb address.  Add the low order bit.  */
	  bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
		      s->contents + my_offset + 8);

	  my_offset += 12;
	}
    }

  BFD_ASSERT (my_offset <= globals->arm_glue_size);

  return myh;
}
7980
/* Arm code calling a Thumb function.  Creates (or reuses) the
   ARM->Thumb glue stub for NAME and rewrites the ARM branch at
   HIT_DATA so that it targets the stub.  Returns TRUE on success.  */

static int
elf32_arm_to_thumb_stub (struct bfd_link_info * info,
			 const char * name,
			 bfd * input_bfd,
			 bfd * output_bfd,
			 asection * input_section,
			 bfd_byte * hit_data,
			 asection * sym_sec,
			 bfd_vma offset,
			 bfd_signed_vma addend,
			 bfd_vma val,
			 char **error_message)
{
  unsigned long int tmp;
  bfd_vma my_offset;
  asection * s;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM2THUMB_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
				     sym_sec, val, s, error_message);
  if (!myh)
    return FALSE;

  my_offset = myh->root.u.def.value;
  tmp = bfd_get_32 (input_bfd, hit_data);
  /* Keep only the condition/opcode byte of the original branch; the
     24-bit offset field is replaced below.  */
  tmp = tmp & 0xFF000000;

  /* Somehow these are both 4 too far, so subtract 8.  */
  ret_offset = (s->output_offset
		+ my_offset
		+ s->output_section->vma
		- (input_section->output_offset
		   + input_section->output_section->vma
		   + offset + addend)
		- 8);

  /* Fold the word-scaled displacement into the branch encoding.  */
  tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);

  bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);

  return TRUE;
}
8037
/* Populate Arm stub for an exported Thumb function.  Traversal
   callback for elf_link_hash_traverse: H is the symbol being visited,
   INF is the struct bfd_link_info.  Always returns TRUE so traversal
   continues.  */

static bfd_boolean
elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
{
  struct bfd_link_info * info = (struct bfd_link_info *) inf;
  asection * s;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_entry *eh;
  struct elf32_arm_link_hash_table * globals;
  asection *sec;
  bfd_vma val;
  char *error_message;

  eh = elf32_arm_hash_entry (h);
  /* Allocate stubs for exported Thumb functions on v4t.  */
  if (eh->export_glue == NULL)
    return TRUE;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM2THUMB_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  sec = eh->export_glue->root.u.def.section;

  BFD_ASSERT (sec->output_section != NULL);

  /* Absolute address of the export-glue symbol.  */
  val = eh->export_glue->root.u.def.value + sec->output_offset
	+ sec->output_section->vma;

  myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
				     h->root.u.def.section->owner,
				     globals->obfd, sec, val, s,
				     &error_message);
  BFD_ASSERT (myh);
  return TRUE;
}
8081
8082 /* Populate ARMv4 BX veneers. Returns the absolute adress of the veneer. */
8083
8084 static bfd_vma
8085 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
8086 {
8087 bfd_byte *p;
8088 bfd_vma glue_addr;
8089 asection *s;
8090 struct elf32_arm_link_hash_table *globals;
8091
8092 globals = elf32_arm_hash_table (info);
8093 BFD_ASSERT (globals != NULL);
8094 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
8095
8096 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
8097 ARM_BX_GLUE_SECTION_NAME);
8098 BFD_ASSERT (s != NULL);
8099 BFD_ASSERT (s->contents != NULL);
8100 BFD_ASSERT (s->output_section != NULL);
8101
8102 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
8103
8104 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
8105
8106 if ((globals->bx_glue_offset[reg] & 1) == 0)
8107 {
8108 p = s->contents + glue_addr;
8109 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
8110 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
8111 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
8112 globals->bx_glue_offset[reg] |= 1;
8113 }
8114
8115 return glue_addr + s->output_section->vma + s->output_offset;
8116 }
8117
8118 /* Generate Arm stubs for exported Thumb symbols. */
8119 static void
8120 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
8121 struct bfd_link_info *link_info)
8122 {
8123 struct elf32_arm_link_hash_table * globals;
8124
8125 if (link_info == NULL)
8126 /* Ignore this if we are not called by the ELF backend linker. */
8127 return;
8128
8129 globals = elf32_arm_hash_table (link_info);
8130 if (globals == NULL)
8131 return;
8132
8133 /* If blx is available then exported Thumb symbols are OK and there is
8134 nothing to do. */
8135 if (globals->use_blx)
8136 return;
8137
8138 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
8139 link_info);
8140 }
8141
8142 /* Reserve space for COUNT dynamic relocations in relocation selection
8143 SRELOC. */
8144
8145 static void
8146 elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc,
8147 bfd_size_type count)
8148 {
8149 struct elf32_arm_link_hash_table *htab;
8150
8151 htab = elf32_arm_hash_table (info);
8152 BFD_ASSERT (htab->root.dynamic_sections_created);
8153 if (sreloc == NULL)
8154 abort ();
8155 sreloc->size += RELOC_SIZE (htab) * count;
8156 }
8157
8158 /* Reserve space for COUNT R_ARM_IRELATIVE relocations. If the link is
8159 dynamic, the relocations should go in SRELOC, otherwise they should
8160 go in the special .rel.iplt section. */
8161
8162 static void
8163 elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc,
8164 bfd_size_type count)
8165 {
8166 struct elf32_arm_link_hash_table *htab;
8167
8168 htab = elf32_arm_hash_table (info);
8169 if (!htab->root.dynamic_sections_created)
8170 htab->root.irelplt->size += RELOC_SIZE (htab) * count;
8171 else
8172 {
8173 BFD_ASSERT (sreloc != NULL);
8174 sreloc->size += RELOC_SIZE (htab) * count;
8175 }
8176 }
8177
8178 /* Add relocation REL to the end of relocation section SRELOC. */
8179
8180 static void
8181 elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
8182 asection *sreloc, Elf_Internal_Rela *rel)
8183 {
8184 bfd_byte *loc;
8185 struct elf32_arm_link_hash_table *htab;
8186
8187 htab = elf32_arm_hash_table (info);
8188 if (!htab->root.dynamic_sections_created
8189 && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE)
8190 sreloc = htab->root.irelplt;
8191 if (sreloc == NULL)
8192 abort ();
8193 loc = sreloc->contents;
8194 loc += sreloc->reloc_count++ * RELOC_SIZE (htab);
8195 if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
8196 abort ();
8197 SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
8198 }
8199
/* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
   IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
   to .plt.  */

static void
elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
			      bfd_boolean is_iplt_entry,
			      union gotplt_union *root_plt,
			      struct arm_plt_info *arm_plt)
{
  struct elf32_arm_link_hash_table *htab;
  asection *splt;
  asection *sgotplt;

  htab = elf32_arm_hash_table (info);

  if (is_iplt_entry)
    {
      splt = htab->root.iplt;
      sgotplt = htab->root.igotplt;

      /* NaCl uses a special first entry in .iplt too.  */
      if (htab->nacl_p && splt->size == 0)
	splt->size += htab->plt_header_size;

      /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt.  */
      elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1);
    }
  else
    {
      splt = htab->root.splt;
      sgotplt = htab->root.sgotplt;

      /* Allocate room for an R_JUMP_SLOT relocation in .rel.plt.  */
      elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);

      /* If this is the first .plt entry, make room for the special
	 first entry.  */
      if (splt->size == 0)
	splt->size += htab->plt_header_size;

      htab->next_tls_desc_index++;
    }

  /* Allocate the PLT entry itself, including any leading Thumb stub.
     The offset recorded here is where this symbol's entry will start,
     after any stub.  */
  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
    splt->size += PLT_THUMB_STUB_SIZE;
  root_plt->offset = splt->size;
  splt->size += htab->plt_entry_size;

  if (!htab->symbian_p)
    {
      /* We also need to make an entry in the .got.plt section, which
	 will be placed in the .got section by the linker script.
	 NOTE(review): the non-iplt offset is biased by the 8-byte TLS
	 descriptor slots counted in num_tls_desc — confirm against the
	 TLS_DESC allocation code.  */
      if (is_iplt_entry)
	arm_plt->got_offset = sgotplt->size;
      else
	arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc;
      sgotplt->size += 4;
    }
}
8261
8262 static bfd_vma
8263 arm_movw_immediate (bfd_vma value)
8264 {
8265 return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
8266 }
8267
8268 static bfd_vma
8269 arm_movt_immediate (bfd_vma value)
8270 {
8271 return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
8272 }
8273
/* Fill in a PLT entry and its associated GOT slot.  If DYNINDX == -1,
   the entry lives in .iplt and resolves to (*SYM_VALUE)().
   Otherwise, DYNINDX is the index of the symbol in the dynamic
   symbol table and SYM_VALUE is undefined.

   ROOT_PLT points to the offset of the PLT entry from the start of its
   section (.iplt or .plt).  ARM_PLT points to the symbol's ARM-specific
   bookkeeping information.

   Returns FALSE if there was a problem.  */

static bfd_boolean
elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
			      union gotplt_union *root_plt,
			      struct arm_plt_info *arm_plt,
			      int dynindx, bfd_vma sym_value)
{
  struct elf32_arm_link_hash_table *htab;
  asection *sgot;
  asection *splt;
  asection *srel;
  bfd_byte *loc;
  bfd_vma plt_index;
  Elf_Internal_Rela rel;
  bfd_vma plt_header_size;
  bfd_vma got_header_size;

  htab = elf32_arm_hash_table (info);

  /* Pick the appropriate sections and sizes.  */
  if (dynindx == -1)
    {
      splt = htab->root.iplt;
      sgot = htab->root.igotplt;
      srel = htab->root.irelplt;

      /* There are no reserved entries in .igot.plt, and no special
	 first entry in .iplt.  */
      got_header_size = 0;
      plt_header_size = 0;
    }
  else
    {
      splt = htab->root.splt;
      sgot = htab->root.sgotplt;
      srel = htab->root.srelplt;

      got_header_size = get_elf_backend_data (output_bfd)->got_header_size;
      plt_header_size = htab->plt_header_size;
    }
  BFD_ASSERT (splt != NULL && srel != NULL);

  /* Fill in the entry in the procedure linkage table.  */
  if (htab->symbian_p)
    {
      /* Symbian PLT entries are two words; the second word is patched
	 at run time via the R_ARM_GLOB_DAT relocation created below.  */
      BFD_ASSERT (dynindx >= 0);
      put_arm_insn (htab, output_bfd,
		    elf32_arm_symbian_plt_entry[0],
		    splt->contents + root_plt->offset);
      bfd_put_32 (output_bfd,
		  elf32_arm_symbian_plt_entry[1],
		  splt->contents + root_plt->offset + 4);

      /* Fill in the entry in the .rel.plt section.  */
      rel.r_offset = (splt->output_section->vma
		      + splt->output_offset
		      + root_plt->offset + 4);
      rel.r_info = ELF32_R_INFO (dynindx, R_ARM_GLOB_DAT);

      /* Get the index in the procedure linkage table which
	 corresponds to this symbol.  This is the index of this symbol
	 in all the symbols for which we are making plt entries.  The
	 first entry in the procedure linkage table is reserved.  */
      plt_index = ((root_plt->offset - plt_header_size)
		   / htab->plt_entry_size);
    }
  else
    {
      bfd_vma got_offset, got_address, plt_address;
      bfd_vma got_displacement, initial_got_entry;
      bfd_byte * ptr;

      BFD_ASSERT (sgot != NULL);

      /* Get the offset into the .(i)got.plt table of the entry that
	 corresponds to this function.  The low bit is a flag bit
	 (cleared here); the offset proper is always even.  */
      got_offset = (arm_plt->got_offset & -2);

      /* Get the index in the procedure linkage table which
	 corresponds to this symbol.  This is the index of this symbol
	 in all the symbols for which we are making plt entries.
	 After the reserved .got.plt entries, all symbols appear in
	 the same order as in .plt.  */
      plt_index = (got_offset - got_header_size) / 4;

      /* Calculate the address of the GOT entry.  */
      got_address = (sgot->output_section->vma
		     + sgot->output_offset
		     + got_offset);

      /* ...and the address of the PLT entry.  */
      plt_address = (splt->output_section->vma
		     + splt->output_offset
		     + root_plt->offset);

      ptr = splt->contents + root_plt->offset;
      if (htab->vxworks_p && bfd_link_pic (info))
	{
	  /* VxWorks shared-library PLT: words 2 and 5 of the template
	     are data (GOT-relative offset and .rel.plt byte offset);
	     the rest are instructions.  */
	  unsigned int i;
	  bfd_vma val;

	  for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
	    {
	      val = elf32_arm_vxworks_shared_plt_entry[i];
	      if (i == 2)
		val |= got_address - sgot->output_section->vma;
	      if (i == 5)
		val |= plt_index * RELOC_SIZE (htab);
	      if (i == 2 || i == 5)
		bfd_put_32 (output_bfd, val, ptr);
	      else
		put_arm_insn (htab, output_bfd, val, ptr);
	    }
	}
      else if (htab->vxworks_p)
	{
	  /* VxWorks executable PLT: word 2 holds the absolute GOT
	     address, word 4 a backwards branch (24-bit, word-scaled),
	     word 5 the .rel.plt byte offset.  */
	  unsigned int i;
	  bfd_vma val;

	  for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
	    {
	      val = elf32_arm_vxworks_exec_plt_entry[i];
	      if (i == 2)
		val |= got_address;
	      if (i == 4)
		val |= 0xffffff & -((root_plt->offset + i * 4 + 8) >> 2);
	      if (i == 5)
		val |= plt_index * RELOC_SIZE (htab);
	      if (i == 2 || i == 5)
		bfd_put_32 (output_bfd, val, ptr);
	      else
		put_arm_insn (htab, output_bfd, val, ptr);
	    }

	  /* Two .rela.plt.unloaded relocations per PLT entry; this
	     entry's pair starts at index plt_index * 2 + 1.  */
	  loc = (htab->srelplt2->contents
		 + (plt_index * 2 + 1) * RELOC_SIZE (htab));

	  /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
	     referencing the GOT for this PLT entry.  */
	  rel.r_offset = plt_address + 8;
	  rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	  rel.r_addend = got_offset;
	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
	  loc += RELOC_SIZE (htab);

	  /* Create the R_ARM_ABS32 relocation referencing the
	     beginning of the PLT for this GOT entry.  */
	  rel.r_offset = got_address;
	  rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
	  rel.r_addend = 0;
	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
	}
      else if (htab->nacl_p)
	{
	  /* Calculate the displacement between the PLT slot and the
	     common tail that's part of the special initial PLT slot.  */
	  int32_t tail_displacement
	    = ((splt->output_section->vma + splt->output_offset
		+ ARM_NACL_PLT_TAIL_OFFSET)
	       - (plt_address + htab->plt_entry_size + 4));
	  BFD_ASSERT ((tail_displacement & 3) == 0);
	  tail_displacement >>= 2;

	  /* The branch displacement must fit in a 24-bit field.  */
	  BFD_ASSERT ((tail_displacement & 0xff000000) == 0
		      || (-tail_displacement & 0xff000000) == 0);

	  /* Calculate the displacement between the PLT slot and the entry
	     in the GOT.  The offset accounts for the value produced by
	     adding to pc in the penultimate instruction of the PLT stub.  */
	  got_displacement = (got_address
			      - (plt_address + htab->plt_entry_size));

	  /* NaCl does not support interworking at all.  */
	  BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt));

	  /* movw/movt build the GOT displacement; the final word
	     branches to the shared tail.  */
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[0]
			| arm_movw_immediate (got_displacement),
			ptr + 0);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[1]
			| arm_movt_immediate (got_displacement),
			ptr + 4);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[2],
			ptr + 8);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[3]
			| (tail_displacement & 0x00ffffff),
			ptr + 12);
	}
      else if (using_thumb_only (htab))
	{
	  /* PR ld/16017: Generate thumb only PLT entries.  */
	  if (!using_thumb2 (htab))
	    {
	      /* FIXME: We ought to be able to generate thumb-1 PLT
		 instructions...  */
	      _bfd_error_handler (_("%B: Warning: thumb-1 mode PLT generation not currently supported"),
				  output_bfd);
	      return FALSE;
	    }

	  /* Calculate the displacement between the PLT slot and the entry in
	     the GOT.  The 12-byte offset accounts for the value produced by
	     adding to pc in the 3rd instruction of the PLT stub.  */
	  got_displacement = got_address - (plt_address + 12);

	  /* As we are using 32 bit instructions we have to use 'put_arm_insn'
	     instead of 'put_thumb_insn'.  The masks below scatter the
	     displacement into the Thumb-2 movw/movt immediate fields.  */
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[0]
			| ((got_displacement & 0x000000ff) << 16)
			| ((got_displacement & 0x00000700) << 20)
			| ((got_displacement & 0x00000800) >> 1)
			| ((got_displacement & 0x0000f000) >> 12),
			ptr + 0);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[1]
			| ((got_displacement & 0x00ff0000) )
			| ((got_displacement & 0x07000000) << 4)
			| ((got_displacement & 0x08000000) >> 17)
			| ((got_displacement & 0xf0000000) >> 28),
			ptr + 4);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[2],
			ptr + 8);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[3],
			ptr + 12);
	}
      else
	{
	  /* Calculate the displacement between the PLT slot and the
	     entry in the GOT.  The eight-byte offset accounts for the
	     value produced by adding to pc in the first instruction
	     of the PLT stub.  */
	  got_displacement = got_address - (plt_address + 8);

	  /* Interworking: a two-halfword Thumb stub immediately
	     precedes the ARM entry when needed.  */
	  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
	    {
	      put_thumb_insn (htab, output_bfd,
			      elf32_arm_plt_thumb_stub[0], ptr - 4);
	      put_thumb_insn (htab, output_bfd,
			      elf32_arm_plt_thumb_stub[1], ptr - 2);
	    }

	  if (!elf32_arm_use_long_plt_entry)
	    {
	      /* Short form only reaches displacements below 256MB.  */
	      BFD_ASSERT ((got_displacement & 0xf0000000) == 0);

	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[0]
			    | ((got_displacement & 0x0ff00000) >> 20),
			    ptr + 0);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[1]
			    | ((got_displacement & 0x000ff000) >> 12),
			    ptr+ 4);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[2]
			    | (got_displacement & 0x00000fff),
			    ptr + 8);
#ifdef FOUR_WORD_PLT
	      bfd_put_32 (output_bfd, elf32_arm_plt_entry_short[3], ptr + 12);
#endif
	    }
	  else
	    {
	      /* Long form covers the full 32-bit displacement in
		 four 8/12-bit chunks.  */
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[0]
			    | ((got_displacement & 0xf0000000) >> 28),
			    ptr + 0);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[1]
			    | ((got_displacement & 0x0ff00000) >> 20),
			    ptr + 4);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[2]
			    | ((got_displacement & 0x000ff000) >> 12),
			    ptr+ 8);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[3]
			    | (got_displacement & 0x00000fff),
			    ptr + 12);
	    }
	}

      /* Fill in the entry in the .rel(a).(i)plt section.  */
      rel.r_offset = got_address;
      rel.r_addend = 0;
      if (dynindx == -1)
	{
	  /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
	     The dynamic linker or static executable then calls SYM_VALUE
	     to determine the correct run-time value of the .igot.plt entry.  */
	  rel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
	  initial_got_entry = sym_value;
	}
      else
	{
	  /* A JUMP_SLOT entry initially points at the start of .plt so
	     the first call falls through to the resolver.  */
	  rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT);
	  initial_got_entry = (splt->output_section->vma
			       + splt->output_offset);
	}

      /* Fill in the entry in the global offset table.  */
      bfd_put_32 (output_bfd, initial_got_entry,
		  sgot->contents + got_offset);
    }

  if (dynindx == -1)
    elf32_arm_add_dynreloc (output_bfd, info, srel, &rel);
  else
    {
      loc = srel->contents + plt_index * RELOC_SIZE (htab);
      SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
    }

  return TRUE;
}
8605
8606 /* Some relocations map to different relocations depending on the
8607 target. Return the real relocation. */
8608
8609 static int
8610 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
8611 int r_type)
8612 {
8613 switch (r_type)
8614 {
8615 case R_ARM_TARGET1:
8616 if (globals->target1_is_rel)
8617 return R_ARM_REL32;
8618 else
8619 return R_ARM_ABS32;
8620
8621 case R_ARM_TARGET2:
8622 return globals->target2_reloc;
8623
8624 default:
8625 return r_type;
8626 }
8627 }
8628
8629 /* Return the base VMA address which should be subtracted from real addresses
8630 when resolving @dtpoff relocation.
8631 This is PT_TLS segment p_vaddr. */
8632
8633 static bfd_vma
8634 dtpoff_base (struct bfd_link_info *info)
8635 {
8636 /* If tls_sec is NULL, we should have signalled an error already. */
8637 if (elf_hash_table (info)->tls_sec == NULL)
8638 return 0;
8639 return elf_hash_table (info)->tls_sec->vma;
8640 }
8641
8642 /* Return the relocation value for @tpoff relocation
8643 if STT_TLS virtual address is ADDRESS. */
8644
8645 static bfd_vma
8646 tpoff (struct bfd_link_info *info, bfd_vma address)
8647 {
8648 struct elf_link_hash_table *htab = elf_hash_table (info);
8649 bfd_vma base;
8650
8651 /* If tls_sec is NULL, we should have signalled an error already. */
8652 if (htab->tls_sec == NULL)
8653 return 0;
8654 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
8655 return address - htab->tls_sec->vma + base;
8656 }
8657
8658 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
8659 VALUE is the relocation value. */
8660
8661 static bfd_reloc_status_type
8662 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
8663 {
8664 if (value > 0xfff)
8665 return bfd_reloc_overflow;
8666
8667 value |= bfd_get_32 (abfd, data) & 0xfffff000;
8668 bfd_put_32 (abfd, value, data);
8669 return bfd_reloc_ok;
8670 }
8671
/* Handle TLS relaxations.  Relaxing is possible for symbols that use
   R_ARM_GOTDESC, R_ARM_{,THM_}TLS_CALL or
   R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.

   Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
   is to then call final_link_relocate.  Return other values in the
   case of error.

   FIXME:When --emit-relocs is in effect, we'll emit relocs describing
   the pre-relaxed code.  It would be nice if the relocs were updated
   to match the optimization.  */

static bfd_reloc_status_type
elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals,
		     bfd *input_bfd, asection *input_sec, bfd_byte *contents,
		     Elf_Internal_Rela *rel, unsigned long is_local)
{
  unsigned long insn;

  switch (ELF32_R_TYPE (rel->r_info))
    {
    default:
      return bfd_reloc_notsupported;

    case R_ARM_TLS_GOTDESC:
      /* Adjust the word at the relocation site, then let the caller
	 perform the normal relocation (bfd_reloc_continue).  */
      if (is_local)
	insn = 0;
      else
	{
	  insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
	  /* Bit 0 set marks a Thumb address; the bias subtracted
	     differs accordingly (PC-read offset plus the Thumb bit —
	     NOTE(review): confirm against the descriptor sequence
	     layout).  */
	  if (insn & 1)
	    insn -= 5; /* THUMB */
	  else
	    insn -= 8; /* ARM */
	}
      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
      return bfd_reloc_continue;

    case R_ARM_THM_TLS_DESCSEQ:
      /* Thumb insn.  Rewrite each recognised instruction of the
	 descriptor sequence in place.  */
      insn = bfd_get_16 (input_bfd, contents + rel->r_offset);
      if ((insn & 0xff78) == 0x4478)	  /* add rx, pc */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	}
      else if ((insn & 0xffc0) == 0x6840)  /* ldr rx,[ry,#4] */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	  else
	    /* ldr rx,[ry] */
	    bfd_put_16 (input_bfd, insn & 0xf83f, contents + rel->r_offset);
	}
      else if ((insn & 0xff87) == 0x4780)  /* blx rx */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	  else
	    /* mov r0, rx */
	    bfd_put_16 (input_bfd, 0x4600 | (insn & 0x78),
			contents + rel->r_offset);
	}
      else
	{
	  if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
	    /* It's a 32 bit instruction, fetch the rest of it for
	       error generation.  */
	    insn = (insn << 16)
	      | bfd_get_16 (input_bfd, contents + rel->r_offset + 2);
	  (*_bfd_error_handler)
	    (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' in TLS trampoline"),
	     input_bfd, input_sec, (unsigned long)rel->r_offset, insn);
	  return bfd_reloc_notsupported;
	}
      break;

    case R_ARM_TLS_DESCSEQ:
      /* arm insn.  Same rewriting as above, for the ARM encoding of
	 the descriptor sequence.  */
      insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
      if ((insn & 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
	{
	  if (is_local)
	    /* mov rx, ry */
	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xffff),
			contents + rel->r_offset);
	}
      else if ((insn & 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
	{
	  if (is_local)
	    /* nop */
	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
	  else
	    /* ldr rx,[ry] */
	    bfd_put_32 (input_bfd, insn & 0xfffff000,
			contents + rel->r_offset);
	}
      else if ((insn & 0xfffffff0) == 0xe12fff30) /* blx rx */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
	  else
	    /* mov r0, rx */
	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xf),
			contents + rel->r_offset);
	}
      else
	{
	  (*_bfd_error_handler)
	    (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' in TLS trampoline"),
	     input_bfd, input_sec, (unsigned long)rel->r_offset, insn);
	  return bfd_reloc_notsupported;
	}
      break;

    case R_ARM_TLS_CALL:
      /* GD->IE relaxation, turn the instruction into 'nop' or
	 'ldr r0, [pc,r0]' */
      insn = is_local ? 0xe1a00000 : 0xe79f0000;
      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
      break;

    case R_ARM_THM_TLS_CALL:
      /* GD->IE relaxation.  Two halfwords are written below.  */
      if (!is_local)
	/* add r0,pc; ldr r0, [r0] */
	insn = 0x44786800;
      else if (arch_has_thumb2_nop (globals))
	/* nop.w */
	insn = 0xf3af8000;
      else
	/* nop; nop */
	insn = 0xbf00bf00;

      bfd_put_16 (input_bfd, insn >> 16, contents + rel->r_offset);
      bfd_put_16 (input_bfd, insn & 0xffff, contents + rel->r_offset + 2);
      break;
    }
  return bfd_reloc_ok;
}
8816
8817 /* For a given value of n, calculate the value of G_n as required to
8818 deal with group relocations. We return it in the form of an
8819 encoded constant-and-rotation, together with the final residual. If n is
8820 specified as less than zero, then final_residual is filled with the
8821 input value and no further action is performed. */
8822
8823 static bfd_vma
8824 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
8825 {
8826 int current_n;
8827 bfd_vma g_n;
8828 bfd_vma encoded_g_n = 0;
8829 bfd_vma residual = value; /* Also known as Y_n. */
8830
8831 for (current_n = 0; current_n <= n; current_n++)
8832 {
8833 int shift;
8834
8835 /* Calculate which part of the value to mask. */
8836 if (residual == 0)
8837 shift = 0;
8838 else
8839 {
8840 int msb;
8841
8842 /* Determine the most significant bit in the residual and
8843 align the resulting value to a 2-bit boundary. */
8844 for (msb = 30; msb >= 0; msb -= 2)
8845 if (residual & (3 << msb))
8846 break;
8847
8848 /* The desired shift is now (msb - 6), or zero, whichever
8849 is the greater. */
8850 shift = msb - 6;
8851 if (shift < 0)
8852 shift = 0;
8853 }
8854
8855 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
8856 g_n = residual & (0xff << shift);
8857 encoded_g_n = (g_n >> shift)
8858 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
8859
8860 /* Calculate the residual for the next time around. */
8861 residual &= ~g_n;
8862 }
8863
8864 *final_residual = residual;
8865
8866 return encoded_g_n;
8867 }
8868
8869 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
8870 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
8871
8872 static int
8873 identify_add_or_sub (bfd_vma insn)
8874 {
8875 int opcode = insn & 0x1e00000;
8876
8877 if (opcode == 1 << 23) /* ADD */
8878 return 1;
8879
8880 if (opcode == 1 << 22) /* SUB */
8881 return -1;
8882
8883 return 0;
8884 }
8885
8886 /* Perform a relocation as part of a final link. */
8887
8888 static bfd_reloc_status_type
8889 elf32_arm_final_link_relocate (reloc_howto_type * howto,
8890 bfd * input_bfd,
8891 bfd * output_bfd,
8892 asection * input_section,
8893 bfd_byte * contents,
8894 Elf_Internal_Rela * rel,
8895 bfd_vma value,
8896 struct bfd_link_info * info,
8897 asection * sym_sec,
8898 const char * sym_name,
8899 unsigned char st_type,
8900 enum arm_st_branch_type branch_type,
8901 struct elf_link_hash_entry * h,
8902 bfd_boolean * unresolved_reloc_p,
8903 char ** error_message)
8904 {
8905 unsigned long r_type = howto->type;
8906 unsigned long r_symndx;
8907 bfd_byte * hit_data = contents + rel->r_offset;
8908 bfd_vma * local_got_offsets;
8909 bfd_vma * local_tlsdesc_gotents;
8910 asection * sgot;
8911 asection * splt;
8912 asection * sreloc = NULL;
8913 asection * srelgot;
8914 bfd_vma addend;
8915 bfd_signed_vma signed_addend;
8916 unsigned char dynreloc_st_type;
8917 bfd_vma dynreloc_value;
8918 struct elf32_arm_link_hash_table * globals;
8919 struct elf32_arm_link_hash_entry *eh;
8920 union gotplt_union *root_plt;
8921 struct arm_plt_info *arm_plt;
8922 bfd_vma plt_offset;
8923 bfd_vma gotplt_offset;
8924 bfd_boolean has_iplt_entry;
8925
8926 globals = elf32_arm_hash_table (info);
8927 if (globals == NULL)
8928 return bfd_reloc_notsupported;
8929
8930 BFD_ASSERT (is_arm_elf (input_bfd));
8931
8932 /* Some relocation types map to different relocations depending on the
8933 target. We pick the right one here. */
8934 r_type = arm_real_reloc_type (globals, r_type);
8935
8936 /* It is possible to have linker relaxations on some TLS access
8937 models. Update our information here. */
8938 r_type = elf32_arm_tls_transition (info, r_type, h);
8939
8940 if (r_type != howto->type)
8941 howto = elf32_arm_howto_from_type (r_type);
8942
8943 eh = (struct elf32_arm_link_hash_entry *) h;
8944 sgot = globals->root.sgot;
8945 local_got_offsets = elf_local_got_offsets (input_bfd);
8946 local_tlsdesc_gotents = elf32_arm_local_tlsdesc_gotent (input_bfd);
8947
8948 if (globals->root.dynamic_sections_created)
8949 srelgot = globals->root.srelgot;
8950 else
8951 srelgot = NULL;
8952
8953 r_symndx = ELF32_R_SYM (rel->r_info);
8954
8955 if (globals->use_rel)
8956 {
8957 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
8958
8959 if (addend & ((howto->src_mask + 1) >> 1))
8960 {
8961 signed_addend = -1;
8962 signed_addend &= ~ howto->src_mask;
8963 signed_addend |= addend;
8964 }
8965 else
8966 signed_addend = addend;
8967 }
8968 else
8969 addend = signed_addend = rel->r_addend;
8970
8971 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
8972 are resolving a function call relocation. */
8973 if (using_thumb_only (globals)
8974 && (r_type == R_ARM_THM_CALL
8975 || r_type == R_ARM_THM_JUMP24)
8976 && branch_type == ST_BRANCH_TO_ARM)
8977 branch_type = ST_BRANCH_TO_THUMB;
8978
8979 /* Record the symbol information that should be used in dynamic
8980 relocations. */
8981 dynreloc_st_type = st_type;
8982 dynreloc_value = value;
8983 if (branch_type == ST_BRANCH_TO_THUMB)
8984 dynreloc_value |= 1;
8985
8986 /* Find out whether the symbol has a PLT. Set ST_VALUE, BRANCH_TYPE and
8987 VALUE appropriately for relocations that we resolve at link time. */
8988 has_iplt_entry = FALSE;
8989 if (elf32_arm_get_plt_info (input_bfd, eh, r_symndx, &root_plt, &arm_plt)
8990 && root_plt->offset != (bfd_vma) -1)
8991 {
8992 plt_offset = root_plt->offset;
8993 gotplt_offset = arm_plt->got_offset;
8994
8995 if (h == NULL || eh->is_iplt)
8996 {
8997 has_iplt_entry = TRUE;
8998 splt = globals->root.iplt;
8999
9000 /* Populate .iplt entries here, because not all of them will
9001 be seen by finish_dynamic_symbol. The lower bit is set if
9002 we have already populated the entry. */
9003 if (plt_offset & 1)
9004 plt_offset--;
9005 else
9006 {
9007 if (elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
9008 -1, dynreloc_value))
9009 root_plt->offset |= 1;
9010 else
9011 return bfd_reloc_notsupported;
9012 }
9013
9014 /* Static relocations always resolve to the .iplt entry. */
9015 st_type = STT_FUNC;
9016 value = (splt->output_section->vma
9017 + splt->output_offset
9018 + plt_offset);
9019 branch_type = ST_BRANCH_TO_ARM;
9020
9021 /* If there are non-call relocations that resolve to the .iplt
9022 entry, then all dynamic ones must too. */
9023 if (arm_plt->noncall_refcount != 0)
9024 {
9025 dynreloc_st_type = st_type;
9026 dynreloc_value = value;
9027 }
9028 }
9029 else
9030 /* We populate the .plt entry in finish_dynamic_symbol. */
9031 splt = globals->root.splt;
9032 }
9033 else
9034 {
9035 splt = NULL;
9036 plt_offset = (bfd_vma) -1;
9037 gotplt_offset = (bfd_vma) -1;
9038 }
9039
9040 switch (r_type)
9041 {
9042 case R_ARM_NONE:
9043 /* We don't need to find a value for this symbol. It's just a
9044 marker. */
9045 *unresolved_reloc_p = FALSE;
9046 return bfd_reloc_ok;
9047
9048 case R_ARM_ABS12:
9049 if (!globals->vxworks_p)
9050 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
9051
9052 case R_ARM_PC24:
9053 case R_ARM_ABS32:
9054 case R_ARM_ABS32_NOI:
9055 case R_ARM_REL32:
9056 case R_ARM_REL32_NOI:
9057 case R_ARM_CALL:
9058 case R_ARM_JUMP24:
9059 case R_ARM_XPC25:
9060 case R_ARM_PREL31:
9061 case R_ARM_PLT32:
9062 /* Handle relocations which should use the PLT entry. ABS32/REL32
9063 will use the symbol's value, which may point to a PLT entry, but we
9064 don't need to handle that here. If we created a PLT entry, all
9065 branches in this object should go to it, except if the PLT is too
9066 far away, in which case a long branch stub should be inserted. */
9067 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
9068 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
9069 && r_type != R_ARM_CALL
9070 && r_type != R_ARM_JUMP24
9071 && r_type != R_ARM_PLT32)
9072 && plt_offset != (bfd_vma) -1)
9073 {
9074 /* If we've created a .plt section, and assigned a PLT entry
9075 to this function, it must either be a STT_GNU_IFUNC reference
9076 or not be known to bind locally. In other cases, we should
9077 have cleared the PLT entry by now. */
9078 BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h));
9079
9080 value = (splt->output_section->vma
9081 + splt->output_offset
9082 + plt_offset);
9083 *unresolved_reloc_p = FALSE;
9084 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9085 contents, rel->r_offset, value,
9086 rel->r_addend);
9087 }
9088
9089 /* When generating a shared object or relocatable executable, these
9090 relocations are copied into the output file to be resolved at
9091 run time. */
9092 if ((bfd_link_pic (info)
9093 || globals->root.is_relocatable_executable)
9094 && (input_section->flags & SEC_ALLOC)
9095 && !(globals->vxworks_p
9096 && strcmp (input_section->output_section->name,
9097 ".tls_vars") == 0)
9098 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
9099 || !SYMBOL_CALLS_LOCAL (info, h))
9100 && !(input_bfd == globals->stub_bfd
9101 && strstr (input_section->name, STUB_SUFFIX))
9102 && (h == NULL
9103 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
9104 || h->root.type != bfd_link_hash_undefweak)
9105 && r_type != R_ARM_PC24
9106 && r_type != R_ARM_CALL
9107 && r_type != R_ARM_JUMP24
9108 && r_type != R_ARM_PREL31
9109 && r_type != R_ARM_PLT32)
9110 {
9111 Elf_Internal_Rela outrel;
9112 bfd_boolean skip, relocate;
9113
9114 if ((r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
9115 && !h->def_regular)
9116 {
9117 char *v = _("shared object");
9118
9119 if (bfd_link_executable (info))
9120 v = _("PIE executable");
9121
9122 (*_bfd_error_handler)
9123 (_("%B: relocation %s against external or undefined symbol `%s'"
9124 " can not be used when making a %s; recompile with -fPIC"), input_bfd,
9125 elf32_arm_howto_table_1[r_type].name, h->root.root.string, v);
9126 return bfd_reloc_notsupported;
9127 }
9128
9129 *unresolved_reloc_p = FALSE;
9130
9131 if (sreloc == NULL && globals->root.dynamic_sections_created)
9132 {
9133 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
9134 ! globals->use_rel);
9135
9136 if (sreloc == NULL)
9137 return bfd_reloc_notsupported;
9138 }
9139
9140 skip = FALSE;
9141 relocate = FALSE;
9142
9143 outrel.r_addend = addend;
9144 outrel.r_offset =
9145 _bfd_elf_section_offset (output_bfd, info, input_section,
9146 rel->r_offset);
9147 if (outrel.r_offset == (bfd_vma) -1)
9148 skip = TRUE;
9149 else if (outrel.r_offset == (bfd_vma) -2)
9150 skip = TRUE, relocate = TRUE;
9151 outrel.r_offset += (input_section->output_section->vma
9152 + input_section->output_offset);
9153
9154 if (skip)
9155 memset (&outrel, 0, sizeof outrel);
9156 else if (h != NULL
9157 && h->dynindx != -1
9158 && (!bfd_link_pic (info)
9159 || !SYMBOLIC_BIND (info, h)
9160 || !h->def_regular))
9161 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
9162 else
9163 {
9164 int symbol;
9165
9166 /* This symbol is local, or marked to become local. */
9167 BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI);
9168 if (globals->symbian_p)
9169 {
9170 asection *osec;
9171
 9172 					/* On Symbian OS, the data segment and text segment
9173 can be relocated independently. Therefore, we
9174 must indicate the segment to which this
9175 relocation is relative. The BPABI allows us to
9176 use any symbol in the right segment; we just use
9177 the section symbol as it is convenient. (We
9178 cannot use the symbol given by "h" directly as it
9179 will not appear in the dynamic symbol table.)
9180
9181 Note that the dynamic linker ignores the section
9182 symbol value, so we don't subtract osec->vma
9183 from the emitted reloc addend. */
9184 if (sym_sec)
9185 osec = sym_sec->output_section;
9186 else
9187 osec = input_section->output_section;
9188 symbol = elf_section_data (osec)->dynindx;
9189 if (symbol == 0)
9190 {
9191 struct elf_link_hash_table *htab = elf_hash_table (info);
9192
9193 if ((osec->flags & SEC_READONLY) == 0
9194 && htab->data_index_section != NULL)
9195 osec = htab->data_index_section;
9196 else
9197 osec = htab->text_index_section;
9198 symbol = elf_section_data (osec)->dynindx;
9199 }
9200 BFD_ASSERT (symbol != 0);
9201 }
9202 else
9203 /* On SVR4-ish systems, the dynamic loader cannot
9204 relocate the text and data segments independently,
9205 so the symbol does not matter. */
9206 symbol = 0;
9207 if (dynreloc_st_type == STT_GNU_IFUNC)
9208 /* We have an STT_GNU_IFUNC symbol that doesn't resolve
9209 to the .iplt entry. Instead, every non-call reference
9210 must use an R_ARM_IRELATIVE relocation to obtain the
9211 correct run-time address. */
9212 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE);
9213 else
9214 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
9215 if (globals->use_rel)
9216 relocate = TRUE;
9217 else
9218 outrel.r_addend += dynreloc_value;
9219 }
9220
9221 elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel);
9222
9223 /* If this reloc is against an external symbol, we do not want to
9224 fiddle with the addend. Otherwise, we need to include the symbol
9225 value so that it becomes an addend for the dynamic reloc. */
9226 if (! relocate)
9227 return bfd_reloc_ok;
9228
9229 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9230 contents, rel->r_offset,
9231 dynreloc_value, (bfd_vma) 0);
9232 }
9233 else switch (r_type)
9234 {
9235 case R_ARM_ABS12:
9236 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
9237
9238 case R_ARM_XPC25: /* Arm BLX instruction. */
9239 case R_ARM_CALL:
9240 case R_ARM_JUMP24:
9241 case R_ARM_PC24: /* Arm B/BL instruction. */
9242 case R_ARM_PLT32:
9243 {
9244 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
9245
9246 if (r_type == R_ARM_XPC25)
9247 {
9248 /* Check for Arm calling Arm function. */
9249 /* FIXME: Should we translate the instruction into a BL
9250 instruction instead ? */
9251 if (branch_type != ST_BRANCH_TO_THUMB)
9252 (*_bfd_error_handler)
9253 (_("\%B: Warning: Arm BLX instruction targets Arm function '%s'."),
9254 input_bfd,
9255 h ? h->root.root.string : "(local)");
9256 }
9257 else if (r_type == R_ARM_PC24)
9258 {
9259 /* Check for Arm calling Thumb function. */
9260 if (branch_type == ST_BRANCH_TO_THUMB)
9261 {
9262 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
9263 output_bfd, input_section,
9264 hit_data, sym_sec, rel->r_offset,
9265 signed_addend, value,
9266 error_message))
9267 return bfd_reloc_ok;
9268 else
9269 return bfd_reloc_dangerous;
9270 }
9271 }
9272
9273 /* Check if a stub has to be inserted because the
9274 destination is too far or we are changing mode. */
9275 if ( r_type == R_ARM_CALL
9276 || r_type == R_ARM_JUMP24
9277 || r_type == R_ARM_PLT32)
9278 {
9279 enum elf32_arm_stub_type stub_type = arm_stub_none;
9280 struct elf32_arm_link_hash_entry *hash;
9281
9282 hash = (struct elf32_arm_link_hash_entry *) h;
9283 stub_type = arm_type_of_stub (info, input_section, rel,
9284 st_type, &branch_type,
9285 hash, value, sym_sec,
9286 input_bfd, sym_name);
9287
9288 if (stub_type != arm_stub_none)
9289 {
9290 /* The target is out of reach, so redirect the
9291 branch to the local stub for this function. */
9292 stub_entry = elf32_arm_get_stub_entry (input_section,
9293 sym_sec, h,
9294 rel, globals,
9295 stub_type);
9296 {
9297 if (stub_entry != NULL)
9298 value = (stub_entry->stub_offset
9299 + stub_entry->stub_sec->output_offset
9300 + stub_entry->stub_sec->output_section->vma);
9301
9302 if (plt_offset != (bfd_vma) -1)
9303 *unresolved_reloc_p = FALSE;
9304 }
9305 }
9306 else
9307 {
9308 /* If the call goes through a PLT entry, make sure to
9309 check distance to the right destination address. */
9310 if (plt_offset != (bfd_vma) -1)
9311 {
9312 value = (splt->output_section->vma
9313 + splt->output_offset
9314 + plt_offset);
9315 *unresolved_reloc_p = FALSE;
9316 /* The PLT entry is in ARM mode, regardless of the
9317 target function. */
9318 branch_type = ST_BRANCH_TO_ARM;
9319 }
9320 }
9321 }
9322
9323 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
9324 where:
9325 S is the address of the symbol in the relocation.
9326 P is address of the instruction being relocated.
9327 A is the addend (extracted from the instruction) in bytes.
9328
9329 S is held in 'value'.
9330 P is the base address of the section containing the
9331 instruction plus the offset of the reloc into that
9332 section, ie:
9333 (input_section->output_section->vma +
9334 input_section->output_offset +
9335 rel->r_offset).
9336 A is the addend, converted into bytes, ie:
9337 (signed_addend * 4)
9338
9339 Note: None of these operations have knowledge of the pipeline
9340 size of the processor, thus it is up to the assembler to
9341 encode this information into the addend. */
9342 value -= (input_section->output_section->vma
9343 + input_section->output_offset);
9344 value -= rel->r_offset;
9345 if (globals->use_rel)
9346 value += (signed_addend << howto->size);
9347 else
9348 /* RELA addends do not have to be adjusted by howto->size. */
9349 value += signed_addend;
9350
9351 signed_addend = value;
9352 signed_addend >>= howto->rightshift;
9353
9354 /* A branch to an undefined weak symbol is turned into a jump to
9355 the next instruction unless a PLT entry will be created.
9356 Do the same for local undefined symbols (but not for STN_UNDEF).
9357 The jump to the next instruction is optimized as a NOP depending
9358 on the architecture. */
9359 if (h ? (h->root.type == bfd_link_hash_undefweak
9360 && plt_offset == (bfd_vma) -1)
9361 : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
9362 {
9363 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
9364
9365 if (arch_has_arm_nop (globals))
9366 value |= 0x0320f000;
9367 else
9368 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
9369 }
9370 else
9371 {
9372 /* Perform a signed range check. */
9373 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
9374 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
9375 return bfd_reloc_overflow;
9376
9377 addend = (value & 2);
9378
9379 value = (signed_addend & howto->dst_mask)
9380 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
9381
9382 if (r_type == R_ARM_CALL)
9383 {
9384 /* Set the H bit in the BLX instruction. */
9385 if (branch_type == ST_BRANCH_TO_THUMB)
9386 {
9387 if (addend)
9388 value |= (1 << 24);
9389 else
9390 value &= ~(bfd_vma)(1 << 24);
9391 }
9392
9393 /* Select the correct instruction (BL or BLX). */
9394 /* Only if we are not handling a BL to a stub. In this
9395 case, mode switching is performed by the stub. */
9396 if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry)
9397 value |= (1 << 28);
9398 else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN)
9399 {
9400 value &= ~(bfd_vma)(1 << 28);
9401 value |= (1 << 24);
9402 }
9403 }
9404 }
9405 }
9406 break;
9407
9408 case R_ARM_ABS32:
9409 value += addend;
9410 if (branch_type == ST_BRANCH_TO_THUMB)
9411 value |= 1;
9412 break;
9413
9414 case R_ARM_ABS32_NOI:
9415 value += addend;
9416 break;
9417
9418 case R_ARM_REL32:
9419 value += addend;
9420 if (branch_type == ST_BRANCH_TO_THUMB)
9421 value |= 1;
9422 value -= (input_section->output_section->vma
9423 + input_section->output_offset + rel->r_offset);
9424 break;
9425
9426 case R_ARM_REL32_NOI:
9427 value += addend;
9428 value -= (input_section->output_section->vma
9429 + input_section->output_offset + rel->r_offset);
9430 break;
9431
9432 case R_ARM_PREL31:
9433 value -= (input_section->output_section->vma
9434 + input_section->output_offset + rel->r_offset);
9435 value += signed_addend;
9436 if (! h || h->root.type != bfd_link_hash_undefweak)
9437 {
9438 /* Check for overflow. */
9439 if ((value ^ (value >> 1)) & (1 << 30))
9440 return bfd_reloc_overflow;
9441 }
9442 value &= 0x7fffffff;
9443 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
9444 if (branch_type == ST_BRANCH_TO_THUMB)
9445 value |= 1;
9446 break;
9447 }
9448
9449 bfd_put_32 (input_bfd, value, hit_data);
9450 return bfd_reloc_ok;
9451
9452 case R_ARM_ABS8:
 9453     case R_ARM_ABS8:
 9454       /* PR 16202: Refetch the addend using the correct size. */
9454 if (globals->use_rel)
9455 addend = bfd_get_8 (input_bfd, hit_data);
9456 value += addend;
9457
9458 /* There is no way to tell whether the user intended to use a signed or
9459 unsigned addend. When checking for overflow we accept either,
9460 as specified by the AAELF. */
9461 if ((long) value > 0xff || (long) value < -0x80)
9462 return bfd_reloc_overflow;
9463
9464 bfd_put_8 (input_bfd, value, hit_data);
9465 return bfd_reloc_ok;
9466
9467 case R_ARM_ABS16:
 9468       /* PR 16202: Refetch the addend using the correct size. */
9469 if (globals->use_rel)
9470 addend = bfd_get_16 (input_bfd, hit_data);
9471 value += addend;
9472
9473 /* See comment for R_ARM_ABS8. */
9474 if ((long) value > 0xffff || (long) value < -0x8000)
9475 return bfd_reloc_overflow;
9476
9477 bfd_put_16 (input_bfd, value, hit_data);
9478 return bfd_reloc_ok;
9479
9480 case R_ARM_THM_ABS5:
9481 /* Support ldr and str instructions for the thumb. */
9482 if (globals->use_rel)
9483 {
9484 /* Need to refetch addend. */
9485 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
9486 /* ??? Need to determine shift amount from operand size. */
9487 addend >>= howto->rightshift;
9488 }
9489 value += addend;
9490
9491 /* ??? Isn't value unsigned? */
9492 if ((long) value > 0x1f || (long) value < -0x10)
9493 return bfd_reloc_overflow;
9494
9495 /* ??? Value needs to be properly shifted into place first. */
9496 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
9497 bfd_put_16 (input_bfd, value, hit_data);
9498 return bfd_reloc_ok;
9499
9500 case R_ARM_THM_ALU_PREL_11_0:
9501 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
9502 {
9503 bfd_vma insn;
9504 bfd_signed_vma relocation;
9505
9506 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
9507 | bfd_get_16 (input_bfd, hit_data + 2);
9508
9509 if (globals->use_rel)
9510 {
9511 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
9512 | ((insn & (1 << 26)) >> 15);
9513 if (insn & 0xf00000)
9514 signed_addend = -signed_addend;
9515 }
9516
9517 relocation = value + signed_addend;
9518 relocation -= Pa (input_section->output_section->vma
9519 + input_section->output_offset
9520 + rel->r_offset);
9521
9522 value = relocation;
9523
9524 if (value >= 0x1000)
9525 return bfd_reloc_overflow;
9526
9527 insn = (insn & 0xfb0f8f00) | (value & 0xff)
9528 | ((value & 0x700) << 4)
9529 | ((value & 0x800) << 15);
9530 if (relocation < 0)
9531 insn |= 0xa00000;
9532
9533 bfd_put_16 (input_bfd, insn >> 16, hit_data);
9534 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
9535
9536 return bfd_reloc_ok;
9537 }
9538
9539 case R_ARM_THM_PC8:
9540 /* PR 10073: This reloc is not generated by the GNU toolchain,
9541 but it is supported for compatibility with third party libraries
9542 generated by other compilers, specifically the ARM/IAR. */
9543 {
9544 bfd_vma insn;
9545 bfd_signed_vma relocation;
9546
9547 insn = bfd_get_16 (input_bfd, hit_data);
9548
9549 if (globals->use_rel)
9550 addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) -4;
9551
9552 relocation = value + addend;
9553 relocation -= Pa (input_section->output_section->vma
9554 + input_section->output_offset
9555 + rel->r_offset);
9556
9557 value = relocation;
9558
9559 /* We do not check for overflow of this reloc. Although strictly
9560 speaking this is incorrect, it appears to be necessary in order
9561 to work with IAR generated relocs. Since GCC and GAS do not
9562 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
9563 a problem for them. */
9564 value &= 0x3fc;
9565
9566 insn = (insn & 0xff00) | (value >> 2);
9567
9568 bfd_put_16 (input_bfd, insn, hit_data);
9569
9570 return bfd_reloc_ok;
9571 }
9572
9573 case R_ARM_THM_PC12:
9574 /* Corresponds to: ldr.w reg, [pc, #offset]. */
9575 {
9576 bfd_vma insn;
9577 bfd_signed_vma relocation;
9578
9579 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
9580 | bfd_get_16 (input_bfd, hit_data + 2);
9581
9582 if (globals->use_rel)
9583 {
9584 signed_addend = insn & 0xfff;
9585 if (!(insn & (1 << 23)))
9586 signed_addend = -signed_addend;
9587 }
9588
9589 relocation = value + signed_addend;
9590 relocation -= Pa (input_section->output_section->vma
9591 + input_section->output_offset
9592 + rel->r_offset);
9593
9594 value = relocation;
9595
9596 if (value >= 0x1000)
9597 return bfd_reloc_overflow;
9598
9599 insn = (insn & 0xff7ff000) | value;
9600 if (relocation >= 0)
9601 insn |= (1 << 23);
9602
9603 bfd_put_16 (input_bfd, insn >> 16, hit_data);
9604 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
9605
9606 return bfd_reloc_ok;
9607 }
9608
9609 case R_ARM_THM_XPC22:
9610 case R_ARM_THM_CALL:
9611 case R_ARM_THM_JUMP24:
9612 /* Thumb BL (branch long instruction). */
9613 {
9614 bfd_vma relocation;
9615 bfd_vma reloc_sign;
9616 bfd_boolean overflow = FALSE;
9617 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
9618 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
9619 bfd_signed_vma reloc_signed_max;
9620 bfd_signed_vma reloc_signed_min;
9621 bfd_vma check;
9622 bfd_signed_vma signed_check;
9623 int bitsize;
9624 const int thumb2 = using_thumb2 (globals);
9625
9626 /* A branch to an undefined weak symbol is turned into a jump to
9627 the next instruction unless a PLT entry will be created.
9628 The jump to the next instruction is optimized as a NOP.W for
9629 Thumb-2 enabled architectures. */
9630 if (h && h->root.type == bfd_link_hash_undefweak
9631 && plt_offset == (bfd_vma) -1)
9632 {
9633 if (arch_has_thumb2_nop (globals))
9634 {
9635 bfd_put_16 (input_bfd, 0xf3af, hit_data);
9636 bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
9637 }
9638 else
9639 {
9640 bfd_put_16 (input_bfd, 0xe000, hit_data);
9641 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
9642 }
9643 return bfd_reloc_ok;
9644 }
9645
9646 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
9647 with Thumb-1) involving the J1 and J2 bits. */
9648 if (globals->use_rel)
9649 {
9650 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
9651 bfd_vma upper = upper_insn & 0x3ff;
9652 bfd_vma lower = lower_insn & 0x7ff;
9653 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
9654 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
9655 bfd_vma i1 = j1 ^ s ? 0 : 1;
9656 bfd_vma i2 = j2 ^ s ? 0 : 1;
9657
9658 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
9659 /* Sign extend. */
9660 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
9661
9662 signed_addend = addend;
9663 }
9664
9665 if (r_type == R_ARM_THM_XPC22)
9666 {
9667 /* Check for Thumb to Thumb call. */
9668 /* FIXME: Should we translate the instruction into a BL
9669 instruction instead ? */
9670 if (branch_type == ST_BRANCH_TO_THUMB)
9671 (*_bfd_error_handler)
9672 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
9673 input_bfd,
9674 h ? h->root.root.string : "(local)");
9675 }
9676 else
9677 {
9678 /* If it is not a call to Thumb, assume call to Arm.
9679 If it is a call relative to a section name, then it is not a
9680 function call at all, but rather a long jump. Calls through
9681 the PLT do not require stubs. */
9682 if (branch_type == ST_BRANCH_TO_ARM && plt_offset == (bfd_vma) -1)
9683 {
9684 if (globals->use_blx && r_type == R_ARM_THM_CALL)
9685 {
9686 /* Convert BL to BLX. */
9687 lower_insn = (lower_insn & ~0x1000) | 0x0800;
9688 }
9689 else if (( r_type != R_ARM_THM_CALL)
9690 && (r_type != R_ARM_THM_JUMP24))
9691 {
9692 if (elf32_thumb_to_arm_stub
9693 (info, sym_name, input_bfd, output_bfd, input_section,
9694 hit_data, sym_sec, rel->r_offset, signed_addend, value,
9695 error_message))
9696 return bfd_reloc_ok;
9697 else
9698 return bfd_reloc_dangerous;
9699 }
9700 }
9701 else if (branch_type == ST_BRANCH_TO_THUMB
9702 && globals->use_blx
9703 && r_type == R_ARM_THM_CALL)
9704 {
9705 /* Make sure this is a BL. */
9706 lower_insn |= 0x1800;
9707 }
9708 }
9709
9710 enum elf32_arm_stub_type stub_type = arm_stub_none;
9711 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
9712 {
9713 /* Check if a stub has to be inserted because the destination
9714 is too far. */
9715 struct elf32_arm_stub_hash_entry *stub_entry;
9716 struct elf32_arm_link_hash_entry *hash;
9717
9718 hash = (struct elf32_arm_link_hash_entry *) h;
9719
9720 stub_type = arm_type_of_stub (info, input_section, rel,
9721 st_type, &branch_type,
9722 hash, value, sym_sec,
9723 input_bfd, sym_name);
9724
9725 if (stub_type != arm_stub_none)
9726 {
9727 /* The target is out of reach or we are changing modes, so
9728 redirect the branch to the local stub for this
9729 function. */
9730 stub_entry = elf32_arm_get_stub_entry (input_section,
9731 sym_sec, h,
9732 rel, globals,
9733 stub_type);
9734 if (stub_entry != NULL)
9735 {
9736 value = (stub_entry->stub_offset
9737 + stub_entry->stub_sec->output_offset
9738 + stub_entry->stub_sec->output_section->vma);
9739
9740 if (plt_offset != (bfd_vma) -1)
9741 *unresolved_reloc_p = FALSE;
9742 }
9743
9744 /* If this call becomes a call to Arm, force BLX. */
9745 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
9746 {
9747 if ((stub_entry
9748 && !arm_stub_is_thumb (stub_entry->stub_type))
9749 || branch_type != ST_BRANCH_TO_THUMB)
9750 lower_insn = (lower_insn & ~0x1000) | 0x0800;
9751 }
9752 }
9753 }
9754
9755 /* Handle calls via the PLT. */
9756 if (stub_type == arm_stub_none && plt_offset != (bfd_vma) -1)
9757 {
9758 value = (splt->output_section->vma
9759 + splt->output_offset
9760 + plt_offset);
9761
9762 if (globals->use_blx
9763 && r_type == R_ARM_THM_CALL
9764 && ! using_thumb_only (globals))
9765 {
9766 /* If the Thumb BLX instruction is available, convert
9767 the BL to a BLX instruction to call the ARM-mode
9768 PLT entry. */
9769 lower_insn = (lower_insn & ~0x1000) | 0x0800;
9770 branch_type = ST_BRANCH_TO_ARM;
9771 }
9772 else
9773 {
9774 if (! using_thumb_only (globals))
9775 /* Target the Thumb stub before the ARM PLT entry. */
9776 value -= PLT_THUMB_STUB_SIZE;
9777 branch_type = ST_BRANCH_TO_THUMB;
9778 }
9779 *unresolved_reloc_p = FALSE;
9780 }
9781
9782 relocation = value + signed_addend;
9783
9784 relocation -= (input_section->output_section->vma
9785 + input_section->output_offset
9786 + rel->r_offset);
9787
9788 check = relocation >> howto->rightshift;
9789
9790 /* If this is a signed value, the rightshift just dropped
9791 leading 1 bits (assuming twos complement). */
9792 if ((bfd_signed_vma) relocation >= 0)
9793 signed_check = check;
9794 else
9795 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
9796
 9797 	/* Calculate the permissible maximum and minimum values for
9798 this relocation according to whether we're relocating for
9799 Thumb-2 or not. */
9800 bitsize = howto->bitsize;
9801 if (!thumb2)
9802 bitsize -= 2;
9803 reloc_signed_max = (1 << (bitsize - 1)) - 1;
9804 reloc_signed_min = ~reloc_signed_max;
9805
9806 /* Assumes two's complement. */
9807 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
9808 overflow = TRUE;
9809
9810 if ((lower_insn & 0x5000) == 0x4000)
9811 /* For a BLX instruction, make sure that the relocation is rounded up
9812 to a word boundary. This follows the semantics of the instruction
9813 which specifies that bit 1 of the target address will come from bit
9814 1 of the base address. */
9815 relocation = (relocation + 2) & ~ 3;
9816
9817 /* Put RELOCATION back into the insn. Assumes two's complement.
9818 We use the Thumb-2 encoding, which is safe even if dealing with
9819 a Thumb-1 instruction by virtue of our overflow check above. */
9820 reloc_sign = (signed_check < 0) ? 1 : 0;
9821 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
9822 | ((relocation >> 12) & 0x3ff)
9823 | (reloc_sign << 10);
9824 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
9825 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
9826 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
9827 | ((relocation >> 1) & 0x7ff);
9828
9829 /* Put the relocated value back in the object file: */
9830 bfd_put_16 (input_bfd, upper_insn, hit_data);
9831 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
9832
9833 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
9834 }
9835 break;
9836
9837 case R_ARM_THM_JUMP19:
9838 /* Thumb32 conditional branch instruction. */
9839 {
9840 bfd_vma relocation;
9841 bfd_boolean overflow = FALSE;
9842 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
9843 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
9844 bfd_signed_vma reloc_signed_max = 0xffffe;
9845 bfd_signed_vma reloc_signed_min = -0x100000;
9846 bfd_signed_vma signed_check;
9847 enum elf32_arm_stub_type stub_type = arm_stub_none;
9848 struct elf32_arm_stub_hash_entry *stub_entry;
9849 struct elf32_arm_link_hash_entry *hash;
9850
9851 /* Need to refetch the addend, reconstruct the top three bits,
9852 and squish the two 11 bit pieces together. */
9853 if (globals->use_rel)
9854 {
9855 bfd_vma S = (upper_insn & 0x0400) >> 10;
9856 bfd_vma upper = (upper_insn & 0x003f);
9857 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
9858 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
9859 bfd_vma lower = (lower_insn & 0x07ff);
9860
9861 upper |= J1 << 6;
9862 upper |= J2 << 7;
9863 upper |= (!S) << 8;
9864 upper -= 0x0100; /* Sign extend. */
9865
9866 addend = (upper << 12) | (lower << 1);
9867 signed_addend = addend;
9868 }
9869
9870 /* Handle calls via the PLT. */
9871 if (plt_offset != (bfd_vma) -1)
9872 {
9873 value = (splt->output_section->vma
9874 + splt->output_offset
9875 + plt_offset);
9876 /* Target the Thumb stub before the ARM PLT entry. */
9877 value -= PLT_THUMB_STUB_SIZE;
9878 *unresolved_reloc_p = FALSE;
9879 }
9880
9881 hash = (struct elf32_arm_link_hash_entry *)h;
9882
9883 stub_type = arm_type_of_stub (info, input_section, rel,
9884 st_type, &branch_type,
9885 hash, value, sym_sec,
9886 input_bfd, sym_name);
9887 if (stub_type != arm_stub_none)
9888 {
9889 stub_entry = elf32_arm_get_stub_entry (input_section,
9890 sym_sec, h,
9891 rel, globals,
9892 stub_type);
9893 if (stub_entry != NULL)
9894 {
9895 value = (stub_entry->stub_offset
9896 + stub_entry->stub_sec->output_offset
9897 + stub_entry->stub_sec->output_section->vma);
9898 }
9899 }
9900
9901 relocation = value + signed_addend;
9902 relocation -= (input_section->output_section->vma
9903 + input_section->output_offset
9904 + rel->r_offset);
9905 signed_check = (bfd_signed_vma) relocation;
9906
9907 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
9908 overflow = TRUE;
9909
9910 /* Put RELOCATION back into the insn. */
9911 {
9912 bfd_vma S = (relocation & 0x00100000) >> 20;
9913 bfd_vma J2 = (relocation & 0x00080000) >> 19;
9914 bfd_vma J1 = (relocation & 0x00040000) >> 18;
9915 bfd_vma hi = (relocation & 0x0003f000) >> 12;
9916 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
9917
9918 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
9919 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
9920 }
9921
9922 /* Put the relocated value back in the object file: */
9923 bfd_put_16 (input_bfd, upper_insn, hit_data);
9924 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
9925
9926 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
9927 }
9928
9929 case R_ARM_THM_JUMP11:
9930 case R_ARM_THM_JUMP8:
9931 case R_ARM_THM_JUMP6:
 9932       /* Thumb B (branch) instruction.  */
9933 {
9934 bfd_signed_vma relocation;
9935 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
9936 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
9937 bfd_signed_vma signed_check;
9938
9939 /* CZB cannot jump backward. */
9940 if (r_type == R_ARM_THM_JUMP6)
9941 reloc_signed_min = 0;
9942
9943 if (globals->use_rel)
9944 {
9945 /* Need to refetch addend. */
9946 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
9947 if (addend & ((howto->src_mask + 1) >> 1))
9948 {
9949 signed_addend = -1;
9950 signed_addend &= ~ howto->src_mask;
9951 signed_addend |= addend;
9952 }
9953 else
9954 signed_addend = addend;
9955 /* The value in the insn has been right shifted. We need to
9956 undo this, so that we can perform the address calculation
9957 in terms of bytes. */
9958 signed_addend <<= howto->rightshift;
9959 }
9960 relocation = value + signed_addend;
9961
9962 relocation -= (input_section->output_section->vma
9963 + input_section->output_offset
9964 + rel->r_offset);
9965
9966 relocation >>= howto->rightshift;
9967 signed_check = relocation;
9968
9969 if (r_type == R_ARM_THM_JUMP6)
9970 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
9971 else
9972 relocation &= howto->dst_mask;
9973 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
9974
9975 bfd_put_16 (input_bfd, relocation, hit_data);
9976
9977 /* Assumes two's complement. */
9978 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
9979 return bfd_reloc_overflow;
9980
9981 return bfd_reloc_ok;
9982 }
9983
9984 case R_ARM_ALU_PCREL7_0:
9985 case R_ARM_ALU_PCREL15_8:
9986 case R_ARM_ALU_PCREL23_15:
9987 {
9988 bfd_vma insn;
9989 bfd_vma relocation;
9990
9991 insn = bfd_get_32 (input_bfd, hit_data);
9992 if (globals->use_rel)
9993 {
9994 /* Extract the addend. */
9995 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
9996 signed_addend = addend;
9997 }
9998 relocation = value + signed_addend;
9999
10000 relocation -= (input_section->output_section->vma
10001 + input_section->output_offset
10002 + rel->r_offset);
10003 insn = (insn & ~0xfff)
10004 | ((howto->bitpos << 7) & 0xf00)
10005 | ((relocation >> howto->bitpos) & 0xff);
10006 bfd_put_32 (input_bfd, value, hit_data);
10007 }
10008 return bfd_reloc_ok;
10009
10010 case R_ARM_GNU_VTINHERIT:
10011 case R_ARM_GNU_VTENTRY:
10012 return bfd_reloc_ok;
10013
10014 case R_ARM_GOTOFF32:
10015 /* Relocation is relative to the start of the
10016 global offset table. */
10017
10018 BFD_ASSERT (sgot != NULL);
10019 if (sgot == NULL)
10020 return bfd_reloc_notsupported;
10021
10022 /* If we are addressing a Thumb function, we need to adjust the
10023 address by one, so that attempts to call the function pointer will
10024 correctly interpret it as Thumb code. */
10025 if (branch_type == ST_BRANCH_TO_THUMB)
10026 value += 1;
10027
10028 /* Note that sgot->output_offset is not involved in this
10029 calculation. We always want the start of .got. If we
10030 define _GLOBAL_OFFSET_TABLE in a different way, as is
10031 permitted by the ABI, we might have to change this
10032 calculation. */
10033 value -= sgot->output_section->vma;
10034 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10035 contents, rel->r_offset, value,
10036 rel->r_addend);
10037
10038 case R_ARM_GOTPC:
10039 /* Use global offset table as symbol value. */
10040 BFD_ASSERT (sgot != NULL);
10041
10042 if (sgot == NULL)
10043 return bfd_reloc_notsupported;
10044
10045 *unresolved_reloc_p = FALSE;
10046 value = sgot->output_section->vma;
10047 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10048 contents, rel->r_offset, value,
10049 rel->r_addend);
10050
10051 case R_ARM_GOT32:
10052 case R_ARM_GOT_PREL:
10053 /* Relocation is to the entry for this symbol in the
10054 global offset table. */
10055 if (sgot == NULL)
10056 return bfd_reloc_notsupported;
10057
10058 if (dynreloc_st_type == STT_GNU_IFUNC
10059 && plt_offset != (bfd_vma) -1
10060 && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h)))
10061 {
10062 /* We have a relocation against a locally-binding STT_GNU_IFUNC
10063 symbol, and the relocation resolves directly to the runtime
10064 target rather than to the .iplt entry. This means that any
10065 .got entry would be the same value as the .igot.plt entry,
10066 so there's no point creating both. */
10067 sgot = globals->root.igotplt;
10068 value = sgot->output_offset + gotplt_offset;
10069 }
10070 else if (h != NULL)
10071 {
10072 bfd_vma off;
10073
10074 off = h->got.offset;
10075 BFD_ASSERT (off != (bfd_vma) -1);
10076 if ((off & 1) != 0)
10077 {
 10078 	      /* We have already processed one GOT relocation against
10079 this symbol. */
10080 off &= ~1;
10081 if (globals->root.dynamic_sections_created
10082 && !SYMBOL_REFERENCES_LOCAL (info, h))
10083 *unresolved_reloc_p = FALSE;
10084 }
10085 else
10086 {
10087 Elf_Internal_Rela outrel;
10088
10089 if (h->dynindx != -1 && !SYMBOL_REFERENCES_LOCAL (info, h))
10090 {
10091 /* If the symbol doesn't resolve locally in a static
10092 object, we have an undefined reference. If the
10093 symbol doesn't resolve locally in a dynamic object,
10094 it should be resolved by the dynamic linker. */
10095 if (globals->root.dynamic_sections_created)
10096 {
10097 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
10098 *unresolved_reloc_p = FALSE;
10099 }
10100 else
10101 outrel.r_info = 0;
10102 outrel.r_addend = 0;
10103 }
10104 else
10105 {
10106 if (dynreloc_st_type == STT_GNU_IFUNC)
10107 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
10108 else if (bfd_link_pic (info) &&
10109 (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
10110 || h->root.type != bfd_link_hash_undefweak))
10111 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
10112 else
10113 outrel.r_info = 0;
10114 outrel.r_addend = dynreloc_value;
10115 }
10116
10117 /* The GOT entry is initialized to zero by default.
10118 See if we should install a different value. */
10119 if (outrel.r_addend != 0
10120 && (outrel.r_info == 0 || globals->use_rel))
10121 {
10122 bfd_put_32 (output_bfd, outrel.r_addend,
10123 sgot->contents + off);
10124 outrel.r_addend = 0;
10125 }
10126
10127 if (outrel.r_info != 0)
10128 {
10129 outrel.r_offset = (sgot->output_section->vma
10130 + sgot->output_offset
10131 + off);
10132 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
10133 }
10134 h->got.offset |= 1;
10135 }
10136 value = sgot->output_offset + off;
10137 }
10138 else
10139 {
10140 bfd_vma off;
10141
10142 BFD_ASSERT (local_got_offsets != NULL &&
10143 local_got_offsets[r_symndx] != (bfd_vma) -1);
10144
10145 off = local_got_offsets[r_symndx];
10146
10147 /* The offset must always be a multiple of 4. We use the
10148 least significant bit to record whether we have already
10149 generated the necessary reloc. */
10150 if ((off & 1) != 0)
10151 off &= ~1;
10152 else
10153 {
10154 if (globals->use_rel)
10155 bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off);
10156
10157 if (bfd_link_pic (info) || dynreloc_st_type == STT_GNU_IFUNC)
10158 {
10159 Elf_Internal_Rela outrel;
10160
10161 outrel.r_addend = addend + dynreloc_value;
10162 outrel.r_offset = (sgot->output_section->vma
10163 + sgot->output_offset
10164 + off);
10165 if (dynreloc_st_type == STT_GNU_IFUNC)
10166 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
10167 else
10168 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
10169 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
10170 }
10171
10172 local_got_offsets[r_symndx] |= 1;
10173 }
10174
10175 value = sgot->output_offset + off;
10176 }
10177 if (r_type != R_ARM_GOT32)
10178 value += sgot->output_section->vma;
10179
10180 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10181 contents, rel->r_offset, value,
10182 rel->r_addend);
10183
10184 case R_ARM_TLS_LDO32:
10185 value = value - dtpoff_base (info);
10186
10187 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10188 contents, rel->r_offset, value,
10189 rel->r_addend);
10190
10191 case R_ARM_TLS_LDM32:
10192 {
10193 bfd_vma off;
10194
10195 if (sgot == NULL)
10196 abort ();
10197
10198 off = globals->tls_ldm_got.offset;
10199
10200 if ((off & 1) != 0)
10201 off &= ~1;
10202 else
10203 {
10204 /* If we don't know the module number, create a relocation
10205 for it. */
10206 if (bfd_link_pic (info))
10207 {
10208 Elf_Internal_Rela outrel;
10209
10210 if (srelgot == NULL)
10211 abort ();
10212
10213 outrel.r_addend = 0;
10214 outrel.r_offset = (sgot->output_section->vma
10215 + sgot->output_offset + off);
10216 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
10217
10218 if (globals->use_rel)
10219 bfd_put_32 (output_bfd, outrel.r_addend,
10220 sgot->contents + off);
10221
10222 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
10223 }
10224 else
10225 bfd_put_32 (output_bfd, 1, sgot->contents + off);
10226
10227 globals->tls_ldm_got.offset |= 1;
10228 }
10229
10230 value = sgot->output_section->vma + sgot->output_offset + off
10231 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
10232
10233 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10234 contents, rel->r_offset, value,
10235 rel->r_addend);
10236 }
10237
10238 case R_ARM_TLS_CALL:
10239 case R_ARM_THM_TLS_CALL:
10240 case R_ARM_TLS_GD32:
10241 case R_ARM_TLS_IE32:
10242 case R_ARM_TLS_GOTDESC:
10243 case R_ARM_TLS_DESCSEQ:
10244 case R_ARM_THM_TLS_DESCSEQ:
10245 {
10246 bfd_vma off, offplt;
10247 int indx = 0;
10248 char tls_type;
10249
10250 BFD_ASSERT (sgot != NULL);
10251
10252 if (h != NULL)
10253 {
10254 bfd_boolean dyn;
10255 dyn = globals->root.dynamic_sections_created;
10256 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
10257 bfd_link_pic (info),
10258 h)
10259 && (!bfd_link_pic (info)
10260 || !SYMBOL_REFERENCES_LOCAL (info, h)))
10261 {
10262 *unresolved_reloc_p = FALSE;
10263 indx = h->dynindx;
10264 }
10265 off = h->got.offset;
10266 offplt = elf32_arm_hash_entry (h)->tlsdesc_got;
10267 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
10268 }
10269 else
10270 {
10271 BFD_ASSERT (local_got_offsets != NULL);
10272 off = local_got_offsets[r_symndx];
10273 offplt = local_tlsdesc_gotents[r_symndx];
10274 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
10275 }
10276
10277 /* Linker relaxations happens from one of the
10278 R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE. */
10279 if (ELF32_R_TYPE(rel->r_info) != r_type)
10280 tls_type = GOT_TLS_IE;
10281
10282 BFD_ASSERT (tls_type != GOT_UNKNOWN);
10283
10284 if ((off & 1) != 0)
10285 off &= ~1;
10286 else
10287 {
10288 bfd_boolean need_relocs = FALSE;
10289 Elf_Internal_Rela outrel;
10290 int cur_off = off;
10291
10292 /* The GOT entries have not been initialized yet. Do it
10293 now, and emit any relocations. If both an IE GOT and a
10294 GD GOT are necessary, we emit the GD first. */
10295
10296 if ((bfd_link_pic (info) || indx != 0)
10297 && (h == NULL
10298 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
10299 || h->root.type != bfd_link_hash_undefweak))
10300 {
10301 need_relocs = TRUE;
10302 BFD_ASSERT (srelgot != NULL);
10303 }
10304
10305 if (tls_type & GOT_TLS_GDESC)
10306 {
10307 bfd_byte *loc;
10308
10309 /* We should have relaxed, unless this is an undefined
10310 weak symbol. */
10311 BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak))
10312 || bfd_link_pic (info));
10313 BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8
10314 <= globals->root.sgotplt->size);
10315
10316 outrel.r_addend = 0;
10317 outrel.r_offset = (globals->root.sgotplt->output_section->vma
10318 + globals->root.sgotplt->output_offset
10319 + offplt
10320 + globals->sgotplt_jump_table_size);
10321
10322 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DESC);
10323 sreloc = globals->root.srelplt;
10324 loc = sreloc->contents;
10325 loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals);
10326 BFD_ASSERT (loc + RELOC_SIZE (globals)
10327 <= sreloc->contents + sreloc->size);
10328
10329 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
10330
10331 /* For globals, the first word in the relocation gets
10332 the relocation index and the top bit set, or zero,
10333 if we're binding now. For locals, it gets the
10334 symbol's offset in the tls section. */
10335 bfd_put_32 (output_bfd,
10336 !h ? value - elf_hash_table (info)->tls_sec->vma
10337 : info->flags & DF_BIND_NOW ? 0
10338 : 0x80000000 | ELF32_R_SYM (outrel.r_info),
10339 globals->root.sgotplt->contents + offplt
10340 + globals->sgotplt_jump_table_size);
10341
10342 /* Second word in the relocation is always zero. */
10343 bfd_put_32 (output_bfd, 0,
10344 globals->root.sgotplt->contents + offplt
10345 + globals->sgotplt_jump_table_size + 4);
10346 }
10347 if (tls_type & GOT_TLS_GD)
10348 {
10349 if (need_relocs)
10350 {
10351 outrel.r_addend = 0;
10352 outrel.r_offset = (sgot->output_section->vma
10353 + sgot->output_offset
10354 + cur_off);
10355 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
10356
10357 if (globals->use_rel)
10358 bfd_put_32 (output_bfd, outrel.r_addend,
10359 sgot->contents + cur_off);
10360
10361 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
10362
10363 if (indx == 0)
10364 bfd_put_32 (output_bfd, value - dtpoff_base (info),
10365 sgot->contents + cur_off + 4);
10366 else
10367 {
10368 outrel.r_addend = 0;
10369 outrel.r_info = ELF32_R_INFO (indx,
10370 R_ARM_TLS_DTPOFF32);
10371 outrel.r_offset += 4;
10372
10373 if (globals->use_rel)
10374 bfd_put_32 (output_bfd, outrel.r_addend,
10375 sgot->contents + cur_off + 4);
10376
10377 elf32_arm_add_dynreloc (output_bfd, info,
10378 srelgot, &outrel);
10379 }
10380 }
10381 else
10382 {
10383 /* If we are not emitting relocations for a
10384 general dynamic reference, then we must be in a
10385 static link or an executable link with the
10386 symbol binding locally. Mark it as belonging
10387 to module 1, the executable. */
10388 bfd_put_32 (output_bfd, 1,
10389 sgot->contents + cur_off);
10390 bfd_put_32 (output_bfd, value - dtpoff_base (info),
10391 sgot->contents + cur_off + 4);
10392 }
10393
10394 cur_off += 8;
10395 }
10396
10397 if (tls_type & GOT_TLS_IE)
10398 {
10399 if (need_relocs)
10400 {
10401 if (indx == 0)
10402 outrel.r_addend = value - dtpoff_base (info);
10403 else
10404 outrel.r_addend = 0;
10405 outrel.r_offset = (sgot->output_section->vma
10406 + sgot->output_offset
10407 + cur_off);
10408 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
10409
10410 if (globals->use_rel)
10411 bfd_put_32 (output_bfd, outrel.r_addend,
10412 sgot->contents + cur_off);
10413
10414 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
10415 }
10416 else
10417 bfd_put_32 (output_bfd, tpoff (info, value),
10418 sgot->contents + cur_off);
10419 cur_off += 4;
10420 }
10421
10422 if (h != NULL)
10423 h->got.offset |= 1;
10424 else
10425 local_got_offsets[r_symndx] |= 1;
10426 }
10427
10428 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
10429 off += 8;
10430 else if (tls_type & GOT_TLS_GDESC)
10431 off = offplt;
10432
10433 if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL
10434 || ELF32_R_TYPE(rel->r_info) == R_ARM_THM_TLS_CALL)
10435 {
10436 bfd_signed_vma offset;
10437 /* TLS stubs are arm mode. The original symbol is a
10438 data object, so branch_type is bogus. */
10439 branch_type = ST_BRANCH_TO_ARM;
10440 enum elf32_arm_stub_type stub_type
10441 = arm_type_of_stub (info, input_section, rel,
10442 st_type, &branch_type,
10443 (struct elf32_arm_link_hash_entry *)h,
10444 globals->tls_trampoline, globals->root.splt,
10445 input_bfd, sym_name);
10446
10447 if (stub_type != arm_stub_none)
10448 {
10449 struct elf32_arm_stub_hash_entry *stub_entry
10450 = elf32_arm_get_stub_entry
10451 (input_section, globals->root.splt, 0, rel,
10452 globals, stub_type);
10453 offset = (stub_entry->stub_offset
10454 + stub_entry->stub_sec->output_offset
10455 + stub_entry->stub_sec->output_section->vma);
10456 }
10457 else
10458 offset = (globals->root.splt->output_section->vma
10459 + globals->root.splt->output_offset
10460 + globals->tls_trampoline);
10461
10462 if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL)
10463 {
10464 unsigned long inst;
10465
10466 offset -= (input_section->output_section->vma
10467 + input_section->output_offset
10468 + rel->r_offset + 8);
10469
10470 inst = offset >> 2;
10471 inst &= 0x00ffffff;
10472 value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000);
10473 }
10474 else
10475 {
10476 /* Thumb blx encodes the offset in a complicated
10477 fashion. */
10478 unsigned upper_insn, lower_insn;
10479 unsigned neg;
10480
10481 offset -= (input_section->output_section->vma
10482 + input_section->output_offset
10483 + rel->r_offset + 4);
10484
10485 if (stub_type != arm_stub_none
10486 && arm_stub_is_thumb (stub_type))
10487 {
10488 lower_insn = 0xd000;
10489 }
10490 else
10491 {
10492 lower_insn = 0xc000;
10493 /* Round up the offset to a word boundary. */
10494 offset = (offset + 2) & ~2;
10495 }
10496
10497 neg = offset < 0;
10498 upper_insn = (0xf000
10499 | ((offset >> 12) & 0x3ff)
10500 | (neg << 10));
10501 lower_insn |= (((!((offset >> 23) & 1)) ^ neg) << 13)
10502 | (((!((offset >> 22) & 1)) ^ neg) << 11)
10503 | ((offset >> 1) & 0x7ff);
10504 bfd_put_16 (input_bfd, upper_insn, hit_data);
10505 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
10506 return bfd_reloc_ok;
10507 }
10508 }
10509 /* These relocations needs special care, as besides the fact
10510 they point somewhere in .gotplt, the addend must be
10511 adjusted accordingly depending on the type of instruction
10512 we refer to. */
10513 else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC))
10514 {
10515 unsigned long data, insn;
10516 unsigned thumb;
10517
10518 data = bfd_get_32 (input_bfd, hit_data);
10519 thumb = data & 1;
10520 data &= ~1u;
10521
10522 if (thumb)
10523 {
10524 insn = bfd_get_16 (input_bfd, contents + rel->r_offset - data);
10525 if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
10526 insn = (insn << 16)
10527 | bfd_get_16 (input_bfd,
10528 contents + rel->r_offset - data + 2);
10529 if ((insn & 0xf800c000) == 0xf000c000)
10530 /* bl/blx */
10531 value = -6;
10532 else if ((insn & 0xffffff00) == 0x4400)
10533 /* add */
10534 value = -5;
10535 else
10536 {
10537 (*_bfd_error_handler)
10538 (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' referenced by TLS_GOTDESC"),
10539 input_bfd, input_section,
10540 (unsigned long)rel->r_offset, insn);
10541 return bfd_reloc_notsupported;
10542 }
10543 }
10544 else
10545 {
10546 insn = bfd_get_32 (input_bfd, contents + rel->r_offset - data);
10547
10548 switch (insn >> 24)
10549 {
10550 case 0xeb: /* bl */
10551 case 0xfa: /* blx */
10552 value = -4;
10553 break;
10554
10555 case 0xe0: /* add */
10556 value = -8;
10557 break;
10558
10559 default:
10560 (*_bfd_error_handler)
10561 (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' referenced by TLS_GOTDESC"),
10562 input_bfd, input_section,
10563 (unsigned long)rel->r_offset, insn);
10564 return bfd_reloc_notsupported;
10565 }
10566 }
10567
10568 value += ((globals->root.sgotplt->output_section->vma
10569 + globals->root.sgotplt->output_offset + off)
10570 - (input_section->output_section->vma
10571 + input_section->output_offset
10572 + rel->r_offset)
10573 + globals->sgotplt_jump_table_size);
10574 }
10575 else
10576 value = ((globals->root.sgot->output_section->vma
10577 + globals->root.sgot->output_offset + off)
10578 - (input_section->output_section->vma
10579 + input_section->output_offset + rel->r_offset));
10580
10581 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10582 contents, rel->r_offset, value,
10583 rel->r_addend);
10584 }
10585
10586 case R_ARM_TLS_LE32:
10587 if (bfd_link_dll (info))
10588 {
10589 (*_bfd_error_handler)
10590 (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"),
10591 input_bfd, input_section,
10592 (long) rel->r_offset, howto->name);
10593 return bfd_reloc_notsupported;
10594 }
10595 else
10596 value = tpoff (info, value);
10597
10598 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10599 contents, rel->r_offset, value,
10600 rel->r_addend);
10601
10602 case R_ARM_V4BX:
10603 if (globals->fix_v4bx)
10604 {
10605 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
10606
10607 /* Ensure that we have a BX instruction. */
10608 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
10609
10610 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
10611 {
10612 /* Branch to veneer. */
10613 bfd_vma glue_addr;
10614 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
10615 glue_addr -= input_section->output_section->vma
10616 + input_section->output_offset
10617 + rel->r_offset + 8;
10618 insn = (insn & 0xf0000000) | 0x0a000000
10619 | ((glue_addr >> 2) & 0x00ffffff);
10620 }
10621 else
10622 {
10623 /* Preserve Rm (lowest four bits) and the condition code
10624 (highest four bits). Other bits encode MOV PC,Rm. */
10625 insn = (insn & 0xf000000f) | 0x01a0f000;
10626 }
10627
10628 bfd_put_32 (input_bfd, insn, hit_data);
10629 }
10630 return bfd_reloc_ok;
10631
10632 case R_ARM_MOVW_ABS_NC:
10633 case R_ARM_MOVT_ABS:
10634 case R_ARM_MOVW_PREL_NC:
10635 case R_ARM_MOVT_PREL:
10636 /* Until we properly support segment-base-relative addressing then
10637 we assume the segment base to be zero, as for the group relocations.
10638 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
10639 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
10640 case R_ARM_MOVW_BREL_NC:
10641 case R_ARM_MOVW_BREL:
10642 case R_ARM_MOVT_BREL:
10643 {
10644 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
10645
10646 if (globals->use_rel)
10647 {
10648 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
10649 signed_addend = (addend ^ 0x8000) - 0x8000;
10650 }
10651
10652 value += signed_addend;
10653
10654 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
10655 value -= (input_section->output_section->vma
10656 + input_section->output_offset + rel->r_offset);
10657
10658 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
10659 return bfd_reloc_overflow;
10660
10661 if (branch_type == ST_BRANCH_TO_THUMB)
10662 value |= 1;
10663
10664 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
10665 || r_type == R_ARM_MOVT_BREL)
10666 value >>= 16;
10667
10668 insn &= 0xfff0f000;
10669 insn |= value & 0xfff;
10670 insn |= (value & 0xf000) << 4;
10671 bfd_put_32 (input_bfd, insn, hit_data);
10672 }
10673 return bfd_reloc_ok;
10674
10675 case R_ARM_THM_MOVW_ABS_NC:
10676 case R_ARM_THM_MOVT_ABS:
10677 case R_ARM_THM_MOVW_PREL_NC:
10678 case R_ARM_THM_MOVT_PREL:
10679 /* Until we properly support segment-base-relative addressing then
10680 we assume the segment base to be zero, as for the above relocations.
10681 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
10682 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
10683 as R_ARM_THM_MOVT_ABS. */
10684 case R_ARM_THM_MOVW_BREL_NC:
10685 case R_ARM_THM_MOVW_BREL:
10686 case R_ARM_THM_MOVT_BREL:
10687 {
10688 bfd_vma insn;
10689
10690 insn = bfd_get_16 (input_bfd, hit_data) << 16;
10691 insn |= bfd_get_16 (input_bfd, hit_data + 2);
10692
10693 if (globals->use_rel)
10694 {
10695 addend = ((insn >> 4) & 0xf000)
10696 | ((insn >> 15) & 0x0800)
10697 | ((insn >> 4) & 0x0700)
10698 | (insn & 0x00ff);
10699 signed_addend = (addend ^ 0x8000) - 0x8000;
10700 }
10701
10702 value += signed_addend;
10703
10704 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
10705 value -= (input_section->output_section->vma
10706 + input_section->output_offset + rel->r_offset);
10707
10708 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
10709 return bfd_reloc_overflow;
10710
10711 if (branch_type == ST_BRANCH_TO_THUMB)
10712 value |= 1;
10713
10714 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
10715 || r_type == R_ARM_THM_MOVT_BREL)
10716 value >>= 16;
10717
10718 insn &= 0xfbf08f00;
10719 insn |= (value & 0xf000) << 4;
10720 insn |= (value & 0x0800) << 15;
10721 insn |= (value & 0x0700) << 4;
10722 insn |= (value & 0x00ff);
10723
10724 bfd_put_16 (input_bfd, insn >> 16, hit_data);
10725 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
10726 }
10727 return bfd_reloc_ok;
10728
10729 case R_ARM_ALU_PC_G0_NC:
10730 case R_ARM_ALU_PC_G1_NC:
10731 case R_ARM_ALU_PC_G0:
10732 case R_ARM_ALU_PC_G1:
10733 case R_ARM_ALU_PC_G2:
10734 case R_ARM_ALU_SB_G0_NC:
10735 case R_ARM_ALU_SB_G1_NC:
10736 case R_ARM_ALU_SB_G0:
10737 case R_ARM_ALU_SB_G1:
10738 case R_ARM_ALU_SB_G2:
10739 {
10740 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
10741 bfd_vma pc = input_section->output_section->vma
10742 + input_section->output_offset + rel->r_offset;
10743 /* sb is the origin of the *segment* containing the symbol. */
10744 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
10745 bfd_vma residual;
10746 bfd_vma g_n;
10747 bfd_signed_vma signed_value;
10748 int group = 0;
10749
10750 /* Determine which group of bits to select. */
10751 switch (r_type)
10752 {
10753 case R_ARM_ALU_PC_G0_NC:
10754 case R_ARM_ALU_PC_G0:
10755 case R_ARM_ALU_SB_G0_NC:
10756 case R_ARM_ALU_SB_G0:
10757 group = 0;
10758 break;
10759
10760 case R_ARM_ALU_PC_G1_NC:
10761 case R_ARM_ALU_PC_G1:
10762 case R_ARM_ALU_SB_G1_NC:
10763 case R_ARM_ALU_SB_G1:
10764 group = 1;
10765 break;
10766
10767 case R_ARM_ALU_PC_G2:
10768 case R_ARM_ALU_SB_G2:
10769 group = 2;
10770 break;
10771
10772 default:
10773 abort ();
10774 }
10775
10776 /* If REL, extract the addend from the insn. If RELA, it will
10777 have already been fetched for us. */
10778 if (globals->use_rel)
10779 {
10780 int negative;
10781 bfd_vma constant = insn & 0xff;
10782 bfd_vma rotation = (insn & 0xf00) >> 8;
10783
10784 if (rotation == 0)
10785 signed_addend = constant;
10786 else
10787 {
10788 /* Compensate for the fact that in the instruction, the
10789 rotation is stored in multiples of 2 bits. */
10790 rotation *= 2;
10791
10792 /* Rotate "constant" right by "rotation" bits. */
10793 signed_addend = (constant >> rotation) |
10794 (constant << (8 * sizeof (bfd_vma) - rotation));
10795 }
10796
10797 /* Determine if the instruction is an ADD or a SUB.
10798 (For REL, this determines the sign of the addend.) */
10799 negative = identify_add_or_sub (insn);
10800 if (negative == 0)
10801 {
10802 (*_bfd_error_handler)
10803 (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
10804 input_bfd, input_section,
10805 (long) rel->r_offset, howto->name);
10806 return bfd_reloc_overflow;
10807 }
10808
10809 signed_addend *= negative;
10810 }
10811
10812 /* Compute the value (X) to go in the place. */
10813 if (r_type == R_ARM_ALU_PC_G0_NC
10814 || r_type == R_ARM_ALU_PC_G1_NC
10815 || r_type == R_ARM_ALU_PC_G0
10816 || r_type == R_ARM_ALU_PC_G1
10817 || r_type == R_ARM_ALU_PC_G2)
10818 /* PC relative. */
10819 signed_value = value - pc + signed_addend;
10820 else
10821 /* Section base relative. */
10822 signed_value = value - sb + signed_addend;
10823
10824 /* If the target symbol is a Thumb function, then set the
10825 Thumb bit in the address. */
10826 if (branch_type == ST_BRANCH_TO_THUMB)
10827 signed_value |= 1;
10828
10829 /* Calculate the value of the relevant G_n, in encoded
10830 constant-with-rotation format. */
10831 g_n = calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
10832 group, &residual);
10833
10834 /* Check for overflow if required. */
10835 if ((r_type == R_ARM_ALU_PC_G0
10836 || r_type == R_ARM_ALU_PC_G1
10837 || r_type == R_ARM_ALU_PC_G2
10838 || r_type == R_ARM_ALU_SB_G0
10839 || r_type == R_ARM_ALU_SB_G1
10840 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
10841 {
10842 (*_bfd_error_handler)
10843 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
10844 input_bfd, input_section,
10845 (long) rel->r_offset, signed_value < 0 ? - signed_value : signed_value,
10846 howto->name);
10847 return bfd_reloc_overflow;
10848 }
10849
10850 /* Mask out the value and the ADD/SUB part of the opcode; take care
10851 not to destroy the S bit. */
10852 insn &= 0xff1ff000;
10853
10854 /* Set the opcode according to whether the value to go in the
10855 place is negative. */
10856 if (signed_value < 0)
10857 insn |= 1 << 22;
10858 else
10859 insn |= 1 << 23;
10860
10861 /* Encode the offset. */
10862 insn |= g_n;
10863
10864 bfd_put_32 (input_bfd, insn, hit_data);
10865 }
10866 return bfd_reloc_ok;
10867
10868 case R_ARM_LDR_PC_G0:
10869 case R_ARM_LDR_PC_G1:
10870 case R_ARM_LDR_PC_G2:
10871 case R_ARM_LDR_SB_G0:
10872 case R_ARM_LDR_SB_G1:
10873 case R_ARM_LDR_SB_G2:
10874 {
10875 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
10876 bfd_vma pc = input_section->output_section->vma
10877 + input_section->output_offset + rel->r_offset;
10878 /* sb is the origin of the *segment* containing the symbol. */
10879 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
10880 bfd_vma residual;
10881 bfd_signed_vma signed_value;
10882 int group = 0;
10883
10884 /* Determine which groups of bits to calculate. */
10885 switch (r_type)
10886 {
10887 case R_ARM_LDR_PC_G0:
10888 case R_ARM_LDR_SB_G0:
10889 group = 0;
10890 break;
10891
10892 case R_ARM_LDR_PC_G1:
10893 case R_ARM_LDR_SB_G1:
10894 group = 1;
10895 break;
10896
10897 case R_ARM_LDR_PC_G2:
10898 case R_ARM_LDR_SB_G2:
10899 group = 2;
10900 break;
10901
10902 default:
10903 abort ();
10904 }
10905
10906 /* If REL, extract the addend from the insn. If RELA, it will
10907 have already been fetched for us. */
10908 if (globals->use_rel)
10909 {
10910 int negative = (insn & (1 << 23)) ? 1 : -1;
10911 signed_addend = negative * (insn & 0xfff);
10912 }
10913
10914 /* Compute the value (X) to go in the place. */
10915 if (r_type == R_ARM_LDR_PC_G0
10916 || r_type == R_ARM_LDR_PC_G1
10917 || r_type == R_ARM_LDR_PC_G2)
10918 /* PC relative. */
10919 signed_value = value - pc + signed_addend;
10920 else
10921 /* Section base relative. */
10922 signed_value = value - sb + signed_addend;
10923
10924 /* Calculate the value of the relevant G_{n-1} to obtain
10925 the residual at that stage. */
10926 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
10927 group - 1, &residual);
10928
10929 /* Check for overflow. */
10930 if (residual >= 0x1000)
10931 {
10932 (*_bfd_error_handler)
10933 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
10934 input_bfd, input_section,
10935 (long) rel->r_offset, labs (signed_value), howto->name);
10936 return bfd_reloc_overflow;
10937 }
10938
10939 /* Mask out the value and U bit. */
10940 insn &= 0xff7ff000;
10941
10942 /* Set the U bit if the value to go in the place is non-negative. */
10943 if (signed_value >= 0)
10944 insn |= 1 << 23;
10945
10946 /* Encode the offset. */
10947 insn |= residual;
10948
10949 bfd_put_32 (input_bfd, insn, hit_data);
10950 }
10951 return bfd_reloc_ok;
10952
10953 case R_ARM_LDRS_PC_G0:
10954 case R_ARM_LDRS_PC_G1:
10955 case R_ARM_LDRS_PC_G2:
10956 case R_ARM_LDRS_SB_G0:
10957 case R_ARM_LDRS_SB_G1:
10958 case R_ARM_LDRS_SB_G2:
10959 {
10960 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
10961 bfd_vma pc = input_section->output_section->vma
10962 + input_section->output_offset + rel->r_offset;
10963 /* sb is the origin of the *segment* containing the symbol. */
10964 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
10965 bfd_vma residual;
10966 bfd_signed_vma signed_value;
10967 int group = 0;
10968
10969 /* Determine which groups of bits to calculate. */
10970 switch (r_type)
10971 {
10972 case R_ARM_LDRS_PC_G0:
10973 case R_ARM_LDRS_SB_G0:
10974 group = 0;
10975 break;
10976
10977 case R_ARM_LDRS_PC_G1:
10978 case R_ARM_LDRS_SB_G1:
10979 group = 1;
10980 break;
10981
10982 case R_ARM_LDRS_PC_G2:
10983 case R_ARM_LDRS_SB_G2:
10984 group = 2;
10985 break;
10986
10987 default:
10988 abort ();
10989 }
10990
10991 /* If REL, extract the addend from the insn. If RELA, it will
10992 have already been fetched for us. */
10993 if (globals->use_rel)
10994 {
10995 int negative = (insn & (1 << 23)) ? 1 : -1;
10996 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
10997 }
10998
10999 /* Compute the value (X) to go in the place. */
11000 if (r_type == R_ARM_LDRS_PC_G0
11001 || r_type == R_ARM_LDRS_PC_G1
11002 || r_type == R_ARM_LDRS_PC_G2)
11003 /* PC relative. */
11004 signed_value = value - pc + signed_addend;
11005 else
11006 /* Section base relative. */
11007 signed_value = value - sb + signed_addend;
11008
11009 /* Calculate the value of the relevant G_{n-1} to obtain
11010 the residual at that stage. */
11011 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
11012 group - 1, &residual);
11013
11014 /* Check for overflow. */
11015 if (residual >= 0x100)
11016 {
11017 (*_bfd_error_handler)
11018 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
11019 input_bfd, input_section,
11020 (long) rel->r_offset, labs (signed_value), howto->name);
11021 return bfd_reloc_overflow;
11022 }
11023
11024 /* Mask out the value and U bit. */
11025 insn &= 0xff7ff0f0;
11026
11027 /* Set the U bit if the value to go in the place is non-negative. */
11028 if (signed_value >= 0)
11029 insn |= 1 << 23;
11030
11031 /* Encode the offset. */
11032 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
11033
11034 bfd_put_32 (input_bfd, insn, hit_data);
11035 }
11036 return bfd_reloc_ok;
11037
11038 case R_ARM_LDC_PC_G0:
11039 case R_ARM_LDC_PC_G1:
11040 case R_ARM_LDC_PC_G2:
11041 case R_ARM_LDC_SB_G0:
11042 case R_ARM_LDC_SB_G1:
11043 case R_ARM_LDC_SB_G2:
11044 {
11045 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
11046 bfd_vma pc = input_section->output_section->vma
11047 + input_section->output_offset + rel->r_offset;
11048 /* sb is the origin of the *segment* containing the symbol. */
11049 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
11050 bfd_vma residual;
11051 bfd_signed_vma signed_value;
11052 int group = 0;
11053
11054 /* Determine which groups of bits to calculate. */
11055 switch (r_type)
11056 {
11057 case R_ARM_LDC_PC_G0:
11058 case R_ARM_LDC_SB_G0:
11059 group = 0;
11060 break;
11061
11062 case R_ARM_LDC_PC_G1:
11063 case R_ARM_LDC_SB_G1:
11064 group = 1;
11065 break;
11066
11067 case R_ARM_LDC_PC_G2:
11068 case R_ARM_LDC_SB_G2:
11069 group = 2;
11070 break;
11071
11072 default:
11073 abort ();
11074 }
11075
11076 /* If REL, extract the addend from the insn. If RELA, it will
11077 have already been fetched for us. */
11078 if (globals->use_rel)
11079 {
11080 int negative = (insn & (1 << 23)) ? 1 : -1;
11081 signed_addend = negative * ((insn & 0xff) << 2);
11082 }
11083
11084 /* Compute the value (X) to go in the place. */
11085 if (r_type == R_ARM_LDC_PC_G0
11086 || r_type == R_ARM_LDC_PC_G1
11087 || r_type == R_ARM_LDC_PC_G2)
11088 /* PC relative. */
11089 signed_value = value - pc + signed_addend;
11090 else
11091 /* Section base relative. */
11092 signed_value = value - sb + signed_addend;
11093
11094 /* Calculate the value of the relevant G_{n-1} to obtain
11095 the residual at that stage. */
11096 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
11097 group - 1, &residual);
11098
11099 /* Check for overflow. (The absolute value to go in the place must be
11100 divisible by four and, after having been divided by four, must
11101 fit in eight bits.) */
11102 if ((residual & 0x3) != 0 || residual >= 0x400)
11103 {
11104 (*_bfd_error_handler)
11105 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
11106 input_bfd, input_section,
11107 (long) rel->r_offset, labs (signed_value), howto->name);
11108 return bfd_reloc_overflow;
11109 }
11110
11111 /* Mask out the value and U bit. */
11112 insn &= 0xff7fff00;
11113
11114 /* Set the U bit if the value to go in the place is non-negative. */
11115 if (signed_value >= 0)
11116 insn |= 1 << 23;
11117
11118 /* Encode the offset. */
11119 insn |= residual >> 2;
11120
11121 bfd_put_32 (input_bfd, insn, hit_data);
11122 }
11123 return bfd_reloc_ok;
11124
11125 case R_ARM_THM_ALU_ABS_G0_NC:
11126 case R_ARM_THM_ALU_ABS_G1_NC:
11127 case R_ARM_THM_ALU_ABS_G2_NC:
11128 case R_ARM_THM_ALU_ABS_G3_NC:
11129 {
11130 const int shift_array[4] = {0, 8, 16, 24};
11131 bfd_vma insn = bfd_get_16 (input_bfd, hit_data);
11132 bfd_vma addr = value;
11133 int shift = shift_array[r_type - R_ARM_THM_ALU_ABS_G0_NC];
11134
11135 /* Compute address. */
11136 if (globals->use_rel)
11137 signed_addend = insn & 0xff;
11138 addr += signed_addend;
11139 if (branch_type == ST_BRANCH_TO_THUMB)
11140 addr |= 1;
11141 /* Clean imm8 insn. */
11142 insn &= 0xff00;
11143 /* And update with correct part of address. */
11144 insn |= (addr >> shift) & 0xff;
11145 /* Update insn. */
11146 bfd_put_16 (input_bfd, insn, hit_data);
11147 }
11148
11149 *unresolved_reloc_p = FALSE;
11150 return bfd_reloc_ok;
11151
11152 default:
11153 return bfd_reloc_notsupported;
11154 }
11155 }
11156
11157 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
11158 static void
11159 arm_add_to_rel (bfd * abfd,
11160 bfd_byte * address,
11161 reloc_howto_type * howto,
11162 bfd_signed_vma increment)
11163 {
11164 bfd_signed_vma addend;
11165
11166 if (howto->type == R_ARM_THM_CALL
11167 || howto->type == R_ARM_THM_JUMP24)
11168 {
11169 int upper_insn, lower_insn;
11170 int upper, lower;
11171
11172 upper_insn = bfd_get_16 (abfd, address);
11173 lower_insn = bfd_get_16 (abfd, address + 2);
11174 upper = upper_insn & 0x7ff;
11175 lower = lower_insn & 0x7ff;
11176
11177 addend = (upper << 12) | (lower << 1);
11178 addend += increment;
11179 addend >>= 1;
11180
11181 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
11182 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
11183
11184 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
11185 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
11186 }
11187 else
11188 {
11189 bfd_vma contents;
11190
11191 contents = bfd_get_32 (abfd, address);
11192
11193 /* Get the (signed) value from the instruction. */
11194 addend = contents & howto->src_mask;
11195 if (addend & ((howto->src_mask + 1) >> 1))
11196 {
11197 bfd_signed_vma mask;
11198
11199 mask = -1;
11200 mask &= ~ howto->src_mask;
11201 addend |= mask;
11202 }
11203
11204 /* Add in the increment, (which is a byte value). */
11205 switch (howto->type)
11206 {
11207 default:
11208 addend += increment;
11209 break;
11210
11211 case R_ARM_PC24:
11212 case R_ARM_PLT32:
11213 case R_ARM_CALL:
11214 case R_ARM_JUMP24:
11215 addend <<= howto->size;
11216 addend += increment;
11217
11218 /* Should we check for overflow here ? */
11219
11220 /* Drop any undesired bits. */
11221 addend >>= howto->rightshift;
11222 break;
11223 }
11224
11225 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
11226
11227 bfd_put_32 (abfd, contents, address);
11228 }
11229 }
11230
11231 #define IS_ARM_TLS_RELOC(R_TYPE) \
11232 ((R_TYPE) == R_ARM_TLS_GD32 \
11233 || (R_TYPE) == R_ARM_TLS_LDO32 \
11234 || (R_TYPE) == R_ARM_TLS_LDM32 \
11235 || (R_TYPE) == R_ARM_TLS_DTPOFF32 \
11236 || (R_TYPE) == R_ARM_TLS_DTPMOD32 \
11237 || (R_TYPE) == R_ARM_TLS_TPOFF32 \
11238 || (R_TYPE) == R_ARM_TLS_LE32 \
11239 || (R_TYPE) == R_ARM_TLS_IE32 \
11240 || IS_ARM_TLS_GNU_RELOC (R_TYPE))
11241
11242 /* Specific set of relocations for the gnu tls dialect. */
11243 #define IS_ARM_TLS_GNU_RELOC(R_TYPE) \
11244 ((R_TYPE) == R_ARM_TLS_GOTDESC \
11245 || (R_TYPE) == R_ARM_TLS_CALL \
11246 || (R_TYPE) == R_ARM_THM_TLS_CALL \
11247 || (R_TYPE) == R_ARM_TLS_DESCSEQ \
11248 || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
11249
/* Relocate an ARM ELF section.  Applies RELOCS to the CONTENTS of
   INPUT_SECTION (from INPUT_BFD), resolving symbols via LOCAL_SYMS /
   LOCAL_SECTIONS for local symbols and the hash table for globals.
   Returns FALSE on any hard error (undefined symbol callback failure,
   SEC_MERGE misuse, unresolvable relocation, ...).  */

static bfd_boolean
elf32_arm_relocate_section (bfd * output_bfd,
			    struct bfd_link_info * info,
			    bfd * input_bfd,
			    asection * input_section,
			    bfd_byte * contents,
			    Elf_Internal_Rela * relocs,
			    Elf_Internal_Sym * local_syms,
			    asection ** local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel;
  Elf_Internal_Rela *relend;
  const char *name;
  struct elf32_arm_link_hash_table * globals;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  symtab_hdr = & elf_symtab_hdr (input_bfd);
  sym_hashes = elf_sym_hashes (input_bfd);

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int r_type;
      reloc_howto_type * howto;
      unsigned long r_symndx;
      Elf_Internal_Sym * sym;
      asection * sec;
      struct elf_link_hash_entry * h;
      bfd_vma relocation;
      bfd_reloc_status_type r;
      arelent bfd_reloc;
      char sym_type;
      bfd_boolean unresolved_reloc = FALSE;
      char *error_message = NULL;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      /* Map vendor/legacy reloc numbers to the canonical type.  */
      r_type = arm_real_reloc_type (globals, r_type);

      /* VTABLE relocs are for garbage collection only; nothing to
	 patch in the section contents.  */
      if (   r_type == R_ARM_GNU_VTENTRY
	  || r_type == R_ARM_GNU_VTINHERIT)
	continue;

      bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
      howto = bfd_reloc.howto;

      h = NULL;
      sym = NULL;
      sec = NULL;

      if (r_symndx < symtab_hdr->sh_info)
	{
	  /* Local symbol.  */
	  sym = local_syms + r_symndx;
	  sym_type = ELF32_ST_TYPE (sym->st_info);
	  sec = local_sections[r_symndx];

	  /* An object file might have a reference to a local
	     undefined symbol.  This is a daft object file, but we
	     should at least do something about it.  V4BX & NONE
	     relocations do not use the symbol and are explicitly
	     allowed to use the undefined symbol, so allow those.
	     Likewise for relocations against STN_UNDEF.  */
	  if (r_type != R_ARM_V4BX
	      && r_type != R_ARM_NONE
	      && r_symndx != STN_UNDEF
	      && bfd_is_und_section (sec)
	      && ELF_ST_BIND (sym->st_info) != STB_WEAK)
	    {
	      if (!info->callbacks->undefined_symbol
		  (info, bfd_elf_string_from_elf_section
		   (input_bfd, symtab_hdr->sh_link, sym->st_name),
		   input_bfd, input_section,
		   rel->r_offset, TRUE))
		return FALSE;
	    }

	  if (globals->use_rel)
	    {
	      relocation = (sec->output_section->vma
			    + sec->output_offset
			    + sym->st_value);
	      /* For REL links against SEC_MERGE sections the in-place
		 addend must be extracted, adjusted for where the
		 merged contents actually landed, and rewritten.  */
	      if (!bfd_link_relocatable (info)
		  && (sec->flags & SEC_MERGE)
		  && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
		{
		  asection *msec;
		  bfd_vma addend, value;

		  /* First, extract the addend encoded in the
		     instruction, per relocation type.  */
		  switch (r_type)
		    {
		    case R_ARM_MOVW_ABS_NC:
		    case R_ARM_MOVT_ABS:
		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);
		      addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
		      /* Sign-extend from 16 bits.  */
		      addend = (addend ^ 0x8000) - 0x8000;
		      break;

		    case R_ARM_THM_MOVW_ABS_NC:
		    case R_ARM_THM_MOVT_ABS:
		      value = bfd_get_16 (input_bfd, contents + rel->r_offset)
			      << 16;
		      value |= bfd_get_16 (input_bfd,
					   contents + rel->r_offset + 2);
		      addend = ((value & 0xf7000) >> 4) | (value & 0xff)
			       | ((value & 0x04000000) >> 15);
		      /* Sign-extend from 16 bits.  */
		      addend = (addend ^ 0x8000) - 0x8000;
		      break;

		    default:
		      /* Only simple, unshifted, contiguous fields can
			 be handled generically.  */
		      if (howto->rightshift
			  || (howto->src_mask & (howto->src_mask + 1)))
			{
			  (*_bfd_error_handler)
			    (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
			     input_bfd, input_section,
			     (long) rel->r_offset, howto->name);
			  return FALSE;
			}

		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);

		      /* Get the (signed) value from the instruction.  */
		      addend = value & howto->src_mask;
		      if (addend & ((howto->src_mask + 1) >> 1))
			{
			  bfd_signed_vma mask;

			  mask = -1;
			  mask &= ~ howto->src_mask;
			  addend |= mask;
			}
		      break;
		    }

		  /* Resolve the symbol within the merged section and
		     recompute the addend relative to it.  */
		  msec = sec;
		  addend =
		    _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
		    - relocation;
		  addend += msec->output_section->vma + msec->output_offset;

		  /* Cases here must match those in the preceding
		     switch statement.  */
		  switch (r_type)
		    {
		    case R_ARM_MOVW_ABS_NC:
		    case R_ARM_MOVT_ABS:
		      value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
			      | (addend & 0xfff);
		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
		      break;

		    case R_ARM_THM_MOVW_ABS_NC:
		    case R_ARM_THM_MOVT_ABS:
		      value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
			      | (addend & 0xff) | ((addend & 0x0800) << 15);
		      bfd_put_16 (input_bfd, value >> 16,
				  contents + rel->r_offset);
		      bfd_put_16 (input_bfd, value,
				  contents + rel->r_offset + 2);
		      break;

		    default:
		      value = (value & ~ howto->dst_mask)
			      | (addend & howto->dst_mask);
		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
		      break;
		    }
		}
	    }
	  else
	    relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
	}
      else
	{
	  /* Global symbol: resolve through the link hash table.  */
	  bfd_boolean warned, ignored;

	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
				   r_symndx, symtab_hdr, sym_hashes,
				   h, sec, relocation,
				   unresolved_reloc, warned, ignored);

	  sym_type = h->type;
	}

      if (sec != NULL && discarded_section (sec))
	RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
					 rel, 1, relend, howto, 0, contents);

      if (bfd_link_relocatable (info))
	{
	  /* This is a relocatable link.  We don't have to change
	     anything, unless the reloc is against a section symbol,
	     in which case we have to adjust according to where the
	     section symbol winds up in the output section.  */
	  if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
	    {
	      if (globals->use_rel)
		arm_add_to_rel (input_bfd, contents + rel->r_offset,
				howto, (bfd_signed_vma) sec->output_offset);
	      else
		rel->r_addend += sec->output_offset;
	    }
	  continue;
	}

      /* Pick a symbol name for diagnostics; fall back to the section
	 name for anonymous local symbols.  */
      if (h != NULL)
	name = h->root.root.string;
      else
	{
	  name = (bfd_elf_string_from_elf_section
		  (input_bfd, symtab_hdr->sh_link, sym->st_name));
	  if (name == NULL || *name == '\0')
	    name = bfd_section_name (input_bfd, sec);
	}

      /* Warn when a TLS reloc is used with a non-TLS symbol or
	 vice versa.  */
      if (r_symndx != STN_UNDEF
	  && r_type != R_ARM_NONE
	  && (h == NULL
	      || h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	  && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
	{
	  (*_bfd_error_handler)
	    ((sym_type == STT_TLS
	      ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
	      : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
	     input_bfd,
	     input_section,
	     (long) rel->r_offset,
	     howto->name,
	     name);
	}

      /* We call elf32_arm_final_link_relocate unless we're completely
	 done, i.e., the relaxation produced the final output we want,
	 and we won't let anybody mess with it.  Also, we have to do
	 addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
	 both in relaxed and non-relaxed cases.  */
      if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
	  || (IS_ARM_TLS_GNU_RELOC (r_type)
	      && !((h ? elf32_arm_hash_entry (h)->tls_type :
		    elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
		   & GOT_TLS_GDESC)))
	{
	  r = elf32_arm_tls_relax (globals, input_bfd, input_section,
				   contents, rel, h == NULL);
	  /* This may have been marked unresolved because it came from
	     a shared library.  But we've just dealt with that.  */
	  unresolved_reloc = 0;
	}
      else
	r = bfd_reloc_continue;

      if (r == bfd_reloc_continue)
	{
	  unsigned char branch_type =
	    h ? ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
	      : ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);

	  r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
					     input_section, contents, rel,
					     relocation, info, sec, name,
					     sym_type, branch_type, h,
					     &unresolved_reloc,
					     &error_message);
	}

      /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
	 because such sections are not SEC_ALLOC and thus ld.so will
	 not process them.  */
      if (unresolved_reloc
	  && !((input_section->flags & SEC_DEBUGGING) != 0
	       && h->def_dynamic)
	  && _bfd_elf_section_offset (output_bfd, info, input_section,
				      rel->r_offset) != (bfd_vma) -1)
	{
	  (*_bfd_error_handler)
	    (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
	     input_bfd,
	     input_section,
	     (long) rel->r_offset,
	     howto->name,
	     h->root.root.string);
	  return FALSE;
	}

      if (r != bfd_reloc_ok)
	{
	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      /* If the overflowing reloc was to an undefined symbol,
		 we have already printed one error message and there
		 is no point complaining again.  */
	      if ((! h ||
		   h->root.type != bfd_link_hash_undefined)
		  && (!((*info->callbacks->reloc_overflow)
			(info, (h ? &h->root : NULL), name, howto->name,
			 (bfd_vma) 0, input_bfd, input_section,
			 rel->r_offset))))
		  return FALSE;
	      break;

	    case bfd_reloc_undefined:
	      if (!((*info->callbacks->undefined_symbol)
		    (info, name, input_bfd, input_section,
		     rel->r_offset, TRUE)))
		return FALSE;
	      break;

	    case bfd_reloc_outofrange:
	      error_message = _("out of range");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      error_message = _("unsupported relocation");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      /* error_message should already be set.  */
	      goto common_error;

	    default:
	      error_message = _("unknown error");
	      /* Fall through.  */

	    common_error:
	      BFD_ASSERT (error_message != NULL);
	      if (!((*info->callbacks->reloc_dangerous)
		    (info, error_message, input_bfd, input_section,
		     rel->r_offset)))
		return FALSE;
	      break;
	    }
	}
    }

  return TRUE;
}
11597
11598 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
11599 adds the edit to the start of the list. (The list must be built in order of
11600 ascending TINDEX: the function's callers are primarily responsible for
11601 maintaining that condition). */
11602
11603 static void
11604 add_unwind_table_edit (arm_unwind_table_edit **head,
11605 arm_unwind_table_edit **tail,
11606 arm_unwind_edit_type type,
11607 asection *linked_section,
11608 unsigned int tindex)
11609 {
11610 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
11611 xmalloc (sizeof (arm_unwind_table_edit));
11612
11613 new_edit->type = type;
11614 new_edit->linked_section = linked_section;
11615 new_edit->index = tindex;
11616
11617 if (tindex > 0)
11618 {
11619 new_edit->next = NULL;
11620
11621 if (*tail)
11622 (*tail)->next = new_edit;
11623
11624 (*tail) = new_edit;
11625
11626 if (!*head)
11627 (*head) = new_edit;
11628 }
11629 else
11630 {
11631 new_edit->next = *head;
11632
11633 if (!*tail)
11634 *tail = new_edit;
11635
11636 *head = new_edit;
11637 }
11638 }
11639
11640 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
11641
11642 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST mau be negative. */
11643 static void
11644 adjust_exidx_size(asection *exidx_sec, int adjust)
11645 {
11646 asection *out_sec;
11647
11648 if (!exidx_sec->rawsize)
11649 exidx_sec->rawsize = exidx_sec->size;
11650
11651 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
11652 out_sec = exidx_sec->output_section;
11653 /* Adjust size of output section. */
11654 bfd_set_section_size (out_sec->owner, out_sec, out_sec->size +adjust);
11655 }
11656
11657 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
11658 static void
11659 insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
11660 {
11661 struct _arm_elf_section_data *exidx_arm_data;
11662
11663 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
11664 add_unwind_table_edit (
11665 &exidx_arm_data->u.exidx.unwind_edit_list,
11666 &exidx_arm_data->u.exidx.unwind_edit_tail,
11667 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
11668
11669 exidx_arm_data->additional_reloc_count++;
11670
11671 adjust_exidx_size(exidx_sec, 8);
11672 }
11673
11674 /* Scan .ARM.exidx tables, and create a list describing edits which should be
11675 made to those tables, such that:
11676
11677 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
11678 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
11679 codes which have been inlined into the index).
11680
11681 If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.
11682
11683 The edits are applied when the tables are written
11684 (in elf32_arm_write_section). */
11685
bfd_boolean
elf32_arm_fix_exidx_coverage (asection **text_section_order,
			      unsigned int num_text_sections,
			      struct bfd_link_info *info,
			      bfd_boolean merge_exidx_entries)
{
  bfd *inp;
  unsigned int last_second_word = 0, i;
  asection *last_exidx_sec = NULL;
  asection *last_text_sec = NULL;
  /* Type of the previous table entry: -1 = none seen yet,
     0 = EXIDX_CANTUNWIND, 1 = inlined unwind opcodes, 2 = table entry.  */
  int last_unwind_type = -1;

  /* Walk over all EXIDX sections, and create backlinks from the corresponding
     text sections.  */
  for (inp = info->input_bfds; inp != NULL; inp = inp->link.next)
    {
      asection *sec;

      for (sec = inp->sections; sec != NULL; sec = sec->next)
	{
	  struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
	  Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;

	  if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
	    continue;

	  if (elf_sec->linked_to)
	    {
	      Elf_Internal_Shdr *linked_hdr
		= &elf_section_data (elf_sec->linked_to)->this_hdr;
	      struct _arm_elf_section_data *linked_sec_arm_data
		= get_arm_elf_section_data (linked_hdr->bfd_section);

	      if (linked_sec_arm_data == NULL)
		continue;

	      /* Link this .ARM.exidx section back from the text section it
		 describes.  */
	      linked_sec_arm_data->u.text.arm_exidx_sec = sec;
	    }
	}
    }

  /* Walk all text sections in order of increasing VMA.  Eliminate duplicate
     index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
     and add EXIDX_CANTUNWIND entries for sections with no unwind table data.  */

  for (i = 0; i < num_text_sections; i++)
    {
      asection *sec = text_section_order[i];
      asection *exidx_sec;
      struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
      struct _arm_elf_section_data *exidx_arm_data;
      bfd_byte *contents = NULL;
      int deleted_exidx_bytes = 0;
      bfd_vma j;
      arm_unwind_table_edit *unwind_edit_head = NULL;
      arm_unwind_table_edit *unwind_edit_tail = NULL;
      Elf_Internal_Shdr *hdr;
      bfd *ibfd;

      if (arm_data == NULL)
	continue;

      exidx_sec = arm_data->u.text.arm_exidx_sec;
      if (exidx_sec == NULL)
	{
	  /* Section has no unwind data.  */
	  if (last_unwind_type == 0 || !last_exidx_sec)
	    continue;

	  /* Ignore zero sized sections.  */
	  if (sec->size == 0)
	    continue;

	  insert_cantunwind_after(last_text_sec, last_exidx_sec);
	  last_unwind_type = 0;
	  continue;
	}

      /* Skip /DISCARD/ sections.  */
      if (bfd_is_abs_section (exidx_sec->output_section))
	continue;

      hdr = &elf_section_data (exidx_sec)->this_hdr;
      if (hdr->sh_type != SHT_ARM_EXIDX)
	continue;

      exidx_arm_data = get_arm_elf_section_data (exidx_sec);
      if (exidx_arm_data == NULL)
	continue;

      ibfd = exidx_sec->owner;

      if (hdr->contents != NULL)
	contents = hdr->contents;
      else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
	/* An error?  */
	continue;

      if (last_unwind_type > 0)
	{
	  unsigned int first_word = bfd_get_32 (ibfd, contents);
	  /* Add cantunwind if first unwind item does not match section
	     start.  */
	  if (first_word != sec->vma)
	    {
	      insert_cantunwind_after (last_text_sec, last_exidx_sec);
	      last_unwind_type = 0;
	    }
	}

      /* Scan the table one 8-byte entry at a time, classifying each
	 entry by its second word and marking elidable duplicates.  */
      for (j = 0; j < hdr->sh_size; j += 8)
	{
	  unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
	  int unwind_type;
	  int elide = 0;

	  /* An EXIDX_CANTUNWIND entry.  */
	  if (second_word == 1)
	    {
	      if (last_unwind_type == 0)
		elide = 1;
	      unwind_type = 0;
	    }
	  /* Inlined unwinding data.  Merge if equal to previous.  */
	  else if ((second_word & 0x80000000) != 0)
	    {
	      if (merge_exidx_entries
		  && last_second_word == second_word && last_unwind_type == 1)
		elide = 1;
	      unwind_type = 1;
	      last_second_word = second_word;
	    }
	  /* Normal table entry.  In theory we could merge these too,
	     but duplicate entries are likely to be much less common.  */
	  else
	    unwind_type = 2;

	  if (elide && !bfd_link_relocatable (info))
	    {
	      add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
				     DELETE_EXIDX_ENTRY, NULL, j / 8);

	      deleted_exidx_bytes += 8;
	    }

	  last_unwind_type = unwind_type;
	}

      /* Free contents if we allocated it ourselves.  */
      if (contents != hdr->contents)
	free (contents);

      /* Record edits to be applied later (in elf32_arm_write_section).  */
      exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
      exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;

      if (deleted_exidx_bytes > 0)
	adjust_exidx_size(exidx_sec, -deleted_exidx_bytes);

      last_exidx_sec = exidx_sec;
      last_text_sec = sec;
    }

  /* Add terminating CANTUNWIND entry.  */
  if (!bfd_link_relocatable (info) && last_exidx_sec
      && last_unwind_type != 0)
    insert_cantunwind_after(last_text_sec, last_exidx_sec);

  return TRUE;
}
11858
11859 static bfd_boolean
11860 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
11861 bfd *ibfd, const char *name)
11862 {
11863 asection *sec, *osec;
11864
11865 sec = bfd_get_linker_section (ibfd, name);
11866 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
11867 return TRUE;
11868
11869 osec = sec->output_section;
11870 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
11871 return TRUE;
11872
11873 if (! bfd_set_section_contents (obfd, osec, sec->contents,
11874 sec->output_offset, sec->size))
11875 return FALSE;
11876
11877 return TRUE;
11878 }
11879
11880 static bfd_boolean
11881 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
11882 {
11883 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
11884 asection *sec, *osec;
11885
11886 if (globals == NULL)
11887 return FALSE;
11888
11889 /* Invoke the regular ELF backend linker to do all the work. */
11890 if (!bfd_elf_final_link (abfd, info))
11891 return FALSE;
11892
11893 /* Process stub sections (eg BE8 encoding, ...). */
11894 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
11895 unsigned int i;
11896 for (i=0; i<htab->top_id; i++)
11897 {
11898 sec = htab->stub_group[i].stub_sec;
11899 /* Only process it once, in its link_sec slot. */
11900 if (sec && i == htab->stub_group[i].link_sec->id)
11901 {
11902 osec = sec->output_section;
11903 elf32_arm_write_section (abfd, info, sec, sec->contents);
11904 if (! bfd_set_section_contents (abfd, osec, sec->contents,
11905 sec->output_offset, sec->size))
11906 return FALSE;
11907 }
11908 }
11909
11910 /* Write out any glue sections now that we have created all the
11911 stubs. */
11912 if (globals->bfd_of_glue_owner != NULL)
11913 {
11914 if (! elf32_arm_output_glue_section (info, abfd,
11915 globals->bfd_of_glue_owner,
11916 ARM2THUMB_GLUE_SECTION_NAME))
11917 return FALSE;
11918
11919 if (! elf32_arm_output_glue_section (info, abfd,
11920 globals->bfd_of_glue_owner,
11921 THUMB2ARM_GLUE_SECTION_NAME))
11922 return FALSE;
11923
11924 if (! elf32_arm_output_glue_section (info, abfd,
11925 globals->bfd_of_glue_owner,
11926 VFP11_ERRATUM_VENEER_SECTION_NAME))
11927 return FALSE;
11928
11929 if (! elf32_arm_output_glue_section (info, abfd,
11930 globals->bfd_of_glue_owner,
11931 STM32L4XX_ERRATUM_VENEER_SECTION_NAME))
11932 return FALSE;
11933
11934 if (! elf32_arm_output_glue_section (info, abfd,
11935 globals->bfd_of_glue_owner,
11936 ARM_BX_GLUE_SECTION_NAME))
11937 return FALSE;
11938 }
11939
11940 return TRUE;
11941 }
11942
11943 /* Return a best guess for the machine number based on the attributes. */
11944
11945 static unsigned int
11946 bfd_arm_get_mach_from_attributes (bfd * abfd)
11947 {
11948 int arch = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_CPU_arch);
11949
11950 switch (arch)
11951 {
11952 case TAG_CPU_ARCH_V4: return bfd_mach_arm_4;
11953 case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T;
11954 case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T;
11955
11956 case TAG_CPU_ARCH_V5TE:
11957 {
11958 char * name;
11959
11960 BFD_ASSERT (Tag_CPU_name < NUM_KNOWN_OBJ_ATTRIBUTES);
11961 name = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_CPU_name].s;
11962
11963 if (name)
11964 {
11965 if (strcmp (name, "IWMMXT2") == 0)
11966 return bfd_mach_arm_iWMMXt2;
11967
11968 if (strcmp (name, "IWMMXT") == 0)
11969 return bfd_mach_arm_iWMMXt;
11970
11971 if (strcmp (name, "XSCALE") == 0)
11972 {
11973 int wmmx;
11974
11975 BFD_ASSERT (Tag_WMMX_arch < NUM_KNOWN_OBJ_ATTRIBUTES);
11976 wmmx = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_WMMX_arch].i;
11977 switch (wmmx)
11978 {
11979 case 1: return bfd_mach_arm_iWMMXt;
11980 case 2: return bfd_mach_arm_iWMMXt2;
11981 default: return bfd_mach_arm_XScale;
11982 }
11983 }
11984 }
11985
11986 return bfd_mach_arm_5TE;
11987 }
11988
11989 default:
11990 return bfd_mach_arm_unknown;
11991 }
11992 }
11993
11994 /* Set the right machine number. */
11995
11996 static bfd_boolean
11997 elf32_arm_object_p (bfd *abfd)
11998 {
11999 unsigned int mach;
12000
12001 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
12002
12003 if (mach == bfd_mach_arm_unknown)
12004 {
12005 if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
12006 mach = bfd_mach_arm_ep9312;
12007 else
12008 mach = bfd_arm_get_mach_from_attributes (abfd);
12009 }
12010
12011 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
12012 return TRUE;
12013 }
12014
12015 /* Function to keep ARM specific flags in the ELF header. */
12016
12017 static bfd_boolean
12018 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
12019 {
12020 if (elf_flags_init (abfd)
12021 && elf_elfheader (abfd)->e_flags != flags)
12022 {
12023 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
12024 {
12025 if (flags & EF_ARM_INTERWORK)
12026 (*_bfd_error_handler)
12027 (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
12028 abfd);
12029 else
12030 _bfd_error_handler
12031 (_("Warning: Clearing the interworking flag of %B due to outside request"),
12032 abfd);
12033 }
12034 }
12035 else
12036 {
12037 elf_elfheader (abfd)->e_flags = flags;
12038 elf_flags_init (abfd) = TRUE;
12039 }
12040
12041 return TRUE;
12042 }
12043
12044 /* Copy backend specific data from one object module to another. */
12045
12046 static bfd_boolean
12047 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
12048 {
12049 flagword in_flags;
12050 flagword out_flags;
12051
12052 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
12053 return TRUE;
12054
12055 in_flags = elf_elfheader (ibfd)->e_flags;
12056 out_flags = elf_elfheader (obfd)->e_flags;
12057
12058 if (elf_flags_init (obfd)
12059 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
12060 && in_flags != out_flags)
12061 {
12062 /* Cannot mix APCS26 and APCS32 code. */
12063 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
12064 return FALSE;
12065
12066 /* Cannot mix float APCS and non-float APCS code. */
12067 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
12068 return FALSE;
12069
12070 /* If the src and dest have different interworking flags
12071 then turn off the interworking bit. */
12072 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
12073 {
12074 if (out_flags & EF_ARM_INTERWORK)
12075 _bfd_error_handler
12076 (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
12077 obfd, ibfd);
12078
12079 in_flags &= ~EF_ARM_INTERWORK;
12080 }
12081
12082 /* Likewise for PIC, though don't warn for this case. */
12083 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
12084 in_flags &= ~EF_ARM_PIC;
12085 }
12086
12087 elf_elfheader (obfd)->e_flags = in_flags;
12088 elf_flags_init (obfd) = TRUE;
12089
12090 return _bfd_elf_copy_private_bfd_data (ibfd, obfd);
12091 }
12092
12093 /* Values for Tag_ABI_PCS_R9_use. */
12094 enum
12095 {
12096 AEABI_R9_V6,
12097 AEABI_R9_SB,
12098 AEABI_R9_TLS,
12099 AEABI_R9_unused
12100 };
12101
12102 /* Values for Tag_ABI_PCS_RW_data. */
12103 enum
12104 {
12105 AEABI_PCS_RW_data_absolute,
12106 AEABI_PCS_RW_data_PCrel,
12107 AEABI_PCS_RW_data_SBrel,
12108 AEABI_PCS_RW_data_unused
12109 };
12110
12111 /* Values for Tag_ABI_enum_size. */
12112 enum
12113 {
12114 AEABI_enum_unused,
12115 AEABI_enum_short,
12116 AEABI_enum_wide,
12117 AEABI_enum_forced_wide
12118 };
12119
12120 /* Determine whether an object attribute tag takes an integer, a
12121 string or both. */
12122
12123 static int
12124 elf32_arm_obj_attrs_arg_type (int tag)
12125 {
12126 if (tag == Tag_compatibility)
12127 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
12128 else if (tag == Tag_nodefaults)
12129 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
12130 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
12131 return ATTR_TYPE_FLAG_STR_VAL;
12132 else if (tag < 32)
12133 return ATTR_TYPE_FLAG_INT_VAL;
12134 else
12135 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
12136 }
12137
12138 /* The ABI defines that Tag_conformance should be emitted first, and that
12139 Tag_nodefaults should be second (if either is defined). This sets those
12140 two positions, and bumps up the position of all the remaining tags to
12141 compensate. */
12142 static int
12143 elf32_arm_obj_attrs_order (int num)
12144 {
12145 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
12146 return Tag_conformance;
12147 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
12148 return Tag_nodefaults;
12149 if ((num - 2) < Tag_nodefaults)
12150 return num - 2;
12151 if ((num - 1) < Tag_conformance)
12152 return num - 1;
12153 return num;
12154 }
12155
12156 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
12157 static bfd_boolean
12158 elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
12159 {
12160 if ((tag & 127) < 64)
12161 {
12162 _bfd_error_handler
12163 (_("%B: Unknown mandatory EABI object attribute %d"),
12164 abfd, tag);
12165 bfd_set_error (bfd_error_bad_value);
12166 return FALSE;
12167 }
12168 else
12169 {
12170 _bfd_error_handler
12171 (_("Warning: %B: Unknown EABI object attribute %d"),
12172 abfd, tag);
12173 return TRUE;
12174 }
12175 }
12176
12177 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
12178 Returns -1 if no architecture could be read. */
12179
12180 static int
12181 get_secondary_compatible_arch (bfd *abfd)
12182 {
12183 obj_attribute *attr =
12184 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
12185
12186 /* Note: the tag and its argument below are uleb128 values, though
12187 currently-defined values fit in one byte for each. */
12188 if (attr->s
12189 && attr->s[0] == Tag_CPU_arch
12190 && (attr->s[1] & 128) != 128
12191 && attr->s[2] == 0)
12192 return attr->s[1];
12193
12194 /* This tag is "safely ignorable", so don't complain if it looks funny. */
12195 return -1;
12196 }
12197
12198 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
12199 The tag is removed if ARCH is -1. */
12200
12201 static void
12202 set_secondary_compatible_arch (bfd *abfd, int arch)
12203 {
12204 obj_attribute *attr =
12205 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
12206
12207 if (arch == -1)
12208 {
12209 attr->s = NULL;
12210 return;
12211 }
12212
12213 /* Note: the tag and its argument below are uleb128 values, though
12214 currently-defined values fit in one byte for each. */
12215 if (!attr->s)
12216 attr->s = (char *) bfd_alloc (abfd, 3);
12217 attr->s[0] = Tag_CPU_arch;
12218 attr->s[1] = arch;
12219 attr->s[2] = '\0';
12220 }
12221
12222 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
12223 into account. */
12224
/* Combine OLDTAG and NEWTAG (values of Tag_CPU_arch) into a merged
   architecture tag, taking the secondary-compatibility tags
   (Tag_also_compatible_with) into account.  *SECONDARY_COMPAT_OUT is
   the output file's secondary architecture on entry and is updated on
   exit.  Returns the merged tag, or -1 (after reporting an error
   against IBFD) if the architectures conflict.

   Each table below is indexed by the LOWER of the two tags being
   merged and named after the HIGHER one; comb[] maps tags from
   TAG_CPU_ARCH_V6T2 upward to the matching table.  */

static int
tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
		      int newtag, int secondary_compat)
{
#define T(X) TAG_CPU_ARCH_##X
  int tagl, tagh, result;
  const int v6t2[] =
    {
      T(V6T2),   /* PRE_V4.  */
      T(V6T2),   /* V4.  */
      T(V6T2),   /* V4T.  */
      T(V6T2),   /* V5T.  */
      T(V6T2),   /* V5TE.  */
      T(V6T2),   /* V5TEJ.  */
      T(V6T2),   /* V6.  */
      T(V7),     /* V6KZ.  */
      T(V6T2)    /* V6T2.  */
    };
  const int v6k[] =
    {
      T(V6K),    /* PRE_V4.  */
      T(V6K),    /* V4.  */
      T(V6K),    /* V4T.  */
      T(V6K),    /* V5T.  */
      T(V6K),    /* V5TE.  */
      T(V6K),    /* V5TEJ.  */
      T(V6K),    /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V6K)     /* V6K.  */
    };
  const int v7[] =
    {
      T(V7),     /* PRE_V4.  */
      T(V7),     /* V4.  */
      T(V7),     /* V4T.  */
      T(V7),     /* V5T.  */
      T(V7),     /* V5TE.  */
      T(V7),     /* V5TEJ.  */
      T(V7),     /* V6.  */
      T(V7),     /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V7),     /* V6K.  */
      T(V7)      /* V7.  */
    };
  const int v6_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V6K),    /* V4T.  */
      T(V6K),    /* V5T.  */
      T(V6K),    /* V5TE.  */
      T(V6K),    /* V5TEJ.  */
      T(V6K),    /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V6K),    /* V6K.  */
      T(V7),     /* V7.  */
      T(V6_M)    /* V6_M.  */
    };
  const int v6s_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V6K),    /* V4T.  */
      T(V6K),    /* V5T.  */
      T(V6K),    /* V5TE.  */
      T(V6K),    /* V5TEJ.  */
      T(V6K),    /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V6K),    /* V6K.  */
      T(V7),     /* V7.  */
      T(V6S_M),  /* V6_M.  */
      T(V6S_M)   /* V6S_M.  */
    };
  const int v7e_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V7E_M),  /* V4T.  */
      T(V7E_M),  /* V5T.  */
      T(V7E_M),  /* V5TE.  */
      T(V7E_M),  /* V5TEJ.  */
      T(V7E_M),  /* V6.  */
      T(V7E_M),  /* V6KZ.  */
      T(V7E_M),  /* V6T2.  */
      T(V7E_M),  /* V6K.  */
      T(V7E_M),  /* V7.  */
      T(V7E_M),  /* V6_M.  */
      T(V7E_M),  /* V6S_M.  */
      T(V7E_M)   /* V7E_M.  */
    };
  const int v8[] =
    {
      T(V8),     /* PRE_V4.  */
      T(V8),     /* V4.  */
      T(V8),     /* V4T.  */
      T(V8),     /* V5T.  */
      T(V8),     /* V5TE.  */
      T(V8),     /* V5TEJ.  */
      T(V8),     /* V6.  */
      T(V8),     /* V6KZ.  */
      T(V8),     /* V6T2.  */
      T(V8),     /* V6K.  */
      T(V8),     /* V7.  */
      T(V8),     /* V6_M.  */
      T(V8),     /* V6S_M.  */
      T(V8),     /* V7E_M.  */
      T(V8)      /* V8.  */
    };
  const int v8m_baseline[] =
    {
      -1,          /* PRE_V4.  */
      -1,          /* V4.  */
      -1,          /* V4T.  */
      -1,          /* V5T.  */
      -1,          /* V5TE.  */
      -1,          /* V5TEJ.  */
      -1,          /* V6.  */
      -1,          /* V6KZ.  */
      -1,          /* V6T2.  */
      -1,          /* V6K.  */
      -1,          /* V7.  */
      T(V8M_BASE), /* V6_M.  */
      T(V8M_BASE), /* V6S_M.  */
      -1,          /* V7E_M.  */
      -1,          /* V8.  */
      -1,          /* Unused slot.  */
      T(V8M_BASE)  /* V8-M BASELINE.  */
    };
  const int v8m_mainline[] =
    {
      -1,          /* PRE_V4.  */
      -1,          /* V4.  */
      -1,          /* V4T.  */
      -1,          /* V5T.  */
      -1,          /* V5TE.  */
      -1,          /* V5TEJ.  */
      -1,          /* V6.  */
      -1,          /* V6KZ.  */
      -1,          /* V6T2.  */
      -1,          /* V6K.  */
      T(V8M_MAIN), /* V7.  */
      T(V8M_MAIN), /* V6_M.  */
      T(V8M_MAIN), /* V6S_M.  */
      T(V8M_MAIN), /* V7E_M.  */
      -1,          /* V8.  */
      -1,          /* Unused slot.  */
      T(V8M_MAIN), /* V8-M BASELINE.  */
      T(V8M_MAIN)  /* V8-M MAINLINE.  */
    };
  /* Pseudo-architecture for merging the V4T + V6_M secondary-compat
     combination: identity mapping for everything it can pair with.  */
  const int v4t_plus_v6_m[] =
    {
      -1,              /* PRE_V4.  */
      -1,              /* V4.  */
      T(V4T),          /* V4T.  */
      T(V5T),          /* V5T.  */
      T(V5TE),         /* V5TE.  */
      T(V5TEJ),        /* V5TEJ.  */
      T(V6),           /* V6.  */
      T(V6KZ),         /* V6KZ.  */
      T(V6T2),         /* V6T2.  */
      T(V6K),          /* V6K.  */
      T(V7),           /* V7.  */
      T(V6_M),         /* V6_M.  */
      T(V6S_M),        /* V6S_M.  */
      T(V7E_M),        /* V7E_M.  */
      T(V8),           /* V8.  */
      -1,              /* Unused.  */
      T(V8M_BASE),     /* V8-M BASELINE.  */
      T(V8M_MAIN),     /* V8-M MAINLINE.  */
      T(V4T_PLUS_V6_M) /* V4T plus V6_M.  */
    };
  /* Dispatch table, indexed by (higher tag - TAG_CPU_ARCH_V6T2).
     The NULL entry corresponds to the unused tag value between V8
     and V8-M baseline.  */
  const int *comb[] =
    {
      v6t2,
      v6k,
      v7,
      v6_m,
      v6s_m,
      v7e_m,
      v8,
      NULL,
      v8m_baseline,
      v8m_mainline,
      /* Pseudo-architecture.  */
      v4t_plus_v6_m
    };

  /* Check we've not got a higher architecture than we know about.  */

  if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
    {
      _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
      return -1;
    }

  /* Override old tag if we have a Tag_also_compatible_with on the output.  */

  if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
      || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
    oldtag = T(V4T_PLUS_V6_M);

  /* And override the new tag if we have a Tag_also_compatible_with on the
     input.  */

  if ((newtag == T(V6_M) && secondary_compat == T(V4T))
      || (newtag == T(V4T) && secondary_compat == T(V6_M)))
    newtag = T(V4T_PLUS_V6_M);

  tagl = (oldtag < newtag) ? oldtag : newtag;
  result = tagh = (oldtag > newtag) ? oldtag : newtag;

  /* Architectures before V6KZ add features monotonically.  */
  if (tagh <= TAG_CPU_ARCH_V6KZ)
    return result;

  /* Look up the merged tag: table for the higher arch, indexed by the
     lower arch.  */
  result = comb[tagh - T(V6T2)] ? comb[tagh - T(V6T2)][tagl] : -1;

  /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
     as the canonical version.  */
  if (result == T(V4T_PLUS_V6_M))
    {
      result = T(V4T);
      *secondary_compat_out = T(V6_M);
    }
  else
    *secondary_compat_out = -1;

  if (result == -1)
    {
      _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
			  ibfd, oldtag, newtag);
      return -1;
    }

  return result;
#undef T
}
12465
12466 /* Query attributes object to see if integer divide instructions may be
12467 present in an object. */
12468 static bfd_boolean
12469 elf32_arm_attributes_accept_div (const obj_attribute *attr)
12470 {
12471 int arch = attr[Tag_CPU_arch].i;
12472 int profile = attr[Tag_CPU_arch_profile].i;
12473
12474 switch (attr[Tag_DIV_use].i)
12475 {
12476 case 0:
12477 /* Integer divide allowed if instruction contained in archetecture. */
12478 if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
12479 return TRUE;
12480 else if (arch >= TAG_CPU_ARCH_V7E_M)
12481 return TRUE;
12482 else
12483 return FALSE;
12484
12485 case 1:
12486 /* Integer divide explicitly prohibited. */
12487 return FALSE;
12488
12489 default:
12490 /* Unrecognised case - treat as allowing divide everywhere. */
12491 case 2:
12492 /* Integer divide allowed in ARM state. */
12493 return TRUE;
12494 }
12495 }
12496
12497 /* Query attributes object to see if integer divide instructions are
12498 forbidden to be in the object. This is not the inverse of
12499 elf32_arm_attributes_accept_div. */
12500 static bfd_boolean
12501 elf32_arm_attributes_forbid_div (const obj_attribute *attr)
12502 {
12503 return attr[Tag_DIV_use].i == 1;
12504 }
12505
12506 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
12507 are conflicting attributes. */
12508
12509 static bfd_boolean
12510 elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
12511 {
12512 obj_attribute *in_attr;
12513 obj_attribute *out_attr;
12514 /* Some tags have 0 = don't care, 1 = strong requirement,
12515 2 = weak requirement. */
12516 static const int order_021[3] = {0, 2, 1};
12517 int i;
12518 bfd_boolean result = TRUE;
12519 const char *sec_name = get_elf_backend_data (ibfd)->obj_attrs_section;
12520
12521 /* Skip the linker stubs file. This preserves previous behavior
12522 of accepting unknown attributes in the first input file - but
12523 is that a bug? */
12524 if (ibfd->flags & BFD_LINKER_CREATED)
12525 return TRUE;
12526
12527 /* Skip any input that hasn't attribute section.
12528 This enables to link object files without attribute section with
12529 any others. */
12530 if (bfd_get_section_by_name (ibfd, sec_name) == NULL)
12531 return TRUE;
12532
12533 if (!elf_known_obj_attributes_proc (obfd)[0].i)
12534 {
12535 /* This is the first object. Copy the attributes. */
12536 _bfd_elf_copy_obj_attributes (ibfd, obfd);
12537
12538 out_attr = elf_known_obj_attributes_proc (obfd);
12539
12540 /* Use the Tag_null value to indicate the attributes have been
12541 initialized. */
12542 out_attr[0].i = 1;
12543
12544 /* We do not output objects with Tag_MPextension_use_legacy - we move
12545 the attribute's value to Tag_MPextension_use. */
12546 if (out_attr[Tag_MPextension_use_legacy].i != 0)
12547 {
12548 if (out_attr[Tag_MPextension_use].i != 0
12549 && out_attr[Tag_MPextension_use_legacy].i
12550 != out_attr[Tag_MPextension_use].i)
12551 {
12552 _bfd_error_handler
12553 (_("Error: %B has both the current and legacy "
12554 "Tag_MPextension_use attributes"), ibfd);
12555 result = FALSE;
12556 }
12557
12558 out_attr[Tag_MPextension_use] =
12559 out_attr[Tag_MPextension_use_legacy];
12560 out_attr[Tag_MPextension_use_legacy].type = 0;
12561 out_attr[Tag_MPextension_use_legacy].i = 0;
12562 }
12563
12564 return result;
12565 }
12566
12567 in_attr = elf_known_obj_attributes_proc (ibfd);
12568 out_attr = elf_known_obj_attributes_proc (obfd);
12569 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
12570 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
12571 {
12572 /* Ignore mismatches if the object doesn't use floating point or is
12573 floating point ABI independent. */
12574 if (out_attr[Tag_ABI_FP_number_model].i == AEABI_FP_number_model_none
12575 || (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
12576 && out_attr[Tag_ABI_VFP_args].i == AEABI_VFP_args_compatible))
12577 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
12578 else if (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
12579 && in_attr[Tag_ABI_VFP_args].i != AEABI_VFP_args_compatible)
12580 {
12581 _bfd_error_handler
12582 (_("error: %B uses VFP register arguments, %B does not"),
12583 in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
12584 in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
12585 result = FALSE;
12586 }
12587 }
12588
12589 for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
12590 {
12591 /* Merge this attribute with existing attributes. */
12592 switch (i)
12593 {
12594 case Tag_CPU_raw_name:
12595 case Tag_CPU_name:
12596 /* These are merged after Tag_CPU_arch. */
12597 break;
12598
12599 case Tag_ABI_optimization_goals:
12600 case Tag_ABI_FP_optimization_goals:
12601 /* Use the first value seen. */
12602 break;
12603
12604 case Tag_CPU_arch:
12605 {
12606 int secondary_compat = -1, secondary_compat_out = -1;
12607 unsigned int saved_out_attr = out_attr[i].i;
12608 int arch_attr;
12609 static const char *name_table[] =
12610 {
12611 /* These aren't real CPU names, but we can't guess
12612 that from the architecture version alone. */
12613 "Pre v4",
12614 "ARM v4",
12615 "ARM v4T",
12616 "ARM v5T",
12617 "ARM v5TE",
12618 "ARM v5TEJ",
12619 "ARM v6",
12620 "ARM v6KZ",
12621 "ARM v6T2",
12622 "ARM v6K",
12623 "ARM v7",
12624 "ARM v6-M",
12625 "ARM v6S-M",
12626 "ARM v8",
12627 "",
12628 "ARM v8-M.baseline",
12629 "ARM v8-M.mainline",
12630 };
12631
12632 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
12633 secondary_compat = get_secondary_compatible_arch (ibfd);
12634 secondary_compat_out = get_secondary_compatible_arch (obfd);
12635 arch_attr = tag_cpu_arch_combine (ibfd, out_attr[i].i,
12636 &secondary_compat_out,
12637 in_attr[i].i,
12638 secondary_compat);
12639
12640 /* Return with error if failed to merge. */
12641 if (arch_attr == -1)
12642 return FALSE;
12643
12644 out_attr[i].i = arch_attr;
12645
12646 set_secondary_compatible_arch (obfd, secondary_compat_out);
12647
12648 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
12649 if (out_attr[i].i == saved_out_attr)
12650 ; /* Leave the names alone. */
12651 else if (out_attr[i].i == in_attr[i].i)
12652 {
12653 /* The output architecture has been changed to match the
12654 input architecture. Use the input names. */
12655 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
12656 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
12657 : NULL;
12658 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
12659 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
12660 : NULL;
12661 }
12662 else
12663 {
12664 out_attr[Tag_CPU_name].s = NULL;
12665 out_attr[Tag_CPU_raw_name].s = NULL;
12666 }
12667
12668 /* If we still don't have a value for Tag_CPU_name,
12669 make one up now. Tag_CPU_raw_name remains blank. */
12670 if (out_attr[Tag_CPU_name].s == NULL
12671 && out_attr[i].i < ARRAY_SIZE (name_table))
12672 out_attr[Tag_CPU_name].s =
12673 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
12674 }
12675 break;
12676
12677 case Tag_ARM_ISA_use:
12678 case Tag_THUMB_ISA_use:
12679 case Tag_WMMX_arch:
12680 case Tag_Advanced_SIMD_arch:
12681 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
12682 case Tag_ABI_FP_rounding:
12683 case Tag_ABI_FP_exceptions:
12684 case Tag_ABI_FP_user_exceptions:
12685 case Tag_ABI_FP_number_model:
12686 case Tag_FP_HP_extension:
12687 case Tag_CPU_unaligned_access:
12688 case Tag_T2EE_use:
12689 case Tag_MPextension_use:
12690 /* Use the largest value specified. */
12691 if (in_attr[i].i > out_attr[i].i)
12692 out_attr[i].i = in_attr[i].i;
12693 break;
12694
12695 case Tag_ABI_align_preserved:
12696 case Tag_ABI_PCS_RO_data:
12697 /* Use the smallest value specified. */
12698 if (in_attr[i].i < out_attr[i].i)
12699 out_attr[i].i = in_attr[i].i;
12700 break;
12701
12702 case Tag_ABI_align_needed:
12703 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
12704 && (in_attr[Tag_ABI_align_preserved].i == 0
12705 || out_attr[Tag_ABI_align_preserved].i == 0))
12706 {
12707 /* This error message should be enabled once all non-conformant
12708 binaries in the toolchain have had the attributes set
12709 properly.
12710 _bfd_error_handler
12711 (_("error: %B: 8-byte data alignment conflicts with %B"),
12712 obfd, ibfd);
12713 result = FALSE; */
12714 }
12715 /* Fall through. */
12716 case Tag_ABI_FP_denormal:
12717 case Tag_ABI_PCS_GOT_use:
12718 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
12719 value if greater than 2 (for future-proofing). */
12720 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
12721 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
12722 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
12723 out_attr[i].i = in_attr[i].i;
12724 break;
12725
12726 case Tag_Virtualization_use:
12727 /* The virtualization tag effectively stores two bits of
12728 information: the intended use of TrustZone (in bit 0), and the
12729 intended use of Virtualization (in bit 1). */
12730 if (out_attr[i].i == 0)
12731 out_attr[i].i = in_attr[i].i;
12732 else if (in_attr[i].i != 0
12733 && in_attr[i].i != out_attr[i].i)
12734 {
12735 if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
12736 out_attr[i].i = 3;
12737 else
12738 {
12739 _bfd_error_handler
12740 (_("error: %B: unable to merge virtualization attributes "
12741 "with %B"),
12742 obfd, ibfd);
12743 result = FALSE;
12744 }
12745 }
12746 break;
12747
12748 case Tag_CPU_arch_profile:
12749 if (out_attr[i].i != in_attr[i].i)
12750 {
12751 /* 0 will merge with anything.
12752 'A' and 'S' merge to 'A'.
12753 'R' and 'S' merge to 'R'.
12754 'M' and 'A|R|S' is an error. */
12755 if (out_attr[i].i == 0
12756 || (out_attr[i].i == 'S'
12757 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
12758 out_attr[i].i = in_attr[i].i;
12759 else if (in_attr[i].i == 0
12760 || (in_attr[i].i == 'S'
12761 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
12762 ; /* Do nothing. */
12763 else
12764 {
12765 _bfd_error_handler
12766 (_("error: %B: Conflicting architecture profiles %c/%c"),
12767 ibfd,
12768 in_attr[i].i ? in_attr[i].i : '0',
12769 out_attr[i].i ? out_attr[i].i : '0');
12770 result = FALSE;
12771 }
12772 }
12773 break;
12774
12775 case Tag_DSP_extension:
12776 /* No need to change output value if any of:
12777 - pre (<=) ARMv5T input architecture (do not have DSP)
12778 - M input profile not ARMv7E-M and do not have DSP. */
12779 if (in_attr[Tag_CPU_arch].i <= 3
12780 || (in_attr[Tag_CPU_arch_profile].i == 'M'
12781 && in_attr[Tag_CPU_arch].i != 13
12782 && in_attr[i].i == 0))
12783 ; /* Do nothing. */
12784 /* Output value should be 0 if DSP part of architecture, ie.
12785 - post (>=) ARMv5te architecture output
12786 - A, R or S profile output or ARMv7E-M output architecture. */
12787 else if (out_attr[Tag_CPU_arch].i >= 4
12788 && (out_attr[Tag_CPU_arch_profile].i == 'A'
12789 || out_attr[Tag_CPU_arch_profile].i == 'R'
12790 || out_attr[Tag_CPU_arch_profile].i == 'S'
12791 || out_attr[Tag_CPU_arch].i == 13))
12792 out_attr[i].i = 0;
12793 /* Otherwise, DSP instructions are added and not part of output
12794 architecture. */
12795 else
12796 out_attr[i].i = 1;
12797 break;
12798
12799 case Tag_FP_arch:
12800 {
12801 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
12802 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
12803 when it's 0. It might mean absence of FP hardware if
12804 Tag_FP_arch is zero. */
12805
12806 #define VFP_VERSION_COUNT 9
12807 static const struct
12808 {
12809 int ver;
12810 int regs;
12811 } vfp_versions[VFP_VERSION_COUNT] =
12812 {
12813 {0, 0},
12814 {1, 16},
12815 {2, 16},
12816 {3, 32},
12817 {3, 16},
12818 {4, 32},
12819 {4, 16},
12820 {8, 32},
12821 {8, 16}
12822 };
12823 int ver;
12824 int regs;
12825 int newval;
12826
12827 /* If the output has no requirement about FP hardware,
12828 follow the requirement of the input. */
12829 if (out_attr[i].i == 0)
12830 {
12831 BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
12832 out_attr[i].i = in_attr[i].i;
12833 out_attr[Tag_ABI_HardFP_use].i
12834 = in_attr[Tag_ABI_HardFP_use].i;
12835 break;
12836 }
12837 /* If the input has no requirement about FP hardware, do
12838 nothing. */
12839 else if (in_attr[i].i == 0)
12840 {
12841 BFD_ASSERT (in_attr[Tag_ABI_HardFP_use].i == 0);
12842 break;
12843 }
12844
12845 /* Both the input and the output have nonzero Tag_FP_arch.
12846 So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it's zero. */
12847
12848 /* If both the input and the output have zero Tag_ABI_HardFP_use,
12849 do nothing. */
12850 if (in_attr[Tag_ABI_HardFP_use].i == 0
12851 && out_attr[Tag_ABI_HardFP_use].i == 0)
12852 ;
12853 /* If the input and the output have different Tag_ABI_HardFP_use,
12854 the combination of them is 0 (implied by Tag_FP_arch). */
12855 else if (in_attr[Tag_ABI_HardFP_use].i
12856 != out_attr[Tag_ABI_HardFP_use].i)
12857 out_attr[Tag_ABI_HardFP_use].i = 0;
12858
12859 /* Now we can handle Tag_FP_arch. */
12860
12861 /* Values of VFP_VERSION_COUNT or more aren't defined, so just
12862 pick the biggest. */
12863 if (in_attr[i].i >= VFP_VERSION_COUNT
12864 && in_attr[i].i > out_attr[i].i)
12865 {
12866 out_attr[i] = in_attr[i];
12867 break;
12868 }
12869 /* The output uses the superset of input features
12870 (ISA version) and registers. */
12871 ver = vfp_versions[in_attr[i].i].ver;
12872 if (ver < vfp_versions[out_attr[i].i].ver)
12873 ver = vfp_versions[out_attr[i].i].ver;
12874 regs = vfp_versions[in_attr[i].i].regs;
12875 if (regs < vfp_versions[out_attr[i].i].regs)
12876 regs = vfp_versions[out_attr[i].i].regs;
12877 /* This assumes all possible supersets are also a valid
12878 options. */
12879 for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--)
12880 {
12881 if (regs == vfp_versions[newval].regs
12882 && ver == vfp_versions[newval].ver)
12883 break;
12884 }
12885 out_attr[i].i = newval;
12886 }
12887 break;
12888 case Tag_PCS_config:
12889 if (out_attr[i].i == 0)
12890 out_attr[i].i = in_attr[i].i;
12891 else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i)
12892 {
12893 /* It's sometimes ok to mix different configs, so this is only
12894 a warning. */
12895 _bfd_error_handler
12896 (_("Warning: %B: Conflicting platform configuration"), ibfd);
12897 }
12898 break;
12899 case Tag_ABI_PCS_R9_use:
12900 if (in_attr[i].i != out_attr[i].i
12901 && out_attr[i].i != AEABI_R9_unused
12902 && in_attr[i].i != AEABI_R9_unused)
12903 {
12904 _bfd_error_handler
12905 (_("error: %B: Conflicting use of R9"), ibfd);
12906 result = FALSE;
12907 }
12908 if (out_attr[i].i == AEABI_R9_unused)
12909 out_attr[i].i = in_attr[i].i;
12910 break;
12911 case Tag_ABI_PCS_RW_data:
12912 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
12913 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
12914 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
12915 {
12916 _bfd_error_handler
12917 (_("error: %B: SB relative addressing conflicts with use of R9"),
12918 ibfd);
12919 result = FALSE;
12920 }
12921 /* Use the smallest value specified. */
12922 if (in_attr[i].i < out_attr[i].i)
12923 out_attr[i].i = in_attr[i].i;
12924 break;
12925 case Tag_ABI_PCS_wchar_t:
12926 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
12927 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
12928 {
12929 _bfd_error_handler
12930 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
12931 ibfd, in_attr[i].i, out_attr[i].i);
12932 }
12933 else if (in_attr[i].i && !out_attr[i].i)
12934 out_attr[i].i = in_attr[i].i;
12935 break;
12936 case Tag_ABI_enum_size:
12937 if (in_attr[i].i != AEABI_enum_unused)
12938 {
12939 if (out_attr[i].i == AEABI_enum_unused
12940 || out_attr[i].i == AEABI_enum_forced_wide)
12941 {
12942 /* The existing object is compatible with anything.
12943 Use whatever requirements the new object has. */
12944 out_attr[i].i = in_attr[i].i;
12945 }
12946 else if (in_attr[i].i != AEABI_enum_forced_wide
12947 && out_attr[i].i != in_attr[i].i
12948 && !elf_arm_tdata (obfd)->no_enum_size_warning)
12949 {
12950 static const char *aeabi_enum_names[] =
12951 { "", "variable-size", "32-bit", "" };
12952 const char *in_name =
12953 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
12954 ? aeabi_enum_names[in_attr[i].i]
12955 : "<unknown>";
12956 const char *out_name =
12957 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
12958 ? aeabi_enum_names[out_attr[i].i]
12959 : "<unknown>";
12960 _bfd_error_handler
12961 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
12962 ibfd, in_name, out_name);
12963 }
12964 }
12965 break;
12966 case Tag_ABI_VFP_args:
12967 /* Aready done. */
12968 break;
12969 case Tag_ABI_WMMX_args:
12970 if (in_attr[i].i != out_attr[i].i)
12971 {
12972 _bfd_error_handler
12973 (_("error: %B uses iWMMXt register arguments, %B does not"),
12974 ibfd, obfd);
12975 result = FALSE;
12976 }
12977 break;
12978 case Tag_compatibility:
12979 /* Merged in target-independent code. */
12980 break;
12981 case Tag_ABI_HardFP_use:
12982 /* This is handled along with Tag_FP_arch. */
12983 break;
12984 case Tag_ABI_FP_16bit_format:
12985 if (in_attr[i].i != 0 && out_attr[i].i != 0)
12986 {
12987 if (in_attr[i].i != out_attr[i].i)
12988 {
12989 _bfd_error_handler
12990 (_("error: fp16 format mismatch between %B and %B"),
12991 ibfd, obfd);
12992 result = FALSE;
12993 }
12994 }
12995 if (in_attr[i].i != 0)
12996 out_attr[i].i = in_attr[i].i;
12997 break;
12998
12999 case Tag_DIV_use:
13000 /* A value of zero on input means that the divide instruction may
13001 be used if available in the base architecture as specified via
13002 Tag_CPU_arch and Tag_CPU_arch_profile. A value of 1 means that
13003 the user did not want divide instructions. A value of 2
13004 explicitly means that divide instructions were allowed in ARM
13005 and Thumb state. */
13006 if (in_attr[i].i == out_attr[i].i)
13007 /* Do nothing. */ ;
13008 else if (elf32_arm_attributes_forbid_div (in_attr)
13009 && !elf32_arm_attributes_accept_div (out_attr))
13010 out_attr[i].i = 1;
13011 else if (elf32_arm_attributes_forbid_div (out_attr)
13012 && elf32_arm_attributes_accept_div (in_attr))
13013 out_attr[i].i = in_attr[i].i;
13014 else if (in_attr[i].i == 2)
13015 out_attr[i].i = in_attr[i].i;
13016 break;
13017
13018 case Tag_MPextension_use_legacy:
13019 /* We don't output objects with Tag_MPextension_use_legacy - we
13020 move the value to Tag_MPextension_use. */
13021 if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
13022 {
13023 if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
13024 {
13025 _bfd_error_handler
13026 (_("%B has has both the current and legacy "
13027 "Tag_MPextension_use attributes"),
13028 ibfd);
13029 result = FALSE;
13030 }
13031 }
13032
13033 if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
13034 out_attr[Tag_MPextension_use] = in_attr[i];
13035
13036 break;
13037
13038 case Tag_nodefaults:
13039 /* This tag is set if it exists, but the value is unused (and is
13040 typically zero). We don't actually need to do anything here -
13041 the merge happens automatically when the type flags are merged
13042 below. */
13043 break;
13044 case Tag_also_compatible_with:
13045 /* Already done in Tag_CPU_arch. */
13046 break;
13047 case Tag_conformance:
13048 /* Keep the attribute if it matches. Throw it away otherwise.
13049 No attribute means no claim to conform. */
13050 if (!in_attr[i].s || !out_attr[i].s
13051 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
13052 out_attr[i].s = NULL;
13053 break;
13054
13055 default:
13056 result
13057 = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
13058 }
13059
13060 /* If out_attr was copied from in_attr then it won't have a type yet. */
13061 if (in_attr[i].type && !out_attr[i].type)
13062 out_attr[i].type = in_attr[i].type;
13063 }
13064
13065 /* Merge Tag_compatibility attributes and any common GNU ones. */
13066 if (!_bfd_elf_merge_object_attributes (ibfd, obfd))
13067 return FALSE;
13068
13069 /* Check for any attributes not known on ARM. */
13070 result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);
13071
13072 return result;
13073 }
13074
13075
13076 /* Return TRUE if the two EABI versions are incompatible. */
13077
13078 static bfd_boolean
13079 elf32_arm_versions_compatible (unsigned iver, unsigned over)
13080 {
13081 /* v4 and v5 are the same spec before and after it was released,
13082 so allow mixing them. */
13083 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
13084 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
13085 return TRUE;
13086
13087 return (iver == over);
13088 }
13089
13090 /* Merge backend specific data from an object file to the output
13091 object file when linking. */
13092
13093 static bfd_boolean
13094 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd);
13095
13096 /* Display the flags field. */
13097
/* Display the backend-specific e_flags field of ABFD on the FILE
   passed in PTR.  Decodes the flag bits according to the EABI version
   encoded in the flags; any bits left over after decoding are reported
   as unrecognised.  Always returns TRUE.  */

static bfd_boolean
elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
{
  FILE * file = (FILE *) ptr;
  unsigned long flags;

  BFD_ASSERT (abfd != NULL && ptr != NULL);

  /* Print normal ELF private data.  */
  _bfd_elf_print_private_bfd_data (abfd, ptr);

  flags = elf_elfheader (abfd)->e_flags;
  /* Ignore init flag - it may not be set, despite the flags field
     containing valid data.  */

  /* xgettext:c-format */
  fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);

  /* Decoded bits are cleared from FLAGS as we go, so anything left at
     the end is unrecognised.  */
  switch (EF_ARM_EABI_VERSION (flags))
    {
    case EF_ARM_EABI_UNKNOWN:
      /* The following flag bits are GNU extensions and not part of the
	 official ARM ELF extended ABI.  Hence they are only decoded if
	 the EABI version is not set.  */
      if (flags & EF_ARM_INTERWORK)
	fprintf (file, _(" [interworking enabled]"));

      if (flags & EF_ARM_APCS_26)
	fprintf (file, " [APCS-26]");
      else
	fprintf (file, " [APCS-32]");

      if (flags & EF_ARM_VFP_FLOAT)
	fprintf (file, _(" [VFP float format]"));
      else if (flags & EF_ARM_MAVERICK_FLOAT)
	fprintf (file, _(" [Maverick float format]"));
      else
	fprintf (file, _(" [FPA float format]"));

      if (flags & EF_ARM_APCS_FLOAT)
	fprintf (file, _(" [floats passed in float registers]"));

      if (flags & EF_ARM_PIC)
	fprintf (file, _(" [position independent]"));

      if (flags & EF_ARM_NEW_ABI)
	fprintf (file, _(" [new ABI]"));

      if (flags & EF_ARM_OLD_ABI)
	fprintf (file, _(" [old ABI]"));

      if (flags & EF_ARM_SOFT_FLOAT)
	fprintf (file, _(" [software FP]"));

      flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
		 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
		 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
		 | EF_ARM_MAVERICK_FLOAT);
      break;

    case EF_ARM_EABI_VER1:
      fprintf (file, _(" [Version1 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      flags &= ~ EF_ARM_SYMSARESORTED;
      break;

    case EF_ARM_EABI_VER2:
      fprintf (file, _(" [Version2 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      if (flags & EF_ARM_DYNSYMSUSESEGIDX)
	fprintf (file, _(" [dynamic symbols use segment index]"));

      if (flags & EF_ARM_MAPSYMSFIRST)
	fprintf (file, _(" [mapping symbols precede others]"));

      flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
		 | EF_ARM_MAPSYMSFIRST);
      break;

    case EF_ARM_EABI_VER3:
      fprintf (file, _(" [Version3 EABI]"));
      break;

    case EF_ARM_EABI_VER4:
      fprintf (file, _(" [Version4 EABI]"));
      /* VER4 shares the BE8/LE8 decoding with VER5 below.  */
      goto eabi;

    case EF_ARM_EABI_VER5:
      fprintf (file, _(" [Version5 EABI]"));

      if (flags & EF_ARM_ABI_FLOAT_SOFT)
	fprintf (file, _(" [soft-float ABI]"));

      if (flags & EF_ARM_ABI_FLOAT_HARD)
	fprintf (file, _(" [hard-float ABI]"));

      flags &= ~(EF_ARM_ABI_FLOAT_SOFT | EF_ARM_ABI_FLOAT_HARD);

    eabi:
      if (flags & EF_ARM_BE8)
	fprintf (file, _(" [BE8]"));

      if (flags & EF_ARM_LE8)
	fprintf (file, _(" [LE8]"));

      flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
      break;

    default:
      fprintf (file, _(" <EABI version unrecognised>"));
      break;
    }

  flags &= ~ EF_ARM_EABIMASK;

  if (flags & EF_ARM_RELEXEC)
    fprintf (file, _(" [relocatable executable]"));

  flags &= ~EF_ARM_RELEXEC;

  if (flags)
    fprintf (file, _("<Unrecognised flag bits set>"));

  fputc ('\n', file);

  return TRUE;
}
13235
13236 static int
13237 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
13238 {
13239 switch (ELF_ST_TYPE (elf_sym->st_info))
13240 {
13241 case STT_ARM_TFUNC:
13242 return ELF_ST_TYPE (elf_sym->st_info);
13243
13244 case STT_ARM_16BIT:
13245 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
13246 This allows us to distinguish between data used by Thumb instructions
13247 and non-data (which is probably code) inside Thumb regions of an
13248 executable. */
13249 if (type != STT_OBJECT && type != STT_TLS)
13250 return ELF_ST_TYPE (elf_sym->st_info);
13251 break;
13252
13253 default:
13254 break;
13255 }
13256
13257 return type;
13258 }
13259
13260 static asection *
13261 elf32_arm_gc_mark_hook (asection *sec,
13262 struct bfd_link_info *info,
13263 Elf_Internal_Rela *rel,
13264 struct elf_link_hash_entry *h,
13265 Elf_Internal_Sym *sym)
13266 {
13267 if (h != NULL)
13268 switch (ELF32_R_TYPE (rel->r_info))
13269 {
13270 case R_ARM_GNU_VTINHERIT:
13271 case R_ARM_GNU_VTENTRY:
13272 return NULL;
13273 }
13274
13275 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
13276 }
13277
/* Update the got entry reference counts for the section being removed.
   This is the inverse of elf32_arm_check_relocs: for every relocation
   in SEC it decrements whichever GOT/PLT/dynamic-reloc counters
   check_relocs incremented, so the classification logic below must
   mirror the switch in that function exactly.  */

static bfd_boolean
elf32_arm_gc_sweep_hook (bfd * abfd,
			 struct bfd_link_info * info,
			 asection * sec,
			 const Elf_Internal_Rela * relocs)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  bfd_signed_vma *local_got_refcounts;
  const Elf_Internal_Rela *rel, *relend;
  struct elf32_arm_link_hash_table * globals;

  /* Nothing to sweep for a relocatable link: no dynamic sections are
     being sized.  */
  if (bfd_link_relocatable (info))
    return TRUE;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  /* SEC is going away, so drop any record of dynamic relocs against it.  */
  elf_section_data (sec)->local_dynrel = NULL;

  symtab_hdr = & elf_symtab_hdr (abfd);
  sym_hashes = elf_sym_hashes (abfd);
  local_got_refcounts = elf_local_got_refcounts (abfd);

  check_use_blx (globals);

  relend = relocs + sec->reloc_count;
  for (rel = relocs; rel < relend; rel++)
    {
      unsigned long r_symndx;
      struct elf_link_hash_entry *h = NULL;
      struct elf32_arm_link_hash_entry *eh;
      int r_type;
      /* These three flags reproduce the classification made by
	 check_relocs for this reloc.  */
      bfd_boolean call_reloc_p;
      bfd_boolean may_become_dynamic_p;
      bfd_boolean may_need_local_target_p;
      union gotplt_union *root_plt;
      struct arm_plt_info *arm_plt;

      r_symndx = ELF32_R_SYM (rel->r_info);
      if (r_symndx >= symtab_hdr->sh_info)
	{
	  /* Global symbol: follow indirect/warning links to the real
	     hash entry.  */
	  h = sym_hashes[r_symndx - symtab_hdr->sh_info];
	  while (h->root.type == bfd_link_hash_indirect
		 || h->root.type == bfd_link_hash_warning)
	    h = (struct elf_link_hash_entry *) h->root.u.i.link;
	}
      eh = (struct elf32_arm_link_hash_entry *) h;

      call_reloc_p = FALSE;
      may_become_dynamic_p = FALSE;
      may_need_local_target_p = FALSE;

      r_type = ELF32_R_TYPE (rel->r_info);
      r_type = arm_real_reloc_type (globals, r_type);
      switch (r_type)
	{
	case R_ARM_GOT32:
	case R_ARM_GOT_PREL:
	case R_ARM_TLS_GD32:
	case R_ARM_TLS_IE32:
	  /* GOT-using relocs: drop the GOT refcount, global or local.  */
	  if (h != NULL)
	    {
	      if (h->got.refcount > 0)
		h->got.refcount -= 1;
	    }
	  else if (local_got_refcounts != NULL)
	    {
	      if (local_got_refcounts[r_symndx] > 0)
		local_got_refcounts[r_symndx] -= 1;
	    }
	  break;

	case R_ARM_TLS_LDM32:
	  /* The LDM GOT slot is shared; its count lives in the hash table.  */
	  globals->tls_ldm_got.refcount -= 1;
	  break;

	case R_ARM_PC24:
	case R_ARM_PLT32:
	case R_ARM_CALL:
	case R_ARM_JUMP24:
	case R_ARM_PREL31:
	case R_ARM_THM_CALL:
	case R_ARM_THM_JUMP24:
	case R_ARM_THM_JUMP19:
	  /* Branch/call relocs may have contributed a PLT reference.  */
	  call_reloc_p = TRUE;
	  may_need_local_target_p = TRUE;
	  break;

	case R_ARM_ABS12:
	  /* On VxWorks ABS12 is treated like the absolute relocs below;
	     elsewhere it only implies a possible local target.  */
	  if (!globals->vxworks_p)
	    {
	      may_need_local_target_p = TRUE;
	      break;
	    }
	  /* Fall through.  */
	case R_ARM_ABS32:
	case R_ARM_ABS32_NOI:
	case R_ARM_REL32:
	case R_ARM_REL32_NOI:
	case R_ARM_MOVW_ABS_NC:
	case R_ARM_MOVT_ABS:
	case R_ARM_MOVW_PREL_NC:
	case R_ARM_MOVT_PREL:
	case R_ARM_THM_MOVW_ABS_NC:
	case R_ARM_THM_MOVT_ABS:
	case R_ARM_THM_MOVW_PREL_NC:
	case R_ARM_THM_MOVT_PREL:
	  /* Should the interworking branches be here also?  */
	  if ((bfd_link_pic (info) || globals->root.is_relocatable_executable)
	      && (sec->flags & SEC_ALLOC) != 0)
	    {
	      if (h == NULL
		  && elf32_arm_howto_from_type (r_type)->pc_relative)
		{
		  /* Local PC-relative references were counted as calls
		     (see the SYMBOL_CALLS_LOCAL handling in check_relocs).  */
		  call_reloc_p = TRUE;
		  may_need_local_target_p = TRUE;
		}
	      else
		/* check_relocs recorded a dynamic reloc for this one.  */
		may_become_dynamic_p = TRUE;
	    }
	  else
	    may_need_local_target_p = TRUE;
	  break;

	default:
	  break;
	}

      if (may_need_local_target_p
	  && elf32_arm_get_plt_info (abfd, eh, r_symndx, &root_plt, &arm_plt))
	{
	  /* If PLT refcount book-keeping is wrong and too low, we'll
	     see a zero value (going to -1) for the root PLT reference
	     count.  */
	  if (root_plt->refcount >= 0)
	    {
	      BFD_ASSERT (root_plt->refcount != 0);
	      root_plt->refcount -= 1;
	    }
	  else
	    /* A value of -1 means the symbol has become local, forced
	       or seeing a hidden definition.  Any other negative value
	       is an error.  */
	    BFD_ASSERT (root_plt->refcount == -1);

	  /* Undo the per-kind counters that check_relocs incremented.  */
	  if (!call_reloc_p)
	    arm_plt->noncall_refcount--;

	  if (r_type == R_ARM_THM_CALL)
	    arm_plt->maybe_thumb_refcount--;

	  if (r_type == R_ARM_THM_JUMP24
	      || r_type == R_ARM_THM_JUMP19)
	    arm_plt->thumb_refcount--;
	}

      if (may_become_dynamic_p)
	{
	  struct elf_dyn_relocs **pp;
	  struct elf_dyn_relocs *p;

	  /* Find the dynamic-reloc list this reloc was recorded on:
	     per-symbol for globals, per-local-symbol otherwise.  */
	  if (h != NULL)
	    pp = &(eh->dyn_relocs);
	  else
	    {
	      Elf_Internal_Sym *isym;

	      isym = bfd_sym_from_r_symndx (&globals->sym_cache,
					    abfd, r_symndx);
	      if (isym == NULL)
		return FALSE;
	      pp = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
	      if (pp == NULL)
		return FALSE;
	    }
	  for (; (p = *pp) != NULL; pp = &p->next)
	    if (p->sec == sec)
	      {
		/* Everything must go for SEC.  */
		*pp = p->next;
		break;
	      }
	}
    }

  return TRUE;
}
13469
/* Look through the relocs for a section during the first phase.
   Counts GOT and PLT references and records would-be dynamic
   relocations so that later passes (allocate_dynrelocs and friends)
   can size the dynamic sections.  elf32_arm_gc_sweep_hook undoes this
   counting for garbage-collected sections, so the classification here
   must stay in sync with that function.  */

static bfd_boolean
elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
			asection *sec, const Elf_Internal_Rela *relocs)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  const Elf_Internal_Rela *rel;
  const Elf_Internal_Rela *rel_end;
  bfd *dynobj;
  asection *sreloc;
  struct elf32_arm_link_hash_table *htab;
  bfd_boolean call_reloc_p;
  bfd_boolean may_become_dynamic_p;
  bfd_boolean may_need_local_target_p;
  unsigned long nsyms;

  /* No dynamic bookkeeping is needed for relocatable links.  */
  if (bfd_link_relocatable (info))
    return TRUE;

  BFD_ASSERT (is_arm_elf (abfd));

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  sreloc = NULL;

  /* Create dynamic sections for relocatable executables so that we can
     copy relocations.  */
  if (htab->root.is_relocatable_executable
      && ! htab->root.dynamic_sections_created)
    {
      if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
	return FALSE;
    }

  if (htab->root.dynobj == NULL)
    htab->root.dynobj = abfd;
  if (!create_ifunc_sections (info))
    return FALSE;

  dynobj = htab->root.dynobj;

  symtab_hdr = & elf_symtab_hdr (abfd);
  sym_hashes = elf_sym_hashes (abfd);
  nsyms = NUM_SHDR_ENTRIES (symtab_hdr);

  rel_end = relocs + sec->reloc_count;
  for (rel = relocs; rel < rel_end; rel++)
    {
      Elf_Internal_Sym *isym;
      struct elf_link_hash_entry *h;
      struct elf32_arm_link_hash_entry *eh;
      unsigned long r_symndx;
      int r_type;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      r_type = arm_real_reloc_type (htab, r_type);

      if (r_symndx >= nsyms
	  /* PR 9934: It is possible to have relocations that do not
	     refer to symbols, thus it is also possible to have an
	     object file containing relocations but no symbol table.  */
	  && (r_symndx > STN_UNDEF || nsyms > 0))
	{
	  (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
				 r_symndx);
	  return FALSE;
	}

      /* Resolve R_SYMNDX to either a local symbol (ISYM) or a global
	 hash entry (H), following indirect/warning links.  */
      h = NULL;
      isym = NULL;
      if (nsyms > 0)
	{
	  if (r_symndx < symtab_hdr->sh_info)
	    {
	      /* A local symbol.  */
	      isym = bfd_sym_from_r_symndx (&htab->sym_cache,
					    abfd, r_symndx);
	      if (isym == NULL)
		return FALSE;
	    }
	  else
	    {
	      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
	      while (h->root.type == bfd_link_hash_indirect
		     || h->root.type == bfd_link_hash_warning)
		h = (struct elf_link_hash_entry *) h->root.u.i.link;

	      /* PR15323, ref flags aren't set for references in the
		 same object.  */
	      h->root.non_ir_ref = 1;
	    }
	}

      eh = (struct elf32_arm_link_hash_entry *) h;

      call_reloc_p = FALSE;
      may_become_dynamic_p = FALSE;
      may_need_local_target_p = FALSE;

      /* Could be done earlier, if h were already available.  */
      r_type = elf32_arm_tls_transition (info, r_type, h);
      switch (r_type)
	{
	case R_ARM_GOT32:
	case R_ARM_GOT_PREL:
	case R_ARM_TLS_GD32:
	case R_ARM_TLS_IE32:
	case R_ARM_TLS_GOTDESC:
	case R_ARM_TLS_DESCSEQ:
	case R_ARM_THM_TLS_DESCSEQ:
	case R_ARM_TLS_CALL:
	case R_ARM_THM_TLS_CALL:
	  /* This symbol requires a global offset table entry.  */
	  {
	    int tls_type, old_tls_type;

	    switch (r_type)
	      {
	      case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;

	      case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;

	      case R_ARM_TLS_GOTDESC:
	      case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL:
	      case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ:
		tls_type = GOT_TLS_GDESC; break;

	      default: tls_type = GOT_NORMAL; break;
	      }

	    if (!bfd_link_executable (info) && (tls_type & GOT_TLS_IE))
	      info->flags |= DF_STATIC_TLS;

	    /* Count the GOT reference and fetch the TLS access model
	       recorded by previous relocs against the same symbol.  */
	    if (h != NULL)
	      {
		h->got.refcount++;
		old_tls_type = elf32_arm_hash_entry (h)->tls_type;
	      }
	    else
	      {
		/* This is a global offset table entry for a local symbol.  */
		if (!elf32_arm_allocate_local_sym_info (abfd))
		  return FALSE;
		elf_local_got_refcounts (abfd)[r_symndx] += 1;
		old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
	      }

	    /* If a variable is accessed with both tls methods, two
	       slots may be created.  */
	    if (GOT_TLS_GD_ANY_P (old_tls_type)
		&& GOT_TLS_GD_ANY_P (tls_type))
	      tls_type |= old_tls_type;

	    /* We will already have issued an error message if there
	       is a TLS/non-TLS mismatch, based on the symbol
	       type.  So just combine any TLS types needed.  */
	    if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
		&& tls_type != GOT_NORMAL)
	      tls_type |= old_tls_type;

	    /* If the symbol is accessed in both IE and GDESC
	       method, we're able to relax.  Turn off the GDESC flag,
	       without messing up with any other kind of tls types
	       that may be involved.  */
	    if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
	      tls_type &= ~GOT_TLS_GDESC;

	    if (old_tls_type != tls_type)
	      {
		if (h != NULL)
		  elf32_arm_hash_entry (h)->tls_type = tls_type;
		else
		  elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
	      }
	  }
	  /* Fall through.  */

	case R_ARM_TLS_LDM32:
	  if (r_type == R_ARM_TLS_LDM32)
	    htab->tls_ldm_got.refcount++;
	  /* Fall through.  */

	case R_ARM_GOTOFF32:
	case R_ARM_GOTPC:
	  /* All of the above need a .got section to exist.  */
	  if (htab->root.sgot == NULL
	      && !create_got_section (htab->root.dynobj, info))
	    return FALSE;
	  break;

	case R_ARM_PC24:
	case R_ARM_PLT32:
	case R_ARM_CALL:
	case R_ARM_JUMP24:
	case R_ARM_PREL31:
	case R_ARM_THM_CALL:
	case R_ARM_THM_JUMP24:
	case R_ARM_THM_JUMP19:
	  /* Branch/call relocs: the target may end up needing a PLT
	     entry or an interworking stub.  */
	  call_reloc_p = TRUE;
	  may_need_local_target_p = TRUE;
	  break;

	case R_ARM_ABS12:
	  /* VxWorks uses dynamic R_ARM_ABS12 relocations for
	     ldr __GOTT_INDEX__ offsets.  */
	  if (!htab->vxworks_p)
	    {
	      may_need_local_target_p = TRUE;
	      break;
	    }
	  /* On VxWorks, skip the shared-library diagnostic below and
	     go straight to the dynamic-reloc handling.  */
	  else goto jump_over;

	  /* Fall through.  */

	case R_ARM_MOVW_ABS_NC:
	case R_ARM_MOVT_ABS:
	case R_ARM_THM_MOVW_ABS_NC:
	case R_ARM_THM_MOVT_ABS:
	  /* Absolute MOVW/MOVT pairs cannot be relocated at load time,
	     so they are not permitted in shared objects.  */
	  if (bfd_link_pic (info))
	    {
	      (*_bfd_error_handler)
		(_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
		 abfd, elf32_arm_howto_table_1[r_type].name,
		 (h) ? h->root.root.string : "a local symbol");
	      bfd_set_error (bfd_error_bad_value);
	      return FALSE;
	    }

	  /* Fall through.  */
	case R_ARM_ABS32:
	case R_ARM_ABS32_NOI:
	jump_over:
	  if (h != NULL && bfd_link_executable (info))
	    {
	      /* The address of this symbol may be taken; make sure
		 PLT-based resolution keeps function pointers equal.  */
	      h->pointer_equality_needed = 1;
	    }
	  /* Fall through.  */
	case R_ARM_REL32:
	case R_ARM_REL32_NOI:
	case R_ARM_MOVW_PREL_NC:
	case R_ARM_MOVT_PREL:
	case R_ARM_THM_MOVW_PREL_NC:
	case R_ARM_THM_MOVT_PREL:

	  /* Should the interworking branches be listed here?  */
	  if ((bfd_link_pic (info) || htab->root.is_relocatable_executable)
	      && (sec->flags & SEC_ALLOC) != 0)
	    {
	      if (h == NULL
		  && elf32_arm_howto_from_type (r_type)->pc_relative)
		{
		  /* In shared libraries and relocatable executables,
		     we treat local relative references as calls;
		     see the related SYMBOL_CALLS_LOCAL code in
		     allocate_dynrelocs.  */
		  call_reloc_p = TRUE;
		  may_need_local_target_p = TRUE;
		}
	      else
		/* We are creating a shared library or relocatable
		   executable, and this is a reloc against a global symbol,
		   or a non-PC-relative reloc against a local symbol.
		   We may need to copy the reloc into the output.  */
		may_become_dynamic_p = TRUE;
	    }
	  else
	    may_need_local_target_p = TRUE;
	  break;

	/* This relocation describes the C++ object vtable hierarchy.
	   Reconstruct it for later use during GC.  */
	case R_ARM_GNU_VTINHERIT:
	  if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
	    return FALSE;
	  break;

	/* This relocation describes which C++ vtable entries are actually
	   used.  Record for later use during GC.  */
	case R_ARM_GNU_VTENTRY:
	  BFD_ASSERT (h != NULL);
	  if (h != NULL
	      && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
	    return FALSE;
	  break;
	}

      if (h != NULL)
	{
	  if (call_reloc_p)
	    /* We may need a .plt entry if the function this reloc
	       refers to is in a different object, regardless of the
	       symbol's type.  We can't tell for sure yet, because
	       something later might force the symbol local.  */
	    h->needs_plt = 1;
	  else if (may_need_local_target_p)
	    /* If this reloc is in a read-only section, we might
	       need a copy reloc.  We can't check reliably at this
	       stage whether the section is read-only, as input
	       sections have not yet been mapped to output sections.
	       Tentatively set the flag for now, and correct in
	       adjust_dynamic_symbol.  */
	    h->non_got_ref = 1;
	}

      if (may_need_local_target_p
	  && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC))
	{
	  union gotplt_union *root_plt;
	  struct arm_plt_info *arm_plt;
	  struct arm_local_iplt_info *local_iplt;

	  /* Pick the PLT bookkeeping record: the hash entry for
	     globals, a per-symbol iplt record for local ifuncs.  */
	  if (h != NULL)
	    {
	      root_plt = &h->plt;
	      arm_plt = &eh->plt;
	    }
	  else
	    {
	      local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
	      if (local_iplt == NULL)
		return FALSE;
	      root_plt = &local_iplt->root;
	      arm_plt = &local_iplt->arm;
	    }

	  /* If the symbol is a function that doesn't bind locally,
	     this relocation will need a PLT entry.  */
	  if (root_plt->refcount != -1)
	    root_plt->refcount += 1;

	  if (!call_reloc_p)
	    arm_plt->noncall_refcount++;

	  /* It's too early to use htab->use_blx here, so we have to
	     record possible blx references separately from
	     relocs that definitely need a thumb stub.  */

	  if (r_type == R_ARM_THM_CALL)
	    arm_plt->maybe_thumb_refcount += 1;

	  if (r_type == R_ARM_THM_JUMP24
	      || r_type == R_ARM_THM_JUMP19)
	    arm_plt->thumb_refcount += 1;
	}

      if (may_become_dynamic_p)
	{
	  struct elf_dyn_relocs *p, **head;

	  /* Create a reloc section in dynobj.  */
	  if (sreloc == NULL)
	    {
	      sreloc = _bfd_elf_make_dynamic_reloc_section
		(sec, dynobj, 2, abfd, ! htab->use_rel);

	      if (sreloc == NULL)
		return FALSE;

	      /* BPABI objects never have dynamic relocations mapped.  */
	      if (htab->symbian_p)
		{
		  flagword flags;

		  flags = bfd_get_section_flags (dynobj, sreloc);
		  flags &= ~(SEC_LOAD | SEC_ALLOC);
		  bfd_set_section_flags (dynobj, sreloc, flags);
		}
	    }

	  /* If this is a global symbol, count the number of
	     relocations we need for this symbol.  */
	  if (h != NULL)
	    head = &((struct elf32_arm_link_hash_entry *) h)->dyn_relocs;
	  else
	    {
	      head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
	      if (head == NULL)
		return FALSE;
	    }

	  /* Reuse the head record if it is already for SEC, otherwise
	     push a fresh per-section record.  */
	  p = *head;
	  if (p == NULL || p->sec != sec)
	    {
	      bfd_size_type amt = sizeof *p;

	      p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt);
	      if (p == NULL)
		return FALSE;
	      p->next = *head;
	      *head = p;
	      p->sec = sec;
	      p->count = 0;
	      p->pc_count = 0;
	    }

	  if (elf32_arm_howto_from_type (r_type)->pc_relative)
	    p->pc_count += 1;
	  p->count += 1;
	}
    }

  return TRUE;
}
13877
13878 /* Unwinding tables are not referenced directly. This pass marks them as
13879 required if the corresponding code section is marked. */
13880
13881 static bfd_boolean
13882 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
13883 elf_gc_mark_hook_fn gc_mark_hook)
13884 {
13885 bfd *sub;
13886 Elf_Internal_Shdr **elf_shdrp;
13887 bfd_boolean again;
13888
13889 _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);
13890
13891 /* Marking EH data may cause additional code sections to be marked,
13892 requiring multiple passes. */
13893 again = TRUE;
13894 while (again)
13895 {
13896 again = FALSE;
13897 for (sub = info->input_bfds; sub != NULL; sub = sub->link.next)
13898 {
13899 asection *o;
13900
13901 if (! is_arm_elf (sub))
13902 continue;
13903
13904 elf_shdrp = elf_elfsections (sub);
13905 for (o = sub->sections; o != NULL; o = o->next)
13906 {
13907 Elf_Internal_Shdr *hdr;
13908
13909 hdr = &elf_section_data (o)->this_hdr;
13910 if (hdr->sh_type == SHT_ARM_EXIDX
13911 && hdr->sh_link
13912 && hdr->sh_link < elf_numsections (sub)
13913 && !o->gc_mark
13914 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
13915 {
13916 again = TRUE;
13917 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
13918 return FALSE;
13919 }
13920 }
13921 }
13922 }
13923
13924 return TRUE;
13925 }
13926
13927 /* Treat mapping symbols as special target symbols. */
13928
13929 static bfd_boolean
13930 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
13931 {
13932 return bfd_is_arm_special_symbol_name (sym->name,
13933 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
13934 }
13935
/* This is a copy of elf_find_function() from elf.c except that
   ARM mapping symbols are ignored when looking for function names
   and STT_ARM_TFUNC is considered to a function type.

   Scans SYMBOLS for the function symbol in SECTION with the highest
   value not exceeding OFFSET, and reports its name (and the most
   recently seen STT_FILE name) through the output pointers.  Returns
   FALSE if no candidate function is found.  */

static bfd_boolean
arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
		       asymbol ** symbols,
		       asection * section,
		       bfd_vma offset,
		       const char ** filename_ptr,
		       const char ** functionname_ptr)
{
  const char * filename = NULL;
  asymbol * func = NULL;
  bfd_vma low_func = 0;		/* Value of the best candidate so far.  */
  asymbol ** p;

  for (p = symbols; *p != NULL; p++)
    {
      elf_symbol_type *q;

      q = (elf_symbol_type *) *p;

      switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
	{
	default:
	  break;
	case STT_FILE:
	  /* Remember the last file symbol seen; it names the source
	     file of the symbols that follow it.  */
	  filename = bfd_asymbol_name (&q->symbol);
	  break;
	case STT_FUNC:
	case STT_ARM_TFUNC:
	case STT_NOTYPE:
	  /* Skip mapping symbols.  */
	  if ((q->symbol.flags & BSF_LOCAL)
	      && bfd_is_arm_special_symbol_name (q->symbol.name,
						 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    continue;
	  /* NOTE: despite the historical "fall through" comment here,
	     this is plain sequential code within the same case body:
	     keep the closest function symbol at or below OFFSET.  */
	  if (bfd_get_section (&q->symbol) == section
	      && q->symbol.value >= low_func
	      && q->symbol.value <= offset)
	    {
	      func = (asymbol *) q;
	      low_func = q->symbol.value;
	    }
	  break;
	}
    }

  if (func == NULL)
    return FALSE;

  if (filename_ptr)
    *filename_ptr = filename;
  if (functionname_ptr)
    *functionname_ptr = bfd_asymbol_name (func);

  return TRUE;
}
13996
13997
/* Find the nearest line to a particular section and offset, for error
   reporting.   This code is a duplicate of the code in elf.c, except
   that it uses arm_elf_find_function.

   Tries, in order: DWARF2 line info (patching in a function name from
   the symbol table if DWARF2 lacks one), then stabs, then a plain
   symbol-table scan with line number 0.  Returns FALSE if all fail.  */

static bfd_boolean
elf32_arm_find_nearest_line (bfd *          abfd,
			     asymbol **     symbols,
			     asection *     section,
			     bfd_vma        offset,
			     const char **  filename_ptr,
			     const char **  functionname_ptr,
			     unsigned int * line_ptr,
			     unsigned int * discriminator_ptr)
{
  bfd_boolean found = FALSE;

  if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
				     filename_ptr, functionname_ptr,
				     line_ptr, discriminator_ptr,
				     dwarf_debug_sections, 0,
				     & elf_tdata (abfd)->dwarf2_find_line_info))
    {
      /* DWARF2 found a line but no function name: fill it in from the
	 symbol table, and fill in the filename too only if DWARF2
	 didn't already provide one.  */
      if (!*functionname_ptr)
	arm_elf_find_function (abfd, symbols, section, offset,
			       *filename_ptr ? NULL : filename_ptr,
			       functionname_ptr);

      return TRUE;
    }

  /* Skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain
     uses DWARF1.  */

  if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
					     & found, filename_ptr,
					     functionname_ptr, line_ptr,
					     & elf_tdata (abfd)->line_info))
    return FALSE;

  /* Stabs info is only useful if it yielded a function name or a line
     number; otherwise fall back to the symbol table.  */
  if (found && (*functionname_ptr || *line_ptr))
    return TRUE;

  if (symbols == NULL)
    return FALSE;

  if (! arm_elf_find_function (abfd, symbols, section, offset,
			       filename_ptr, functionname_ptr))
    return FALSE;

  /* The symbol-table fallback cannot supply a line number.  */
  *line_ptr = 0;
  return TRUE;
}
14050
14051 static bfd_boolean
14052 elf32_arm_find_inliner_info (bfd * abfd,
14053 const char ** filename_ptr,
14054 const char ** functionname_ptr,
14055 unsigned int * line_ptr)
14056 {
14057 bfd_boolean found;
14058 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
14059 functionname_ptr, line_ptr,
14060 & elf_tdata (abfd)->dwarf2_find_line_info);
14061 return found;
14062 }
14063
/* Adjust a symbol defined by a dynamic object and referenced by a
   regular object.  The current definition is in some section of the
   dynamic object, but we're not including those sections.  We have to
   change the definition to something the rest of the link can
   understand.

   Decides, per symbol, between: keeping/cancelling a PLT entry,
   aliasing a weak symbol to its real definition, doing nothing (PIC
   links resolve through the GOT), or reserving .dynbss space plus a
   copy relocation.  */

static bfd_boolean
elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
				 struct elf_link_hash_entry * h)
{
  bfd * dynobj;
  asection * s;
  struct elf32_arm_link_hash_entry * eh;
  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;

  /* Make sure we know what is going on here.  */
  BFD_ASSERT (dynobj != NULL
	      && (h->needs_plt
		  || h->type == STT_GNU_IFUNC
		  || h->u.weakdef != NULL
		  || (h->def_dynamic
		      && h->ref_regular
		      && !h->def_regular)));

  eh = (struct elf32_arm_link_hash_entry *) h;

  /* If this is a function, put it in the procedure linkage table.  We
     will fill in the contents of the procedure linkage table later,
     when we know the address of the .got section.  */
  if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
    {
      /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
	 symbol binds locally.  */
      if (h->plt.refcount <= 0
	  || (h->type != STT_GNU_IFUNC
	      && (SYMBOL_CALLS_LOCAL (info, h)
		  || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
		      && h->root.type == bfd_link_hash_undefweak))))
	{
	  /* This case can occur if we saw a PLT32 reloc in an input
	     file, but the symbol was never referred to by a dynamic
	     object, or if all references were garbage collected.  In
	     such a case, we don't actually need to build a procedure
	     linkage table, and we can just do a PC24 reloc instead.  */
	  h->plt.offset = (bfd_vma) -1;
	  eh->plt.thumb_refcount = 0;
	  eh->plt.maybe_thumb_refcount = 0;
	  eh->plt.noncall_refcount = 0;
	  h->needs_plt = 0;
	}

      return TRUE;
    }
  else
    {
      /* It's possible that we incorrectly decided a .plt reloc was
	 needed for an R_ARM_PC24 or similar reloc to a non-function sym
	 in check_relocs.  We can't decide accurately between function
	 and non-function syms in check-relocs;  Objects loaded later in
	 the link may change h->type.  So fix it now.  */
      h->plt.offset = (bfd_vma) -1;
      eh->plt.thumb_refcount = 0;
      eh->plt.maybe_thumb_refcount = 0;
      eh->plt.noncall_refcount = 0;
    }

  /* If this is a weak symbol, and there is a real definition, the
     processor independent code will have arranged for us to see the
     real definition first, and we can just use the same value.  */
  if (h->u.weakdef != NULL)
    {
      BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
		  || h->u.weakdef->root.type == bfd_link_hash_defweak);
      h->root.u.def.section = h->u.weakdef->root.u.def.section;
      h->root.u.def.value = h->u.weakdef->root.u.def.value;
      return TRUE;
    }

  /* If there are no non-GOT references, we do not need a copy
     relocation.  */
  if (!h->non_got_ref)
    return TRUE;

  /* This is a reference to a symbol defined by a dynamic object which
     is not a function.  */

  /* If we are creating a shared library, we must presume that the
     only references to the symbol are via the global offset table.
     For such cases we need not do anything here; the relocations will
     be handled correctly by relocate_section.  Relocatable executables
     can reference data in shared objects directly, so we don't need to
     do anything here.  */
  if (bfd_link_pic (info) || globals->root.is_relocatable_executable)
    return TRUE;

  /* We must allocate the symbol in our .dynbss section, which will
     become part of the .bss section of the executable.  There will be
     an entry for this symbol in the .dynsym section.  The dynamic
     object will contain position independent code, so all references
     from the dynamic object to this symbol will go through the global
     offset table.  The dynamic linker will use the .dynsym entry to
     determine the address it must put in the global offset table, so
     both the dynamic object and the regular object will refer to the
     same memory location for the variable.  */
  s = bfd_get_linker_section (dynobj, ".dynbss");
  BFD_ASSERT (s != NULL);

  /* If allowed, we must generate a R_ARM_COPY reloc to tell the dynamic
     linker to copy the initial value out of the dynamic object and into
     the runtime process image.  We need to remember the offset into the
     .rel(a).bss section we are going to use.  */
  if (info->nocopyreloc == 0
      && (h->root.u.def.section->flags & SEC_ALLOC) != 0
      && h->size != 0)
    {
      asection *srel;

      srel = bfd_get_linker_section (dynobj, RELOC_SECTION (globals, ".bss"));
      elf32_arm_allocate_dynrelocs (info, srel, 1);
      h->needs_copy = 1;
    }

  return _bfd_elf_adjust_dynamic_copy (info, h, s);
}
14194
14195 /* Allocate space in .plt, .got and associated reloc sections for
14196 dynamic relocs. */
14197
14198 static bfd_boolean
14199 allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
14200 {
14201 struct bfd_link_info *info;
14202 struct elf32_arm_link_hash_table *htab;
14203 struct elf32_arm_link_hash_entry *eh;
14204 struct elf_dyn_relocs *p;
14205
14206 if (h->root.type == bfd_link_hash_indirect)
14207 return TRUE;
14208
14209 eh = (struct elf32_arm_link_hash_entry *) h;
14210
14211 info = (struct bfd_link_info *) inf;
14212 htab = elf32_arm_hash_table (info);
14213 if (htab == NULL)
14214 return FALSE;
14215
14216 if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC)
14217 && h->plt.refcount > 0)
14218 {
14219 /* Make sure this symbol is output as a dynamic symbol.
14220 Undefined weak syms won't yet be marked as dynamic. */
14221 if (h->dynindx == -1
14222 && !h->forced_local)
14223 {
14224 if (! bfd_elf_link_record_dynamic_symbol (info, h))
14225 return FALSE;
14226 }
14227
14228 /* If the call in the PLT entry binds locally, the associated
14229 GOT entry should use an R_ARM_IRELATIVE relocation instead of
14230 the usual R_ARM_JUMP_SLOT. Put it in the .iplt section rather
14231 than the .plt section. */
14232 if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h))
14233 {
14234 eh->is_iplt = 1;
14235 if (eh->plt.noncall_refcount == 0
14236 && SYMBOL_REFERENCES_LOCAL (info, h))
14237 /* All non-call references can be resolved directly.
14238 This means that they can (and in some cases, must)
14239 resolve directly to the run-time target, rather than
14240 to the PLT. That in turns means that any .got entry
14241 would be equal to the .igot.plt entry, so there's
14242 no point having both. */
14243 h->got.refcount = 0;
14244 }
14245
14246 if (bfd_link_pic (info)
14247 || eh->is_iplt
14248 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
14249 {
14250 elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt);
14251
14252 /* If this symbol is not defined in a regular file, and we are
14253 not generating a shared library, then set the symbol to this
14254 location in the .plt. This is required to make function
14255 pointers compare as equal between the normal executable and
14256 the shared library. */
14257 if (! bfd_link_pic (info)
14258 && !h->def_regular)
14259 {
14260 h->root.u.def.section = htab->root.splt;
14261 h->root.u.def.value = h->plt.offset;
14262
14263 /* Make sure the function is not marked as Thumb, in case
14264 it is the target of an ABS32 relocation, which will
14265 point to the PLT entry. */
14266 ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
14267 }
14268
14269 /* VxWorks executables have a second set of relocations for
14270 each PLT entry. They go in a separate relocation section,
14271 which is processed by the kernel loader. */
14272 if (htab->vxworks_p && !bfd_link_pic (info))
14273 {
14274 /* There is a relocation for the initial PLT entry:
14275 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
14276 if (h->plt.offset == htab->plt_header_size)
14277 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1);
14278
14279 /* There are two extra relocations for each subsequent
14280 PLT entry: an R_ARM_32 relocation for the GOT entry,
14281 and an R_ARM_32 relocation for the PLT entry. */
14282 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2);
14283 }
14284 }
14285 else
14286 {
14287 h->plt.offset = (bfd_vma) -1;
14288 h->needs_plt = 0;
14289 }
14290 }
14291 else
14292 {
14293 h->plt.offset = (bfd_vma) -1;
14294 h->needs_plt = 0;
14295 }
14296
14297 eh = (struct elf32_arm_link_hash_entry *) h;
14298 eh->tlsdesc_got = (bfd_vma) -1;
14299
14300 if (h->got.refcount > 0)
14301 {
14302 asection *s;
14303 bfd_boolean dyn;
14304 int tls_type = elf32_arm_hash_entry (h)->tls_type;
14305 int indx;
14306
14307 /* Make sure this symbol is output as a dynamic symbol.
14308 Undefined weak syms won't yet be marked as dynamic. */
14309 if (h->dynindx == -1
14310 && !h->forced_local)
14311 {
14312 if (! bfd_elf_link_record_dynamic_symbol (info, h))
14313 return FALSE;
14314 }
14315
14316 if (!htab->symbian_p)
14317 {
14318 s = htab->root.sgot;
14319 h->got.offset = s->size;
14320
14321 if (tls_type == GOT_UNKNOWN)
14322 abort ();
14323
14324 if (tls_type == GOT_NORMAL)
14325 /* Non-TLS symbols need one GOT slot. */
14326 s->size += 4;
14327 else
14328 {
14329 if (tls_type & GOT_TLS_GDESC)
14330 {
14331 /* R_ARM_TLS_DESC needs 2 GOT slots. */
14332 eh->tlsdesc_got
14333 = (htab->root.sgotplt->size
14334 - elf32_arm_compute_jump_table_size (htab));
14335 htab->root.sgotplt->size += 8;
14336 h->got.offset = (bfd_vma) -2;
14337 /* plt.got_offset needs to know there's a TLS_DESC
14338 reloc in the middle of .got.plt. */
14339 htab->num_tls_desc++;
14340 }
14341
14342 if (tls_type & GOT_TLS_GD)
14343 {
14344 /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots. If
14345 the symbol is both GD and GDESC, got.offset may
14346 have been overwritten. */
14347 h->got.offset = s->size;
14348 s->size += 8;
14349 }
14350
14351 if (tls_type & GOT_TLS_IE)
14352 /* R_ARM_TLS_IE32 needs one GOT slot. */
14353 s->size += 4;
14354 }
14355
14356 dyn = htab->root.dynamic_sections_created;
14357
14358 indx = 0;
14359 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
14360 bfd_link_pic (info),
14361 h)
14362 && (!bfd_link_pic (info)
14363 || !SYMBOL_REFERENCES_LOCAL (info, h)))
14364 indx = h->dynindx;
14365
14366 if (tls_type != GOT_NORMAL
14367 && (bfd_link_pic (info) || indx != 0)
14368 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
14369 || h->root.type != bfd_link_hash_undefweak))
14370 {
14371 if (tls_type & GOT_TLS_IE)
14372 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
14373
14374 if (tls_type & GOT_TLS_GD)
14375 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
14376
14377 if (tls_type & GOT_TLS_GDESC)
14378 {
14379 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
14380 /* GDESC needs a trampoline to jump to. */
14381 htab->tls_trampoline = -1;
14382 }
14383
14384 /* Only GD needs it. GDESC just emits one relocation per
14385 2 entries. */
14386 if ((tls_type & GOT_TLS_GD) && indx != 0)
14387 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
14388 }
14389 else if (indx != -1 && !SYMBOL_REFERENCES_LOCAL (info, h))
14390 {
14391 if (htab->root.dynamic_sections_created)
14392 /* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation. */
14393 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
14394 }
14395 else if (h->type == STT_GNU_IFUNC
14396 && eh->plt.noncall_refcount == 0)
14397 /* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
14398 they all resolve dynamically instead. Reserve room for the
14399 GOT entry's R_ARM_IRELATIVE relocation. */
14400 elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
14401 else if (bfd_link_pic (info)
14402 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
14403 || h->root.type != bfd_link_hash_undefweak))
14404 /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation. */
14405 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
14406 }
14407 }
14408 else
14409 h->got.offset = (bfd_vma) -1;
14410
14411 /* Allocate stubs for exported Thumb functions on v4t. */
14412 if (!htab->use_blx && h->dynindx != -1
14413 && h->def_regular
14414 && ARM_GET_SYM_BRANCH_TYPE (h->target_internal) == ST_BRANCH_TO_THUMB
14415 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
14416 {
14417 struct elf_link_hash_entry * th;
14418 struct bfd_link_hash_entry * bh;
14419 struct elf_link_hash_entry * myh;
14420 char name[1024];
14421 asection *s;
14422 bh = NULL;
 14423 	  /* Create a new symbol to register the real location of the function.  */
14424 s = h->root.u.def.section;
14425 sprintf (name, "__real_%s", h->root.root.string);
14426 _bfd_generic_link_add_one_symbol (info, s->owner,
14427 name, BSF_GLOBAL, s,
14428 h->root.u.def.value,
14429 NULL, TRUE, FALSE, &bh);
14430
14431 myh = (struct elf_link_hash_entry *) bh;
14432 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
14433 myh->forced_local = 1;
14434 ARM_SET_SYM_BRANCH_TYPE (myh->target_internal, ST_BRANCH_TO_THUMB);
14435 eh->export_glue = myh;
14436 th = record_arm_to_thumb_glue (info, h);
14437 /* Point the symbol at the stub. */
14438 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
14439 ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
14440 h->root.u.def.section = th->root.u.def.section;
14441 h->root.u.def.value = th->root.u.def.value & ~1;
14442 }
14443
14444 if (eh->dyn_relocs == NULL)
14445 return TRUE;
14446
14447 /* In the shared -Bsymbolic case, discard space allocated for
14448 dynamic pc-relative relocs against symbols which turn out to be
14449 defined in regular objects. For the normal shared case, discard
14450 space for pc-relative relocs that have become local due to symbol
14451 visibility changes. */
14452
14453 if (bfd_link_pic (info) || htab->root.is_relocatable_executable)
14454 {
14455 /* Relocs that use pc_count are PC-relative forms, which will appear
14456 on something like ".long foo - ." or "movw REG, foo - .". We want
14457 calls to protected symbols to resolve directly to the function
14458 rather than going via the plt. If people want function pointer
14459 comparisons to work as expected then they should avoid writing
14460 assembly like ".long foo - .". */
14461 if (SYMBOL_CALLS_LOCAL (info, h))
14462 {
14463 struct elf_dyn_relocs **pp;
14464
14465 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
14466 {
14467 p->count -= p->pc_count;
14468 p->pc_count = 0;
14469 if (p->count == 0)
14470 *pp = p->next;
14471 else
14472 pp = &p->next;
14473 }
14474 }
14475
14476 if (htab->vxworks_p)
14477 {
14478 struct elf_dyn_relocs **pp;
14479
14480 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
14481 {
14482 if (strcmp (p->sec->output_section->name, ".tls_vars") == 0)
14483 *pp = p->next;
14484 else
14485 pp = &p->next;
14486 }
14487 }
14488
14489 /* Also discard relocs on undefined weak syms with non-default
14490 visibility. */
14491 if (eh->dyn_relocs != NULL
14492 && h->root.type == bfd_link_hash_undefweak)
14493 {
14494 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
14495 eh->dyn_relocs = NULL;
14496
14497 /* Make sure undefined weak symbols are output as a dynamic
14498 symbol in PIEs. */
14499 else if (h->dynindx == -1
14500 && !h->forced_local)
14501 {
14502 if (! bfd_elf_link_record_dynamic_symbol (info, h))
14503 return FALSE;
14504 }
14505 }
14506
14507 else if (htab->root.is_relocatable_executable && h->dynindx == -1
14508 && h->root.type == bfd_link_hash_new)
14509 {
14510 /* Output absolute symbols so that we can create relocations
14511 against them. For normal symbols we output a relocation
14512 against the section that contains them. */
14513 if (! bfd_elf_link_record_dynamic_symbol (info, h))
14514 return FALSE;
14515 }
14516
14517 }
14518 else
14519 {
14520 /* For the non-shared case, discard space for relocs against
14521 symbols which turn out to need copy relocs or are not
14522 dynamic. */
14523
14524 if (!h->non_got_ref
14525 && ((h->def_dynamic
14526 && !h->def_regular)
14527 || (htab->root.dynamic_sections_created
14528 && (h->root.type == bfd_link_hash_undefweak
14529 || h->root.type == bfd_link_hash_undefined))))
14530 {
14531 /* Make sure this symbol is output as a dynamic symbol.
14532 Undefined weak syms won't yet be marked as dynamic. */
14533 if (h->dynindx == -1
14534 && !h->forced_local)
14535 {
14536 if (! bfd_elf_link_record_dynamic_symbol (info, h))
14537 return FALSE;
14538 }
14539
14540 /* If that succeeded, we know we'll be keeping all the
14541 relocs. */
14542 if (h->dynindx != -1)
14543 goto keep;
14544 }
14545
14546 eh->dyn_relocs = NULL;
14547
14548 keep: ;
14549 }
14550
14551 /* Finally, allocate space. */
14552 for (p = eh->dyn_relocs; p != NULL; p = p->next)
14553 {
14554 asection *sreloc = elf_section_data (p->sec)->sreloc;
14555 if (h->type == STT_GNU_IFUNC
14556 && eh->plt.noncall_refcount == 0
14557 && SYMBOL_REFERENCES_LOCAL (info, h))
14558 elf32_arm_allocate_irelocs (info, sreloc, p->count);
14559 else
14560 elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
14561 }
14562
14563 return TRUE;
14564 }
14565
14566 /* Find any dynamic relocs that apply to read-only sections. */
14567
14568 static bfd_boolean
14569 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
14570 {
14571 struct elf32_arm_link_hash_entry * eh;
14572 struct elf_dyn_relocs * p;
14573
14574 eh = (struct elf32_arm_link_hash_entry *) h;
14575 for (p = eh->dyn_relocs; p != NULL; p = p->next)
14576 {
14577 asection *s = p->sec;
14578
14579 if (s != NULL && (s->flags & SEC_READONLY) != 0)
14580 {
14581 struct bfd_link_info *info = (struct bfd_link_info *) inf;
14582
14583 info->flags |= DF_TEXTREL;
14584
14585 /* Not an error, just cut short the traversal. */
14586 return FALSE;
14587 }
14588 }
14589 return TRUE;
14590 }
14591
14592 void
14593 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
14594 int byteswap_code)
14595 {
14596 struct elf32_arm_link_hash_table *globals;
14597
14598 globals = elf32_arm_hash_table (info);
14599 if (globals == NULL)
14600 return;
14601
14602 globals->byteswap_code = byteswap_code;
14603 }
14604
/* Set the sizes of the dynamic sections: .interp, local-symbol GOT
   entries, glue/veneer sections, the TLS trampoline, and the dynamic
   relocation sections; then reserve the .dynamic entries whose values
   are filled in later by elf32_arm_finish_dynamic_sections.  Returns
   FALSE on allocation failure or if the hash table is not ARM's.

   NOTE(review): OUTPUT_BFD is declared ATTRIBUTE_UNUSED but is in
   fact referenced below (DYNAMIC flag test and the VxWorks dynamic
   entries); the attribute is harmless but misleading.  */

static bfd_boolean
elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
				 struct bfd_link_info * info)
{
  bfd * dynobj;
  asection * s;
  bfd_boolean plt;		/* Does the output need a PLT?  */
  bfd_boolean relocs;		/* Any reloc section besides .rel(a).plt?  */
  bfd *ibfd;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;
  BFD_ASSERT (dynobj != NULL);
  check_use_blx (htab);

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      /* Set the contents of the .interp section to the interpreter.  */
      if (bfd_link_executable (info) && !info->nointerp)
	{
	  s = bfd_get_linker_section (dynobj, ".interp");
	  BFD_ASSERT (s != NULL);
	  s->size = sizeof ELF_DYNAMIC_INTERPRETER;
	  s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
	}
    }

  /* Set up .got offsets for local syms, and space for local dynamic
     relocs.  Walk every input bfd once.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      bfd_signed_vma *local_got;
      bfd_signed_vma *end_local_got;
      struct arm_local_iplt_info **local_iplt_ptr, *local_iplt;
      char *local_tls_type;
      bfd_vma *local_tlsdesc_gotent;
      bfd_size_type locsymcount;
      Elf_Internal_Shdr *symtab_hdr;
      asection *srel;
      bfd_boolean is_vxworks = htab->vxworks_p;
      unsigned int symndx;

      if (! is_arm_elf (ibfd))
	continue;

      /* First, reserve room for this bfd's local dynamic relocs,
	 skipping discarded sections and VxWorks .tls_vars.  */
      for (s = ibfd->sections; s != NULL; s = s->next)
	{
	  struct elf_dyn_relocs *p;

	  for (p = (struct elf_dyn_relocs *)
		   elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
	    {
	      if (!bfd_is_abs_section (p->sec)
		  && bfd_is_abs_section (p->sec->output_section))
		{
		  /* Input section has been discarded, either because
		     it is a copy of a linkonce section or due to
		     linker script /DISCARD/, so we'll be discarding
		     the relocs too.  */
		}
	      else if (is_vxworks
		       && strcmp (p->sec->output_section->name,
				  ".tls_vars") == 0)
		{
		  /* Relocations in vxworks .tls_vars sections are
		     handled specially by the loader.  */
		}
	      else if (p->count != 0)
		{
		  srel = elf_section_data (p->sec)->sreloc;
		  elf32_arm_allocate_dynrelocs (info, srel, p->count);
		  if ((p->sec->output_section->flags & SEC_READONLY) != 0)
		    info->flags |= DF_TEXTREL;
		}
	    }
	}

      local_got = elf_local_got_refcounts (ibfd);
      if (!local_got)
	continue;

      /* The per-local-symbol arrays below are all indexed in
	 parallel; advance all the cursors together.  */
      symtab_hdr = & elf_symtab_hdr (ibfd);
      locsymcount = symtab_hdr->sh_info;
      end_local_got = local_got + locsymcount;
      local_iplt_ptr = elf32_arm_local_iplt (ibfd);
      local_tls_type = elf32_arm_local_got_tls_type (ibfd);
      local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd);
      symndx = 0;
      s = htab->root.sgot;
      srel = htab->root.srelgot;
      for (; local_got < end_local_got;
	   ++local_got, ++local_iplt_ptr, ++local_tls_type,
	   ++local_tlsdesc_gotent, ++symndx)
	{
	  *local_tlsdesc_gotent = (bfd_vma) -1;
	  local_iplt = *local_iplt_ptr;
	  if (local_iplt != NULL)
	    {
	      struct elf_dyn_relocs *p;

	      if (local_iplt->root.refcount > 0)
		{
		  elf32_arm_allocate_plt_entry (info, TRUE,
						&local_iplt->root,
						&local_iplt->arm);
		  if (local_iplt->arm.noncall_refcount == 0)
		    /* All references to the PLT are calls, so all
		       non-call references can resolve directly to the
		       run-time target.  This means that the .got entry
		       would be the same as the .igot.plt entry, so there's
		       no point creating both.  */
		    *local_got = 0;
		}
	      else
		{
		  BFD_ASSERT (local_iplt->arm.noncall_refcount == 0);
		  local_iplt->root.offset = (bfd_vma) -1;
		}

	      for (p = local_iplt->dyn_relocs; p != NULL; p = p->next)
		{
		  asection *psrel;

		  psrel = elf_section_data (p->sec)->sreloc;
		  if (local_iplt->arm.noncall_refcount == 0)
		    elf32_arm_allocate_irelocs (info, psrel, p->count);
		  else
		    elf32_arm_allocate_dynrelocs (info, psrel, p->count);
		}
	    }
	  /* A positive refcount means this local symbol needs one or
	     more GOT slots; size them according to its TLS type.  */
	  if (*local_got > 0)
	    {
	      Elf_Internal_Sym *isym;

	      *local_got = s->size;
	      if (*local_tls_type & GOT_TLS_GD)
		/* TLS_GD relocs need an 8-byte structure in the GOT.  */
		s->size += 8;
	      if (*local_tls_type & GOT_TLS_GDESC)
		{
		  /* TLS descriptors live in .got.plt, after the jump
		     table, not in .got.  */
		  *local_tlsdesc_gotent = htab->root.sgotplt->size
		    - elf32_arm_compute_jump_table_size (htab);
		  htab->root.sgotplt->size += 8;
		  *local_got = (bfd_vma) -2;
		  /* plt.got_offset needs to know there's a TLS_DESC
		     reloc in the middle of .got.plt.  */
		  htab->num_tls_desc++;
		}
	      if (*local_tls_type & GOT_TLS_IE)
		s->size += 4;

	      if (*local_tls_type & GOT_NORMAL)
		{
		  /* If the symbol is both GD and GDESC, *local_got
		     may have been overwritten.  */
		  *local_got = s->size;
		  s->size += 4;
		}

	      isym = bfd_sym_from_r_symndx (&htab->sym_cache, ibfd, symndx);
	      if (isym == NULL)
		return FALSE;

	      /* If all references to an STT_GNU_IFUNC PLT are calls,
		 then all non-call references, including this GOT entry,
		 resolve directly to the run-time target.  */
	      if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC
		  && (local_iplt == NULL
		      || local_iplt->arm.noncall_refcount == 0))
		elf32_arm_allocate_irelocs (info, srel, 1);
	      else if (bfd_link_pic (info) || output_bfd->flags & DYNAMIC)
		{
		  if ((bfd_link_pic (info) && !(*local_tls_type & GOT_TLS_GDESC))
		      || *local_tls_type & GOT_TLS_GD)
		    elf32_arm_allocate_dynrelocs (info, srel, 1);

		  if (bfd_link_pic (info) && *local_tls_type & GOT_TLS_GDESC)
		    {
		      elf32_arm_allocate_dynrelocs (info,
						    htab->root.srelplt, 1);
		      /* GDESC needs a lazy-resolution trampoline;
			 flag it for sizing further down.  */
		      htab->tls_trampoline = -1;
		    }
		}
	    }
	  else
	    *local_got = (bfd_vma) -1;
	}
    }

  if (htab->tls_ldm_got.refcount > 0)
    {
      /* Allocate two GOT entries and one dynamic relocation (if necessary)
	 for R_ARM_TLS_LDM32 relocations.  */
      htab->tls_ldm_got.offset = htab->root.sgot->size;
      htab->root.sgot->size += 8;
      if (bfd_link_pic (info))
	elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
    }
  else
    htab->tls_ldm_got.offset = -1;

  /* Allocate global sym .plt and .got entries, and space for global
     sym dynamic relocs.  */
  elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info);

  /* Here we rummage through the found bfds to collect glue information.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      if (! is_arm_elf (ibfd))
	continue;

      /* Initialise mapping tables for code/data.  */
      bfd_elf32_arm_init_maps (ibfd);

      /* NOTE(review): errors from these scans are reported but do not
	 make this function return FALSE — the link carries on.  */
      if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
	  || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info)
	  || !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd, info))
	/* xgettext:c-format */
	_bfd_error_handler (_("Errors encountered processing file %s"),
			    ibfd->filename);
    }

  /* Allocate space for the glue sections now that we've sized them.  */
  bfd_elf32_arm_allocate_interworking_sections (info);

  /* For every jump slot reserved in the sgotplt, reloc_count is
     incremented.  However, when we reserve space for TLS descriptors,
     it's not incremented, so in order to compute the space reserved
     for them, it suffices to multiply the reloc count by the jump
     slot size.  */
  if (htab->root.srelplt)
    htab->sgotplt_jump_table_size = elf32_arm_compute_jump_table_size(htab);

  /* tls_trampoline was set to -1 above if any GDESC use needs one;
     reserve its PLT slot (and lazy-resolution GOT/PLT words) here.  */
  if (htab->tls_trampoline)
    {
      if (htab->root.splt->size == 0)
	htab->root.splt->size += htab->plt_header_size;

      htab->tls_trampoline = htab->root.splt->size;
      htab->root.splt->size += htab->plt_entry_size;

      /* If we're not using lazy TLS relocations, don't generate the
	 PLT and GOT entries they require.  */
      if (!(info->flags & DF_BIND_NOW))
	{
	  htab->dt_tlsdesc_got = htab->root.sgot->size;
	  htab->root.sgot->size += 4;

	  htab->dt_tlsdesc_plt = htab->root.splt->size;
	  htab->root.splt->size += 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline);
	}
    }

  /* The check_relocs and adjust_dynamic_symbol entry points have
     determined the sizes of the various dynamic sections.  Allocate
     memory for them.  */
  plt = FALSE;
  relocs = FALSE;
  for (s = dynobj->sections; s != NULL; s = s->next)
    {
      const char * name;

      if ((s->flags & SEC_LINKER_CREATED) == 0)
	continue;

      /* It's OK to base decisions on the section name, because none
	 of the dynobj section names depend upon the input files.  */
      name = bfd_get_section_name (dynobj, s);

      if (s == htab->root.splt)
	{
	  /* Remember whether there is a PLT.  */
	  plt = s->size != 0;
	}
      else if (CONST_STRNEQ (name, ".rel"))
	{
	  if (s->size != 0)
	    {
	      /* Remember whether there are any reloc sections other
		 than .rel(a).plt and .rela.plt.unloaded.  */
	      if (s != htab->root.srelplt && s != htab->srelplt2)
		relocs = TRUE;

	      /* We use the reloc_count field as a counter if we need
		 to copy relocs into the output file.  */
	      s->reloc_count = 0;
	    }
	}
      else if (s != htab->root.sgot
	       && s != htab->root.sgotplt
	       && s != htab->root.iplt
	       && s != htab->root.igotplt
	       && s != htab->sdynbss)
	{
	  /* It's not one of our sections, so don't allocate space.  */
	  continue;
	}

      if (s->size == 0)
	{
	  /* If we don't need this section, strip it from the
	     output file.  This is mostly to handle .rel(a).bss and
	     .rel(a).plt.  We must create both sections in
	     create_dynamic_sections, because they must be created
	     before the linker maps input sections to output
	     sections.  The linker does that before
	     adjust_dynamic_symbol is called, and it is that
	     function which decides whether anything needs to go
	     into these sections.  */
	  s->flags |= SEC_EXCLUDE;
	  continue;
	}

      if ((s->flags & SEC_HAS_CONTENTS) == 0)
	continue;

      /* Allocate memory for the section contents.  */
      s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
      if (s->contents == NULL)
	return FALSE;
    }

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      /* Add some entries to the .dynamic section.  We fill in the
	 values later, in elf32_arm_finish_dynamic_sections, but we
	 must add the entries now so that we get the correct size for
	 the .dynamic section.  The DT_DEBUG entry is filled in by the
	 dynamic linker and used by the debugger.  */
#define add_dynamic_entry(TAG, VAL) \
  _bfd_elf_add_dynamic_entry (info, TAG, VAL)

      if (bfd_link_executable (info))
	{
	  if (!add_dynamic_entry (DT_DEBUG, 0))
	    return FALSE;
	}

      if (plt)
	{
	  if (   !add_dynamic_entry (DT_PLTGOT, 0)
	      || !add_dynamic_entry (DT_PLTRELSZ, 0)
	      || !add_dynamic_entry (DT_PLTREL,
				     htab->use_rel ? DT_REL : DT_RELA)
	      || !add_dynamic_entry (DT_JMPREL, 0))
	    return FALSE;

	  if (htab->dt_tlsdesc_plt &&
		(!add_dynamic_entry (DT_TLSDESC_PLT,0)
		 || !add_dynamic_entry (DT_TLSDESC_GOT,0)))
	    return FALSE;
	}

      if (relocs)
	{
	  if (htab->use_rel)
	    {
	      if (!add_dynamic_entry (DT_REL, 0)
		  || !add_dynamic_entry (DT_RELSZ, 0)
		  || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
		return FALSE;
	    }
	  else
	    {
	      if (!add_dynamic_entry (DT_RELA, 0)
		  || !add_dynamic_entry (DT_RELASZ, 0)
		  || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
		return FALSE;
	    }
	}

      /* If any dynamic relocs apply to a read-only section,
	 then we need a DT_TEXTREL entry.  */
      if ((info->flags & DF_TEXTREL) == 0)
	elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs,
				info);

      if ((info->flags & DF_TEXTREL) != 0)
	{
	  if (!add_dynamic_entry (DT_TEXTREL, 0))
	    return FALSE;
	}
      if (htab->vxworks_p
	  && !elf_vxworks_add_dynamic_entries (output_bfd, info))
	return FALSE;
    }
#undef add_dynamic_entry

  return TRUE;
}
15001
/* Size sections even though they're not dynamic.  We use it to setup
   _TLS_MODULE_BASE_, if needed: when a TLS output section exists,
   define _TLS_MODULE_BASE_ at offset 0 of it as a hidden local TLS
   symbol.  Skipped entirely for relocatable links.  */

static bfd_boolean
elf32_arm_always_size_sections (bfd *output_bfd,
				struct bfd_link_info *info)
{
  asection *tls_sec;

  if (bfd_link_relocatable (info))
    return TRUE;

  tls_sec = elf_hash_table (info)->tls_sec;

  if (tls_sec)
    {
      struct elf_link_hash_entry *tlsbase;

      /* Look the symbol up, creating the hash entry if necessary
	 (second TRUE) — so TLSBASE is non-NULL unless allocation
	 failed.  */
      tlsbase = elf_link_hash_lookup
	(elf_hash_table (info), "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);

      if (tlsbase)
	{
	  struct bfd_link_hash_entry *bh = NULL;
	  const struct elf_backend_data *bed
	    = get_elf_backend_data (output_bfd);

	  /* Define the symbol at offset 0 of the TLS section.  */
	  if (!(_bfd_generic_link_add_one_symbol
		(info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
		 tls_sec, 0, NULL, FALSE,
		 bed->collect, &bh)))
	    return FALSE;

	  /* BH refers to the same symbol we just looked up; the
	     reassignment below is re-fetching it through the entry
	     add_one_symbol handed back.  */
	  tlsbase->type = STT_TLS;
	  tlsbase = (struct elf_link_hash_entry *)bh;
	  tlsbase->def_regular = 1;
	  tlsbase->other = STV_HIDDEN;
	  /* Hide it so it is never exported from the output.  */
	  (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
	}
    }
  return TRUE;
}
15044
/* Finish up dynamic symbol handling.  We set the contents of various
   dynamic sections here: populate H's PLT entry, fix up SYM's
   section/value/type for PLT and iplt cases, emit a copy reloc if one
   is needed, and force _DYNAMIC/_GLOBAL_OFFSET_TABLE_ absolute.  */

static bfd_boolean
elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
				 struct bfd_link_info * info,
				 struct elf_link_hash_entry * h,
				 Elf_Internal_Sym * sym)
{
  struct elf32_arm_link_hash_table *htab;
  struct elf32_arm_link_hash_entry *eh;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  eh = (struct elf32_arm_link_hash_entry *) h;

  /* A plt.offset other than -1 means the symbol has a PLT entry.  */
  if (h->plt.offset != (bfd_vma) -1)
    {
      if (!eh->is_iplt)
	{
	  /* Regular (non-ifunc) PLT entries require a dynamic symbol.  */
	  BFD_ASSERT (h->dynindx != -1);
	  if (! elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
					      h->dynindx, 0))
	    return FALSE;
	}

      if (!h->def_regular)
	{
	  /* Mark the symbol as undefined, rather than as defined in
	     the .plt section.  */
	  sym->st_shndx = SHN_UNDEF;
	  /* If the symbol is weak we need to clear the value.
	     Otherwise, the PLT entry would provide a definition for
	     the symbol even if the symbol wasn't defined anywhere,
	     and so the symbol would never be NULL.  Leave the value if
	     there were any relocations where pointer equality matters
	     (this is a clue for the dynamic linker, to make function
	     pointer comparisons work between an application and shared
	     library).  */
	  if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
	    sym->st_value = 0;
	}
      else if (eh->is_iplt && eh->plt.noncall_refcount != 0)
	{
	  /* At least one non-call relocation references this .iplt entry,
	     so the .iplt entry is the function's canonical address.
	     Point the output symbol at it (ARM state, STT_FUNC).  */
	  sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC);
	  ARM_SET_SYM_BRANCH_TYPE (sym->st_target_internal, ST_BRANCH_TO_ARM);
	  sym->st_shndx = (_bfd_elf_section_from_bfd_section
			   (output_bfd, htab->root.iplt->output_section));
	  sym->st_value = (h->plt.offset
			   + htab->root.iplt->output_section->vma
			   + htab->root.iplt->output_offset);
	}
    }

  if (h->needs_copy)
    {
      asection * s;
      Elf_Internal_Rela rel;

      /* This symbol needs a copy reloc.  Set it up: an R_ARM_COPY
	 against the symbol's definition address, in .rel(a).bss.  */
      BFD_ASSERT (h->dynindx != -1
		  && (h->root.type == bfd_link_hash_defined
		      || h->root.type == bfd_link_hash_defweak));

      s = htab->srelbss;
      BFD_ASSERT (s != NULL);

      rel.r_addend = 0;
      rel.r_offset = (h->root.u.def.value
		      + h->root.u.def.section->output_section->vma
		      + h->root.u.def.section->output_offset);
      rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
      elf32_arm_add_dynreloc (output_bfd, info, s, &rel);
    }

  /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute.  On VxWorks,
     the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
     to the ".got" section.  */
  if (h == htab->root.hdynamic
      || (!htab->vxworks_p && h == htab->root.hgot))
    sym->st_shndx = SHN_ABS;

  return TRUE;
}
15133
15134 static void
15135 arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
15136 void *contents,
15137 const unsigned long *template, unsigned count)
15138 {
15139 unsigned ix;
15140
15141 for (ix = 0; ix != count; ix++)
15142 {
15143 unsigned long insn = template[ix];
15144
15145 /* Emit mov pc,rx if bx is not permitted. */
15146 if (htab->fix_v4bx == 1 && (insn & 0x0ffffff0) == 0x012fff10)
15147 insn = (insn & 0xf000000f) | 0x01a0f000;
15148 put_arm_insn (htab, output_bfd, insn, (char *)contents + ix*4);
15149 }
15150 }
15151
15152 /* Install the special first PLT entry for elf32-arm-nacl. Unlike
15153 other variants, NaCl needs this entry in a static executable's
15154 .iplt too. When we're handling that case, GOT_DISPLACEMENT is
15155 zero. For .iplt really only the last bundle is useful, and .iplt
15156 could have a shorter first entry, with each individual PLT entry's
15157 relative branch calculated differently so it targets the last
15158 bundle instead of the instruction before it (labelled .Lplt_tail
15159 above). But it's simpler to keep the size and layout of PLT0
15160 consistent with the dynamic case, at the cost of some dead code at
15161 the start of .iplt and the one dead store to the stack at the start
15162 of .Lplt_tail. */
15163 static void
15164 arm_nacl_put_plt0 (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
15165 asection *plt, bfd_vma got_displacement)
15166 {
15167 unsigned int i;
15168
15169 put_arm_insn (htab, output_bfd,
15170 elf32_arm_nacl_plt0_entry[0]
15171 | arm_movw_immediate (got_displacement),
15172 plt->contents + 0);
15173 put_arm_insn (htab, output_bfd,
15174 elf32_arm_nacl_plt0_entry[1]
15175 | arm_movt_immediate (got_displacement),
15176 plt->contents + 4);
15177
15178 for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
15179 put_arm_insn (htab, output_bfd,
15180 elf32_arm_nacl_plt0_entry[i],
15181 plt->contents + (i * 4));
15182 }
15183
15184 /* Finish up the dynamic sections. */
15185
15186 static bfd_boolean
15187 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
15188 {
15189 bfd * dynobj;
15190 asection * sgot;
15191 asection * sdyn;
15192 struct elf32_arm_link_hash_table *htab;
15193
15194 htab = elf32_arm_hash_table (info);
15195 if (htab == NULL)
15196 return FALSE;
15197
15198 dynobj = elf_hash_table (info)->dynobj;
15199
15200 sgot = htab->root.sgotplt;
15201 /* A broken linker script might have discarded the dynamic sections.
15202 Catch this here so that we do not seg-fault later on. */
15203 if (sgot != NULL && bfd_is_abs_section (sgot->output_section))
15204 return FALSE;
15205 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
15206
15207 if (elf_hash_table (info)->dynamic_sections_created)
15208 {
15209 asection *splt;
15210 Elf32_External_Dyn *dyncon, *dynconend;
15211
15212 splt = htab->root.splt;
15213 BFD_ASSERT (splt != NULL && sdyn != NULL);
15214 BFD_ASSERT (htab->symbian_p || sgot != NULL);
15215
15216 dyncon = (Elf32_External_Dyn *) sdyn->contents;
15217 dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
15218
15219 for (; dyncon < dynconend; dyncon++)
15220 {
15221 Elf_Internal_Dyn dyn;
15222 const char * name;
15223 asection * s;
15224
15225 bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
15226
15227 switch (dyn.d_tag)
15228 {
15229 unsigned int type;
15230
15231 default:
15232 if (htab->vxworks_p
15233 && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
15234 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
15235 break;
15236
15237 case DT_HASH:
15238 name = ".hash";
15239 goto get_vma_if_bpabi;
15240 case DT_STRTAB:
15241 name = ".dynstr";
15242 goto get_vma_if_bpabi;
15243 case DT_SYMTAB:
15244 name = ".dynsym";
15245 goto get_vma_if_bpabi;
15246 case DT_VERSYM:
15247 name = ".gnu.version";
15248 goto get_vma_if_bpabi;
15249 case DT_VERDEF:
15250 name = ".gnu.version_d";
15251 goto get_vma_if_bpabi;
15252 case DT_VERNEED:
15253 name = ".gnu.version_r";
15254 goto get_vma_if_bpabi;
15255
15256 case DT_PLTGOT:
15257 name = ".got";
15258 goto get_vma;
15259 case DT_JMPREL:
15260 name = RELOC_SECTION (htab, ".plt");
15261 get_vma:
15262 s = bfd_get_section_by_name (output_bfd, name);
15263 if (s == NULL)
15264 {
15265 /* PR ld/14397: Issue an error message if a required section is missing. */
15266 (*_bfd_error_handler)
15267 (_("error: required section '%s' not found in the linker script"), name);
15268 bfd_set_error (bfd_error_invalid_operation);
15269 return FALSE;
15270 }
15271 if (!htab->symbian_p)
15272 dyn.d_un.d_ptr = s->vma;
15273 else
15274 /* In the BPABI, tags in the PT_DYNAMIC section point
15275 at the file offset, not the memory address, for the
15276 convenience of the post linker. */
15277 dyn.d_un.d_ptr = s->filepos;
15278 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
15279 break;
15280
15281 get_vma_if_bpabi:
15282 if (htab->symbian_p)
15283 goto get_vma;
15284 break;
15285
15286 case DT_PLTRELSZ:
15287 s = htab->root.srelplt;
15288 BFD_ASSERT (s != NULL);
15289 dyn.d_un.d_val = s->size;
15290 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
15291 break;
15292
15293 case DT_RELSZ:
15294 case DT_RELASZ:
15295 if (!htab->symbian_p)
15296 {
15297 /* My reading of the SVR4 ABI indicates that the
15298 procedure linkage table relocs (DT_JMPREL) should be
15299 included in the overall relocs (DT_REL). This is
15300 what Solaris does. However, UnixWare can not handle
15301 that case. Therefore, we override the DT_RELSZ entry
15302 here to make it not include the JMPREL relocs. Since
15303 the linker script arranges for .rel(a).plt to follow all
15304 other relocation sections, we don't have to worry
15305 about changing the DT_REL entry. */
15306 s = htab->root.srelplt;
15307 if (s != NULL)
15308 dyn.d_un.d_val -= s->size;
15309 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
15310 break;
15311 }
15312 /* Fall through. */
15313
15314 case DT_REL:
15315 case DT_RELA:
15316 /* In the BPABI, the DT_REL tag must point at the file
15317 offset, not the VMA, of the first relocation
15318 section. So, we use code similar to that in
15319 elflink.c, but do not check for SHF_ALLOC on the
15320 relcoation section, since relocations sections are
15321 never allocated under the BPABI. The comments above
15322 about Unixware notwithstanding, we include all of the
15323 relocations here. */
15324 if (htab->symbian_p)
15325 {
15326 unsigned int i;
15327 type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
15328 ? SHT_REL : SHT_RELA);
15329 dyn.d_un.d_val = 0;
15330 for (i = 1; i < elf_numsections (output_bfd); i++)
15331 {
15332 Elf_Internal_Shdr *hdr
15333 = elf_elfsections (output_bfd)[i];
15334 if (hdr->sh_type == type)
15335 {
15336 if (dyn.d_tag == DT_RELSZ
15337 || dyn.d_tag == DT_RELASZ)
15338 dyn.d_un.d_val += hdr->sh_size;
15339 else if ((ufile_ptr) hdr->sh_offset
15340 <= dyn.d_un.d_val - 1)
15341 dyn.d_un.d_val = hdr->sh_offset;
15342 }
15343 }
15344 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
15345 }
15346 break;
15347
15348 case DT_TLSDESC_PLT:
15349 s = htab->root.splt;
15350 dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
15351 + htab->dt_tlsdesc_plt);
15352 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
15353 break;
15354
15355 case DT_TLSDESC_GOT:
15356 s = htab->root.sgot;
15357 dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
15358 + htab->dt_tlsdesc_got);
15359 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
15360 break;
15361
15362 /* Set the bottom bit of DT_INIT/FINI if the
15363 corresponding function is Thumb. */
15364 case DT_INIT:
15365 name = info->init_function;
15366 goto get_sym;
15367 case DT_FINI:
15368 name = info->fini_function;
15369 get_sym:
15370 /* If it wasn't set by elf_bfd_final_link
15371 then there is nothing to adjust. */
15372 if (dyn.d_un.d_val != 0)
15373 {
15374 struct elf_link_hash_entry * eh;
15375
15376 eh = elf_link_hash_lookup (elf_hash_table (info), name,
15377 FALSE, FALSE, TRUE);
15378 if (eh != NULL
15379 && ARM_GET_SYM_BRANCH_TYPE (eh->target_internal)
15380 == ST_BRANCH_TO_THUMB)
15381 {
15382 dyn.d_un.d_val |= 1;
15383 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
15384 }
15385 }
15386 break;
15387 }
15388 }
15389
15390 /* Fill in the first entry in the procedure linkage table. */
15391 if (splt->size > 0 && htab->plt_header_size)
15392 {
15393 const bfd_vma *plt0_entry;
15394 bfd_vma got_address, plt_address, got_displacement;
15395
15396 /* Calculate the addresses of the GOT and PLT. */
15397 got_address = sgot->output_section->vma + sgot->output_offset;
15398 plt_address = splt->output_section->vma + splt->output_offset;
15399
15400 if (htab->vxworks_p)
15401 {
15402 /* The VxWorks GOT is relocated by the dynamic linker.
15403 Therefore, we must emit relocations rather than simply
15404 computing the values now. */
15405 Elf_Internal_Rela rel;
15406
15407 plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
15408 put_arm_insn (htab, output_bfd, plt0_entry[0],
15409 splt->contents + 0);
15410 put_arm_insn (htab, output_bfd, plt0_entry[1],
15411 splt->contents + 4);
15412 put_arm_insn (htab, output_bfd, plt0_entry[2],
15413 splt->contents + 8);
15414 bfd_put_32 (output_bfd, got_address, splt->contents + 12);
15415
15416 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
15417 rel.r_offset = plt_address + 12;
15418 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
15419 rel.r_addend = 0;
15420 SWAP_RELOC_OUT (htab) (output_bfd, &rel,
15421 htab->srelplt2->contents);
15422 }
15423 else if (htab->nacl_p)
15424 arm_nacl_put_plt0 (htab, output_bfd, splt,
15425 got_address + 8 - (plt_address + 16));
15426 else if (using_thumb_only (htab))
15427 {
15428 got_displacement = got_address - (plt_address + 12);
15429
15430 plt0_entry = elf32_thumb2_plt0_entry;
15431 put_arm_insn (htab, output_bfd, plt0_entry[0],
15432 splt->contents + 0);
15433 put_arm_insn (htab, output_bfd, plt0_entry[1],
15434 splt->contents + 4);
15435 put_arm_insn (htab, output_bfd, plt0_entry[2],
15436 splt->contents + 8);
15437
15438 bfd_put_32 (output_bfd, got_displacement, splt->contents + 12);
15439 }
15440 else
15441 {
15442 got_displacement = got_address - (plt_address + 16);
15443
15444 plt0_entry = elf32_arm_plt0_entry;
15445 put_arm_insn (htab, output_bfd, plt0_entry[0],
15446 splt->contents + 0);
15447 put_arm_insn (htab, output_bfd, plt0_entry[1],
15448 splt->contents + 4);
15449 put_arm_insn (htab, output_bfd, plt0_entry[2],
15450 splt->contents + 8);
15451 put_arm_insn (htab, output_bfd, plt0_entry[3],
15452 splt->contents + 12);
15453
15454 #ifdef FOUR_WORD_PLT
15455 /* The displacement value goes in the otherwise-unused
15456 last word of the second entry. */
15457 bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
15458 #else
15459 bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
15460 #endif
15461 }
15462 }
15463
15464 /* UnixWare sets the entsize of .plt to 4, although that doesn't
15465 really seem like the right value. */
15466 if (splt->output_section->owner == output_bfd)
15467 elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
15468
15469 if (htab->dt_tlsdesc_plt)
15470 {
15471 bfd_vma got_address
15472 = sgot->output_section->vma + sgot->output_offset;
15473 bfd_vma gotplt_address = (htab->root.sgot->output_section->vma
15474 + htab->root.sgot->output_offset);
15475 bfd_vma plt_address
15476 = splt->output_section->vma + splt->output_offset;
15477
15478 arm_put_trampoline (htab, output_bfd,
15479 splt->contents + htab->dt_tlsdesc_plt,
15480 dl_tlsdesc_lazy_trampoline, 6);
15481
15482 bfd_put_32 (output_bfd,
15483 gotplt_address + htab->dt_tlsdesc_got
15484 - (plt_address + htab->dt_tlsdesc_plt)
15485 - dl_tlsdesc_lazy_trampoline[6],
15486 splt->contents + htab->dt_tlsdesc_plt + 24);
15487 bfd_put_32 (output_bfd,
15488 got_address - (plt_address + htab->dt_tlsdesc_plt)
15489 - dl_tlsdesc_lazy_trampoline[7],
15490 splt->contents + htab->dt_tlsdesc_plt + 24 + 4);
15491 }
15492
15493 if (htab->tls_trampoline)
15494 {
15495 arm_put_trampoline (htab, output_bfd,
15496 splt->contents + htab->tls_trampoline,
15497 tls_trampoline, 3);
15498 #ifdef FOUR_WORD_PLT
15499 bfd_put_32 (output_bfd, 0x00000000,
15500 splt->contents + htab->tls_trampoline + 12);
15501 #endif
15502 }
15503
15504 if (htab->vxworks_p
15505 && !bfd_link_pic (info)
15506 && htab->root.splt->size > 0)
15507 {
15508 /* Correct the .rel(a).plt.unloaded relocations. They will have
15509 incorrect symbol indexes. */
15510 int num_plts;
15511 unsigned char *p;
15512
15513 num_plts = ((htab->root.splt->size - htab->plt_header_size)
15514 / htab->plt_entry_size);
15515 p = htab->srelplt2->contents + RELOC_SIZE (htab);
15516
15517 for (; num_plts; num_plts--)
15518 {
15519 Elf_Internal_Rela rel;
15520
15521 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
15522 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
15523 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
15524 p += RELOC_SIZE (htab);
15525
15526 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
15527 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
15528 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
15529 p += RELOC_SIZE (htab);
15530 }
15531 }
15532 }
15533
15534 if (htab->nacl_p && htab->root.iplt != NULL && htab->root.iplt->size > 0)
15535 /* NaCl uses a special first entry in .iplt too. */
15536 arm_nacl_put_plt0 (htab, output_bfd, htab->root.iplt, 0);
15537
15538 /* Fill in the first three entries in the global offset table. */
15539 if (sgot)
15540 {
15541 if (sgot->size > 0)
15542 {
15543 if (sdyn == NULL)
15544 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
15545 else
15546 bfd_put_32 (output_bfd,
15547 sdyn->output_section->vma + sdyn->output_offset,
15548 sgot->contents);
15549 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
15550 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
15551 }
15552
15553 elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
15554 }
15555
15556 return TRUE;
15557 }
15558
15559 static void
15560 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
15561 {
15562 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
15563 struct elf32_arm_link_hash_table *globals;
15564 struct elf_segment_map *m;
15565
15566 i_ehdrp = elf_elfheader (abfd);
15567
15568 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
15569 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
15570 else
15571 _bfd_elf_post_process_headers (abfd, link_info);
15572 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
15573
15574 if (link_info)
15575 {
15576 globals = elf32_arm_hash_table (link_info);
15577 if (globals != NULL && globals->byteswap_code)
15578 i_ehdrp->e_flags |= EF_ARM_BE8;
15579 }
15580
15581 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_VER5
15582 && ((i_ehdrp->e_type == ET_DYN) || (i_ehdrp->e_type == ET_EXEC)))
15583 {
15584 int abi = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_ABI_VFP_args);
15585 if (abi == AEABI_VFP_args_vfp)
15586 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_HARD;
15587 else
15588 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_SOFT;
15589 }
15590
15591 /* Scan segment to set p_flags attribute if it contains only sections with
15592 SHF_ARM_NOREAD flag. */
15593 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
15594 {
15595 unsigned int j;
15596
15597 if (m->count == 0)
15598 continue;
15599 for (j = 0; j < m->count; j++)
15600 {
15601 if (!(elf_section_flags (m->sections[j]) & SHF_ARM_NOREAD))
15602 break;
15603 }
15604 if (j == m->count)
15605 {
15606 m->p_flags = PF_X;
15607 m->p_flags_valid = 1;
15608 }
15609 }
15610 }
15611
15612 static enum elf_reloc_type_class
15613 elf32_arm_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
15614 const asection *rel_sec ATTRIBUTE_UNUSED,
15615 const Elf_Internal_Rela *rela)
15616 {
15617 switch ((int) ELF32_R_TYPE (rela->r_info))
15618 {
15619 case R_ARM_RELATIVE:
15620 return reloc_class_relative;
15621 case R_ARM_JUMP_SLOT:
15622 return reloc_class_plt;
15623 case R_ARM_COPY:
15624 return reloc_class_copy;
15625 case R_ARM_IRELATIVE:
15626 return reloc_class_ifunc;
15627 default:
15628 return reloc_class_normal;
15629 }
15630 }
15631
/* Final write hook: refresh the contents of the ARM note section
   (ARM_NOTE_SECTION) in ABFD before the file is written out.  */

static void
elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
{
  bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
}
15637
15638 /* Return TRUE if this is an unwinding table entry. */
15639
15640 static bfd_boolean
15641 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
15642 {
15643 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
15644 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
15645 }
15646
15647
15648 /* Set the type and flags for an ARM section. We do this by
15649 the section name, which is a hack, but ought to work. */
15650
15651 static bfd_boolean
15652 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
15653 {
15654 const char * name;
15655
15656 name = bfd_get_section_name (abfd, sec);
15657
15658 if (is_arm_elf_unwind_section_name (abfd, name))
15659 {
15660 hdr->sh_type = SHT_ARM_EXIDX;
15661 hdr->sh_flags |= SHF_LINK_ORDER;
15662 }
15663
15664 if (sec->flags & SEC_ELF_NOREAD)
15665 hdr->sh_flags |= SHF_ARM_NOREAD;
15666
15667 return TRUE;
15668 }
15669
15670 /* Handle an ARM specific section when reading an object file. This is
15671 called when bfd_section_from_shdr finds a section with an unknown
15672 type. */
15673
15674 static bfd_boolean
15675 elf32_arm_section_from_shdr (bfd *abfd,
15676 Elf_Internal_Shdr * hdr,
15677 const char *name,
15678 int shindex)
15679 {
15680 /* There ought to be a place to keep ELF backend specific flags, but
15681 at the moment there isn't one. We just keep track of the
15682 sections by their name, instead. Fortunately, the ABI gives
15683 names for all the ARM specific sections, so we will probably get
15684 away with this. */
15685 switch (hdr->sh_type)
15686 {
15687 case SHT_ARM_EXIDX:
15688 case SHT_ARM_PREEMPTMAP:
15689 case SHT_ARM_ATTRIBUTES:
15690 break;
15691
15692 default:
15693 return FALSE;
15694 }
15695
15696 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
15697 return FALSE;
15698
15699 return TRUE;
15700 }
15701
15702 static _arm_elf_section_data *
15703 get_arm_elf_section_data (asection * sec)
15704 {
15705 if (sec && sec->owner && is_arm_elf (sec->owner))
15706 return elf32_arm_section_data (sec);
15707 else
15708 return NULL;
15709 }
15710
/* State threaded through the emission of linker-generated local
   symbols (mapping symbols and stub symbols).  */

typedef struct
{
  void *flaginfo;		/* Opaque data passed back to FUNC.  */
  struct bfd_link_info *info;	/* The link in progress.  */
  asection *sec;		/* Section the symbols are placed in.  */
  int sec_shndx;		/* Output section index of SEC.  */
  /* Callback used to output one symbol; returns 1 on success.  */
  int (*func) (void *, const char *, Elf_Internal_Sym *,
	       asection *, struct elf_link_hash_entry *);
} output_arch_syminfo;
15720
/* The three kinds of ARM mapping symbol; the values index the "$a",
   "$t", "$d" name table in elf32_arm_output_map_sym.  */

enum map_symbol_type
{
  ARM_MAP_ARM,		/* $a: start of ARM instructions.  */
  ARM_MAP_THUMB,	/* $t: start of Thumb instructions.  */
  ARM_MAP_DATA		/* $d: start of data.  */
};
15727
15728
15729 /* Output a single mapping symbol. */
15730
15731 static bfd_boolean
15732 elf32_arm_output_map_sym (output_arch_syminfo *osi,
15733 enum map_symbol_type type,
15734 bfd_vma offset)
15735 {
15736 static const char *names[3] = {"$a", "$t", "$d"};
15737 Elf_Internal_Sym sym;
15738
15739 sym.st_value = osi->sec->output_section->vma
15740 + osi->sec->output_offset
15741 + offset;
15742 sym.st_size = 0;
15743 sym.st_other = 0;
15744 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
15745 sym.st_shndx = osi->sec_shndx;
15746 sym.st_target_internal = 0;
15747 elf32_arm_section_map_add (osi->sec, names[type][1], offset);
15748 return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1;
15749 }
15750
/* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
   IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt.
   The symbols emitted, and their offsets, depend on which flavour of
   PLT (SymbianOS, VxWorks, NaCl, Thumb-only or classic ARM) is in use.
   Returns FALSE on failure.  */

static bfd_boolean
elf32_arm_output_plt_map_1 (output_arch_syminfo *osi,
			    bfd_boolean is_iplt_entry_p,
			    union gotplt_union *root_plt,
			    struct arm_plt_info *arm_plt)
{
  struct elf32_arm_link_hash_table *htab;
  bfd_vma addr, plt_header_size;

  /* An offset of -1 marks a symbol for which no PLT entry was made.  */
  if (root_plt->offset == (bfd_vma) -1)
    return TRUE;

  htab = elf32_arm_hash_table (osi->info);
  if (htab == NULL)
    return FALSE;

  /* .iplt has no header; .plt does.  */
  if (is_iplt_entry_p)
    {
      osi->sec = htab->root.iplt;
      plt_header_size = 0;
    }
  else
    {
      osi->sec = htab->root.splt;
      plt_header_size = htab->plt_header_size;
    }
  osi->sec_shndx = (_bfd_elf_section_from_bfd_section
		    (osi->info->output_bfd, osi->sec->output_section));

  /* Strip the low bit of the recorded offset to get the entry address.  */
  addr = root_plt->offset & -2;
  if (htab->symbian_p)
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
	return FALSE;
    }
  else if (htab->vxworks_p)
    {
      /* Offsets 8 and 20 are the data words within the two halves of a
	 VxWorks PLT entry -- assumes the entry layout defined elsewhere
	 in this file.  */
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
	return FALSE;
    }
  else if (htab->nacl_p)
    {
      /* NaCl entries are pure ARM code.  */
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
    }
  else if (using_thumb_only (htab))
    {
      /* Thumb-only (v7-M style) entries are pure Thumb code.  */
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr))
	return FALSE;
    }
  else
    {
      bfd_boolean thumb_stub_p;

      /* Classic ARM PLT; the entry may be preceded by a 4-byte
	 Thumb->ARM transition stub at ADDR - 4.  */
      thumb_stub_p = elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt);
      if (thumb_stub_p)
	{
	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
	    return FALSE;
	}
#ifdef FOUR_WORD_PLT
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
	return FALSE;
#else
      /* A three-word PLT with no Thumb thunk contains only Arm code,
	 so only need to output a mapping symbol for the first PLT entry and
	 entries with thumb thunks.  */
      if (thumb_stub_p || addr == plt_header_size)
	{
	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	    return FALSE;
	}
#endif
    }

  return TRUE;
}
15841
15842 /* Output mapping symbols for PLT entries associated with H. */
15843
15844 static bfd_boolean
15845 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
15846 {
15847 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
15848 struct elf32_arm_link_hash_entry *eh;
15849
15850 if (h->root.type == bfd_link_hash_indirect)
15851 return TRUE;
15852
15853 if (h->root.type == bfd_link_hash_warning)
15854 /* When warning symbols are created, they **replace** the "real"
15855 entry in the hash table, thus we never get to see the real
15856 symbol in a hash traversal. So look at it now. */
15857 h = (struct elf_link_hash_entry *) h->root.u.i.link;
15858
15859 eh = (struct elf32_arm_link_hash_entry *) h;
15860 return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h),
15861 &h->plt, &eh->plt);
15862 }
15863
15864 /* Output a single local symbol for a generated stub. */
15865
15866 static bfd_boolean
15867 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
15868 bfd_vma offset, bfd_vma size)
15869 {
15870 Elf_Internal_Sym sym;
15871
15872 sym.st_value = osi->sec->output_section->vma
15873 + osi->sec->output_offset
15874 + offset;
15875 sym.st_size = size;
15876 sym.st_other = 0;
15877 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
15878 sym.st_shndx = osi->sec_shndx;
15879 sym.st_target_internal = 0;
15880 return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1;
15881 }
15882
15883 static bfd_boolean
15884 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
15885 void * in_arg)
15886 {
15887 struct elf32_arm_stub_hash_entry *stub_entry;
15888 asection *stub_sec;
15889 bfd_vma addr;
15890 char *stub_name;
15891 output_arch_syminfo *osi;
15892 const insn_sequence *template_sequence;
15893 enum stub_insn_type prev_type;
15894 int size;
15895 int i;
15896 enum map_symbol_type sym_type;
15897
15898 /* Massage our args to the form they really have. */
15899 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
15900 osi = (output_arch_syminfo *) in_arg;
15901
15902 stub_sec = stub_entry->stub_sec;
15903
15904 /* Ensure this stub is attached to the current section being
15905 processed. */
15906 if (stub_sec != osi->sec)
15907 return TRUE;
15908
15909 addr = (bfd_vma) stub_entry->stub_offset;
15910 stub_name = stub_entry->output_name;
15911
15912 template_sequence = stub_entry->stub_template;
15913 switch (template_sequence[0].type)
15914 {
15915 case ARM_TYPE:
15916 if (!elf32_arm_output_stub_sym (osi, stub_name, addr, stub_entry->stub_size))
15917 return FALSE;
15918 break;
15919 case THUMB16_TYPE:
15920 case THUMB32_TYPE:
15921 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
15922 stub_entry->stub_size))
15923 return FALSE;
15924 break;
15925 default:
15926 BFD_FAIL ();
15927 return 0;
15928 }
15929
15930 prev_type = DATA_TYPE;
15931 size = 0;
15932 for (i = 0; i < stub_entry->stub_template_size; i++)
15933 {
15934 switch (template_sequence[i].type)
15935 {
15936 case ARM_TYPE:
15937 sym_type = ARM_MAP_ARM;
15938 break;
15939
15940 case THUMB16_TYPE:
15941 case THUMB32_TYPE:
15942 sym_type = ARM_MAP_THUMB;
15943 break;
15944
15945 case DATA_TYPE:
15946 sym_type = ARM_MAP_DATA;
15947 break;
15948
15949 default:
15950 BFD_FAIL ();
15951 return FALSE;
15952 }
15953
15954 if (template_sequence[i].type != prev_type)
15955 {
15956 prev_type = template_sequence[i].type;
15957 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
15958 return FALSE;
15959 }
15960
15961 switch (template_sequence[i].type)
15962 {
15963 case ARM_TYPE:
15964 case THUMB32_TYPE:
15965 size += 4;
15966 break;
15967
15968 case THUMB16_TYPE:
15969 size += 2;
15970 break;
15971
15972 case DATA_TYPE:
15973 size += 4;
15974 break;
15975
15976 default:
15977 BFD_FAIL ();
15978 return FALSE;
15979 }
15980 }
15981
15982 return TRUE;
15983 }
15984
/* Output mapping symbols for linker generated sections,
   and for those data-only sections that do not have a
   $d.  FUNC is the callback used to emit each Elf_Internal_Sym and
   FLAGINFO is passed through to it.  Returns FALSE on failure.  */

static bfd_boolean
elf32_arm_output_arch_local_syms (bfd *output_bfd,
				  struct bfd_link_info *info,
				  void *flaginfo,
				  int (*func) (void *, const char *,
					       Elf_Internal_Sym *,
					       asection *,
					       struct elf_link_hash_entry *))
{
  output_arch_syminfo osi;
  struct elf32_arm_link_hash_table *htab;
  bfd_vma offset;
  bfd_size_type size;
  bfd *input_bfd;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  check_use_blx (htab);

  osi.flaginfo = flaginfo;
  osi.info = info;
  osi.func = func;

  /* Add a $d mapping symbol to data-only sections that
     don't have any mapping symbol.  This may result in (harmless) redundant
     mapping symbols.  */
  for (input_bfd = info->input_bfds;
       input_bfd != NULL;
       input_bfd = input_bfd->link.next)
    {
      if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
	for (osi.sec = input_bfd->sections;
	     osi.sec != NULL;
	     osi.sec = osi.sec->next)
	  {
	    /* Only non-empty, non-excluded sections with contents that
	       end up in an allocated or code output section need the
	       extra $d.  */
	    if (osi.sec->output_section != NULL
		&& ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
		    != 0)
		&& (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
		   == SEC_HAS_CONTENTS
		&& get_arm_elf_section_data (osi.sec) != NULL
		&& get_arm_elf_section_data (osi.sec)->mapcount == 0
		&& osi.sec->size > 0
		&& (osi.sec->flags & SEC_EXCLUDE) == 0)
	      {
		osi.sec_shndx = _bfd_elf_section_from_bfd_section
		  (output_bfd, osi.sec->output_section);
		if (osi.sec_shndx != (int)SHN_BAD)
		  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
	      }
	  }
    }

  /* ARM->Thumb glue.  */
  if (htab->arm_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					ARM2THUMB_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);
      /* The per-veneer size depends on which glue variant was used.  */
      if (bfd_link_pic (info) || htab->root.is_relocatable_executable
	  || htab->pic_veneer)
	size = ARM2THUMB_PIC_GLUE_SIZE;
      else if (htab->use_blx)
	size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
      else
	size = ARM2THUMB_STATIC_GLUE_SIZE;

      /* Each veneer is ARM code followed by one data word at the end.  */
      for (offset = 0; offset < htab->arm_glue_size; offset += size)
	{
	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
	  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
	}
    }

  /* Thumb->ARM glue.  */
  if (htab->thumb_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					THUMB2ARM_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);
      size = THUMB2ARM_GLUE_SIZE;

      /* Each veneer starts with Thumb code and switches to ARM at
	 offset 4.  */
      for (offset = 0; offset < htab->thumb_glue_size; offset += size)
	{
	  elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
	}
    }

  /* ARMv4 BX veneers.  */
  if (htab->bx_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					ARM_BX_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);

      elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
    }

  /* Long calls stubs.  */
  if (htab->stub_bfd && htab->stub_bfd->sections)
    {
      asection* stub_sec;

      for (stub_sec = htab->stub_bfd->sections;
	   stub_sec != NULL;
	   stub_sec = stub_sec->next)
	{
	  /* Ignore non-stub sections.  */
	  if (!strstr (stub_sec->name, STUB_SUFFIX))
	    continue;

	  osi.sec = stub_sec;

	  osi.sec_shndx = _bfd_elf_section_from_bfd_section
	    (output_bfd, osi.sec->output_section);

	  /* Visit every stub attached to this stub section.  */
	  bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
	}
    }

  /* Finally, output mapping symbols for the PLT.  */
  if (htab->root.splt && htab->root.splt->size > 0)
    {
      osi.sec = htab->root.splt;
      osi.sec_shndx = (_bfd_elf_section_from_bfd_section
		       (output_bfd, osi.sec->output_section));

      /* Output mapping symbols for the plt header.  SymbianOS does not have a
	 plt header.  */
      if (htab->vxworks_p)
	{
	  /* VxWorks shared libraries have no PLT header.  */
	  if (!bfd_link_pic (info))
	    {
	      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
		return FALSE;
	      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
		return FALSE;
	    }
	}
      else if (htab->nacl_p)
	{
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	    return FALSE;
	}
      else if (using_thumb_only (htab))
	{
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 0))
	    return FALSE;
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
	    return FALSE;
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 16))
	    return FALSE;
	}
      else if (!htab->symbian_p)
	{
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	    return FALSE;
#ifndef FOUR_WORD_PLT
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
	    return FALSE;
#endif
	}
    }
  if (htab->nacl_p && htab->root.iplt && htab->root.iplt->size > 0)
    {
      /* NaCl uses a special first entry in .iplt too.  */
      osi.sec = htab->root.iplt;
      osi.sec_shndx = (_bfd_elf_section_from_bfd_section
		       (output_bfd, osi.sec->output_section));
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	return FALSE;
    }
  if ((htab->root.splt && htab->root.splt->size > 0)
      || (htab->root.iplt && htab->root.iplt->size > 0))
    {
      /* Map every global PLT entry, then every local .iplt entry of
	 every input bfd.  */
      elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi);
      for (input_bfd = info->input_bfds;
	   input_bfd != NULL;
	   input_bfd = input_bfd->link.next)
	{
	  struct arm_local_iplt_info **local_iplt;
	  unsigned int i, num_syms;

	  local_iplt = elf32_arm_local_iplt (input_bfd);
	  if (local_iplt != NULL)
	    {
	      num_syms = elf_symtab_hdr (input_bfd).sh_info;
	      for (i = 0; i < num_syms; i++)
		if (local_iplt[i] != NULL
		    && !elf32_arm_output_plt_map_1 (&osi, TRUE,
						    &local_iplt[i]->root,
						    &local_iplt[i]->arm))
		  return FALSE;
	    }
	}
    }
  if (htab->dt_tlsdesc_plt != 0)
    {
      /* Mapping symbols for the lazy tls trampoline.  */
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->dt_tlsdesc_plt))
	return FALSE;

      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
				     htab->dt_tlsdesc_plt + 24))
	return FALSE;
    }
  if (htab->tls_trampoline != 0)
    {
      /* Mapping symbols for the tls trampoline.  */
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->tls_trampoline))
	return FALSE;
#ifdef FOUR_WORD_PLT
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
				     htab->tls_trampoline + 12))
	return FALSE;
#endif
    }

  return TRUE;
}
16219
16220 /* Allocate target specific section data. */
16221
16222 static bfd_boolean
16223 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
16224 {
16225 if (!sec->used_by_bfd)
16226 {
16227 _arm_elf_section_data *sdata;
16228 bfd_size_type amt = sizeof (*sdata);
16229
16230 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
16231 if (sdata == NULL)
16232 return FALSE;
16233 sec->used_by_bfd = sdata;
16234 }
16235
16236 return _bfd_elf_new_section_hook (abfd, sec);
16237 }
16238
16239
16240 /* Used to order a list of mapping symbols by address. */
16241
16242 static int
16243 elf32_arm_compare_mapping (const void * a, const void * b)
16244 {
16245 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
16246 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
16247
16248 if (amap->vma > bmap->vma)
16249 return 1;
16250 else if (amap->vma < bmap->vma)
16251 return -1;
16252 else if (amap->type > bmap->type)
16253 /* Ensure results do not depend on the host qsort for objects with
16254 multiple mapping symbols at the same address by sorting on type
16255 after vma. */
16256 return 1;
16257 else if (amap->type < bmap->type)
16258 return -1;
16259 else
16260 return 0;
16261 }
16262
16263 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
16264
16265 static unsigned long
16266 offset_prel31 (unsigned long addr, bfd_vma offset)
16267 {
16268 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
16269 }
16270
16271 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
16272 relocations. */
16273
16274 static void
16275 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
16276 {
16277 unsigned long first_word = bfd_get_32 (output_bfd, from);
16278 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
16279
16280 /* High bit of first word is supposed to be zero. */
16281 if ((first_word & 0x80000000ul) == 0)
16282 first_word = offset_prel31 (first_word, offset);
16283
16284 /* If the high bit of the first word is clear, and the bit pattern is not 0x1
16285 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry. */
16286 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
16287 second_word = offset_prel31 (second_word, offset);
16288
16289 bfd_put_32 (output_bfd, first_word, to);
16290 bfd_put_32 (output_bfd, second_word, to + 4);
16291 }
16292
/* Data for make_branch_to_a8_stub().  */

struct a8_branch_to_stub_data
{
  asection *writing_section;	/* Section whose contents are being fixed up.  */
  bfd_byte *contents;		/* That section's output contents buffer.  */
};
16300
16301
/* Helper to insert branches to Cortex-A8 erratum stubs in the right
   places for a particular section.  Called via bfd_hash_traverse;
   IN_ARG is a struct a8_branch_to_stub_data identifying the section
   being written and its contents buffer.  */

static bfd_boolean
make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
			void *in_arg)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct a8_branch_to_stub_data *data;
  bfd_byte *contents;
  unsigned long branch_insn;
  bfd_vma veneered_insn_loc, veneer_entry_loc;
  bfd_signed_vma branch_offset;
  bfd *abfd;
  unsigned int loc;

  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  data = (struct a8_branch_to_stub_data *) in_arg;

  /* Only Cortex-A8 stubs targeting the section currently being
     written are of interest here.  */
  if (stub_entry->target_section != data->writing_section
      || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
    return TRUE;

  contents = data->contents;

  /* We use target_section as Cortex-A8 erratum workaround stubs are only
     generated when both source and target are in the same section.  */
  veneered_insn_loc = stub_entry->target_section->output_section->vma
		      + stub_entry->target_section->output_offset
		      + stub_entry->source_value;

  veneer_entry_loc = stub_entry->stub_sec->output_section->vma
		     + stub_entry->stub_sec->output_offset
		     + stub_entry->stub_offset;

  /* A BLX destination is word-aligned, so drop the low address bits
     when computing the branch offset.  */
  if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
    veneered_insn_loc &= ~3u;

  branch_offset = veneer_entry_loc - veneered_insn_loc - 4;

  abfd = stub_entry->target_section->owner;
  loc = stub_entry->source_value;

  /* We attempt to avoid this condition by setting stubs_always_after_branch
     in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
     This check is just to be on the safe side...  */
  if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
    {
      (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
			       "allocated in unsafe location"), abfd);
      return FALSE;
    }

  /* Select the opcode template for the replacement Thumb-2 branch,
     then fill in the 24-bit signed offset fields below.  */
  switch (stub_entry->stub_type)
    {
    case arm_stub_a8_veneer_b:
    case arm_stub_a8_veneer_b_cond:
      branch_insn = 0xf0009000;		/* B.W template.  */
      goto jump24;

    case arm_stub_a8_veneer_blx:
      branch_insn = 0xf000e800;		/* BLX template.  */
      goto jump24;

    case arm_stub_a8_veneer_bl:
      {
	unsigned int i1, j1, i2, j2, s;

	branch_insn = 0xf000d000;	/* BL template.  */

      jump24:
	/* Thumb-2 branches reach +/-16MB.  */
	if (branch_offset < -16777216 || branch_offset > 16777214)
	  {
	    /* There's not much we can do apart from complain if this
	       happens.  */
	    (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
				     "of range (input file too large)"), abfd);
	    return FALSE;
	  }

	/* i1 = not(j1 eor s), so:
	   not i1 = j1 eor s
	   j1 = (not i1) eor s.  */

	branch_insn |= (branch_offset >> 1) & 0x7ff;
	branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
	i2 = (branch_offset >> 22) & 1;
	i1 = (branch_offset >> 23) & 1;
	s = (branch_offset >> 24) & 1;
	j1 = (!i1) ^ s;
	j2 = (!i2) ^ s;
	branch_insn |= j2 << 11;
	branch_insn |= j1 << 13;
	branch_insn |= s << 26;
      }
      break;

    default:
      BFD_FAIL ();
      return FALSE;
    }

  /* Store the 32-bit Thumb instruction as two halfwords, most
     significant halfword first.  */
  bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[loc]);
  bfd_put_16 (abfd, branch_insn & 0xffff, &contents[loc + 2]);

  return TRUE;
}
16409
16410 /* Beginning of stm32l4xx work-around. */
16411
16412 /* Functions encoding instructions necessary for the emission of the
16413 fix-stm32l4xx-629360.
16414 Encoding is extracted from the
16415 ARM (C) Architecture Reference Manual
16416 ARMv7-A and ARMv7-R edition
16417 ARM DDI 0406C.b (ID072512). */
16418
/* Encode a Thumb-2 B.W (encoding T4) with the signed byte offset
   BRANCH_OFFSET and return the 32-bit instruction image.  */

static inline bfd_vma
create_instruction_branch_absolute (int branch_offset)
{
  /* A8.8.18 B (A8-334)
     B target_address (Encoding T4).  */
  /* 1111 - 0Sii - iiii - iiii - 10J1 - Jiii - iiii - iiii.  */
  /* jump offset is:  S:I1:I2:imm10:imm11:0.  */
  /* with : I1 = NOT (J1 EOR S) I2 = NOT (J2 EOR S).  */

  int s = ((branch_offset & 0x1000000) >> 24);
  int j1 = s ^ !((branch_offset & 0x800000) >> 23);
  int j2 = s ^ !((branch_offset & 0x400000) >> 22);

  /* Encoding T4 holds a 25-bit signed offset; out-of-range values are a
     caller bug, hence an assertion rather than an error return.  */
  if (branch_offset < -(1 << 24) || branch_offset >= (1 << 24))
    BFD_ASSERT (0 && "Error: branch out of range.  Cannot create branch.");

  bfd_vma patched_inst = 0xf0009000
    | s << 26 /* S.  */
    | (((unsigned long) (branch_offset) >> 12) & 0x3ff) << 16 /* imm10.  */
    | j1 << 13 /* J1.  */
    | j2 << 11 /* J2.  */
    | (((unsigned long) (branch_offset) >> 1) & 0x7ff); /* imm11.  */

  return patched_inst;
}
16444
16445 static inline bfd_vma
16446 create_instruction_ldmia (int base_reg, int wback, int reg_mask)
16447 {
16448 /* A8.8.57 LDM/LDMIA/LDMFD (A8-396)
16449 LDMIA Rn!, {Ra, Rb, Rc, ...} (Encoding T2). */
16450 bfd_vma patched_inst = 0xe8900000
16451 | (/*W=*/wback << 21)
16452 | (base_reg << 16)
16453 | (reg_mask & 0x0000ffff);
16454
16455 return patched_inst;
16456 }
16457
16458 static inline bfd_vma
16459 create_instruction_ldmdb (int base_reg, int wback, int reg_mask)
16460 {
16461 /* A8.8.60 LDMDB/LDMEA (A8-402)
16462 LDMDB Rn!, {Ra, Rb, Rc, ...} (Encoding T1). */
16463 bfd_vma patched_inst = 0xe9100000
16464 | (/*W=*/wback << 21)
16465 | (base_reg << 16)
16466 | (reg_mask & 0x0000ffff);
16467
16468 return patched_inst;
16469 }
16470
16471 static inline bfd_vma
16472 create_instruction_mov (int target_reg, int source_reg)
16473 {
16474 /* A8.8.103 MOV (register) (A8-486)
16475 MOV Rd, Rm (Encoding T1). */
16476 bfd_vma patched_inst = 0x4600
16477 | (target_reg & 0x7)
16478 | ((target_reg & 0x8) >> 3) << 7
16479 | (source_reg << 3);
16480
16481 return patched_inst;
16482 }
16483
16484 static inline bfd_vma
16485 create_instruction_sub (int target_reg, int source_reg, int value)
16486 {
16487 /* A8.8.221 SUB (immediate) (A8-708)
16488 SUB Rd, Rn, #value (Encoding T3). */
16489 bfd_vma patched_inst = 0xf1a00000
16490 | (target_reg << 8)
16491 | (source_reg << 16)
16492 | (/*S=*/0 << 20)
16493 | ((value & 0x800) >> 11) << 26
16494 | ((value & 0x700) >> 8) << 12
16495 | (value & 0x0ff);
16496
16497 return patched_inst;
16498 }
16499
/* Encode a Thumb-2 VLDMIA loading NUM_WORDS words starting at register
   FIRST_REG from BASE_REG, double-precision iff IS_DP, writeback iff
   WBACK.  */

static inline bfd_vma
create_instruction_vldmia (int base_reg, int is_dp, int wback, int num_words,
			   int first_reg)
{
  /* A8.8.332 VLDM (A8-922)
     VLDM{MODE} Rn{!}, {list} (Encoding T1 or T2).  */
  /* T1 base opcode (0xec900b00) is the double-precision form, T2
     (0xec900a00) the single-precision one.  */
  bfd_vma patched_inst = (is_dp ? 0xec900b00 : 0xec900a00)
    | (/*W=*/wback << 21)
    | (base_reg << 16)
    | (num_words & 0x0000ffff)
    | (((unsigned)first_reg >> 1) & 0x0000000f) << 12
    | (first_reg & 0x00000001) << 22;

  return patched_inst;
}
16515
/* Encode a Thumb-2 VLDMDB (decrement-before, writeback implied)
   loading NUM_WORDS words starting at register FIRST_REG from
   BASE_REG, double-precision iff IS_DP.  */

static inline bfd_vma
create_instruction_vldmdb (int base_reg, int is_dp, int num_words,
			   int first_reg)
{
  /* A8.8.332 VLDM (A8-922)
     VLDM{MODE} Rn!, {} (Encoding T1 or T2).  */
  /* T1 base opcode (0xed300b00) is the double-precision form, T2
     (0xed300a00) the single-precision one.  */
  bfd_vma patched_inst = (is_dp ? 0xed300b00 : 0xed300a00)
    | (base_reg << 16)
    | (num_words & 0x0000ffff)
    | (((unsigned)first_reg >>1 ) & 0x0000000f) << 12
    | (first_reg & 0x00000001) << 22;

  return patched_inst;
}
16530
16531 static inline bfd_vma
16532 create_instruction_udf_w (int value)
16533 {
16534 /* A8.8.247 UDF (A8-758)
16535 Undefined (Encoding T2). */
16536 bfd_vma patched_inst = 0xf7f0a000
16537 | (value & 0x00000fff)
16538 | (value & 0x000f0000) << 16;
16539
16540 return patched_inst;
16541 }
16542
16543 static inline bfd_vma
16544 create_instruction_udf (int value)
16545 {
16546 /* A8.8.247 UDF (A8-758)
16547 Undefined (Encoding T1). */
16548 bfd_vma patched_inst = 0xde00
16549 | (value & 0xff);
16550
16551 return patched_inst;
16552 }
16553
16554 /* Functions writing an instruction in memory, returning the next
16555 memory position to write to. */
16556
/* Write the 32-bit Thumb-2 instruction INSN into OUTPUT_BFD at PT and
   return the next write position (PT + 4).  */

static inline bfd_byte *
push_thumb2_insn32 (struct elf32_arm_link_hash_table * htab,
		    bfd * output_bfd, bfd_byte *pt, insn32 insn)
{
  put_thumb2_insn (htab, output_bfd, insn, pt);
  return pt + 4;
}
16564
/* Write the 16-bit Thumb instruction INSN into OUTPUT_BFD at PT and
   return the next write position (PT + 2).  */

static inline bfd_byte *
push_thumb2_insn16 (struct elf32_arm_link_hash_table * htab,
		    bfd * output_bfd, bfd_byte *pt, insn32 insn)
{
  put_thumb_insn (htab, output_bfd, insn, pt);
  return pt + 2;
}
16572
/* Function filling up a region in memory with T1 and T2 UDFs taking
   care of alignment.  Pads [FROM_STUB_CONTENTS, END_STUB_CONTENTS)
   with undefined instructions and returns the final write position.
   BASE_STUB_CONTENTS is the start of the stub, used only to compute
   the current alignment.  */

static bfd_byte *
stm32l4xx_fill_stub_udf (struct elf32_arm_link_hash_table * htab,
			 bfd * output_bfd,
			 const bfd_byte * const base_stub_contents,
			 bfd_byte * const from_stub_contents,
			 const bfd_byte * const end_stub_contents)
{
  bfd_byte *current_stub_contents = from_stub_contents;

  /* Fill the remaining of the stub with deterministic contents : UDF
     instructions.
     Check if realignment is needed on modulo 4 frontier using T1, to
     further use T2.  */
  if ((current_stub_contents < end_stub_contents)
      && !((current_stub_contents - base_stub_contents) % 2)
      && ((current_stub_contents - base_stub_contents) % 4))
    current_stub_contents =
      push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			  create_instruction_udf (0));

  /* Now 4-byte aligned: fill the rest with 32-bit UDF.W.  */
  for (; current_stub_contents < end_stub_contents;)
    current_stub_contents =
      push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			  create_instruction_udf_w (0));

  return current_stub_contents;
}
16603
16604 /* Functions writing the stream of instructions equivalent to the
16605 derived sequence for ldmia, ldmdb, vldm respectively. */
16606
/* Write at BASE_STUB_CONTENTS the veneer replacing the faulty LDMIA
   instruction INITIAL_INSN located at INITIAL_INSN_ADDR, splitting a
   wide register list into two narrower LDMs to work around the
   stm32l4xx erratum.  The veneer ends with a branch back to the
   instruction after the original (unless PC is in the list, in which
   case the load itself transfers control).  */

static void
stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  int wback = (initial_insn & 0x00200000) >> 21;	/* W (writeback) bit.  */
  int ri, rn = (initial_insn & 0x000F0000) >> 16;	/* Base register.  */
  int insn_all_registers = initial_insn & 0x0000ffff;	/* Register list.  */
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int nb_registers = popcount (insn_all_registers);
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmia (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      if (!restore_pc)
	current_stub_contents =
	  push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			      create_instruction_branch_absolute
			      (initial_insn_addr - current_stub_contents));


      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* Sanity-check the preconditions the splitting relies on:  */
  /* - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13))==0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insns:
    - One with the 7 lowest registers (register mask 0x007F)
      This LDM will finally contain between 2 and 7 registers
    - One with the 7 highest registers (register mask 0xDF80)
      This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with the
     last LDM operation.
     The usable register may be any general purpose register (that
     excludes PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function.  */
  if (wback)
    {
      /* LDMIA Rn!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_low_registers));

      /* LDMIA Rn!, {R-high-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_high_registers));
      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }
  else /* if (!wback).  */
    {
      ri = rn;

      /* If Rn is not part of the high-register-list, move it there.  */
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMIA Ri!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));

      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
}
16751
/* As stm32l4xx_create_replacing_stub_ldmia, but for the LDMDB
   (decrement-before) form: write at BASE_STUB_CONTENTS the veneer
   replacing the faulty LDMDB instruction INITIAL_INSN located at
   INITIAL_INSN_ADDR, splitting its register list in two.  */

static void
stm32l4xx_create_replacing_stub_ldmdb (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  int wback = (initial_insn & 0x00200000) >> 21;	/* W (writeback) bit.  */
  int ri, rn = (initial_insn & 0x000f0000) >> 16;	/* Base register.  */
  int insn_all_registers = initial_insn & 0x0000ffff;	/* Register list.  */
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  int nb_registers = popcount (insn_all_registers);
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmdb (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));

      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* Sanity-check the preconditions the splitting relies on:  */
  /* - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insn:
    - One with the 7 lowest registers (register mask 0x007F)
      This LDM will finally contain between 2 and 7 registers
    - One with the 7 highest registers (register mask 0xDF80)
      This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with
     the last LDM operation.
     The usable register may be any general purpose register (that excludes
     PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function, one case per combination of the wback,
     restore_pc and restore_rn properties of the original insn.  */
  if (!wback && !restore_pc && !restore_rn)
    {
      /* Choose a Ri in the low-register-list that will be restored.  */
      ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (wback && !restore_pc && !restore_rn)
    {
      /* LDMDB Rn!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_high_registers));

      /* LDMDB Rn!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Ri, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Rn, Rn, #(4*nb_registers) */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (rn, rn, (4 * nb_registers)));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (!wback && !restore_pc && restore_rn)
    {
      ri = rn;
      if (!(insn_low_registers & (1 << rn)))
	{
	  /* Choose a Ri in the low-register-list that will be restored.  */
	  ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && restore_rn)
    {
      ri = rn;
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
	}

      /* SUB Ri, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_rn)
    {
      /* The assembler should not have accepted to encode this.  */
      BFD_ASSERT (0 && "Cannot patch an instruction that has an "
		  "undefined behavior.\n");
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

}
16997
/* As stm32l4xx_create_replacing_stub_ldmia, but for VLDM (and VPOP)
   instructions: write at BASE_STUB_CONTENTS a veneer that replays the
   faulty VLDM INITIAL_INSN (at INITIAL_INSN_ADDR) as chunks of at most
   eight transferred words.  */

static void
stm32l4xx_create_replacing_stub_vldm (struct elf32_arm_link_hash_table * htab,
				      bfd * output_bfd,
				      const insn32 initial_insn,
				      const bfd_byte *const initial_insn_addr,
				      bfd_byte *const base_stub_contents)
{
  /* imm8 field: number of words transferred.  */
  int num_words = ((unsigned int) initial_insn << 24) >> 24;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_vldm (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 words load sequences that do not cause the
     hardware issue.  */
  if (num_words <= 8)
    {
      /* Untouched instruction.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else
    {
      bfd_boolean is_dp = /* DP encoding.  */
	(initial_insn & 0xfe100f00) == 0xec100b00;
      bfd_boolean is_ia_nobang = /* (IA without !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x4;
      bfd_boolean is_ia_bang = /* (IA with !) - includes VPOP.  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x5;
      bfd_boolean is_db_bang = /* (DB with !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x9;
      int base_reg = ((unsigned int) initial_insn << 12) >> 28;
      /* d = UInt (Vd:D);.  */
      int first_reg = ((((unsigned int) initial_insn << 16) >> 28) << 1)
	| (((unsigned int)initial_insn << 9) >> 31);

      /* Compute the number of 8-words chunks needed to split.  */
      int chunks = (num_words % 8) ? (num_words / 8 + 1) : (num_words / 8);
      int chunk;

      /* The test coverage has been done assuming the following
	 hypothesis that exactly one of the previous is_ predicates is
	 true.  */
      BFD_ASSERT ( (is_ia_nobang ^ is_ia_bang ^ is_db_bang)
		   && !(is_ia_nobang & is_ia_bang & is_db_bang));

      /* We treat the cutting of the words in one pass for all
	 cases, then we emit the adjustments:

	 vldm rx, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 -> sub rx, rx, #size (list)

	 vldm rx!, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 This also handles vpop instruction (when rx is sp)

	 vldmd rx!, {...}
	 -> vldmb rx!, {8_words_or_less} for each needed 8_word.  */
      for (chunk = 0; chunk < chunks; ++chunk)
	{
	  bfd_vma new_insn = 0;

	  /* The last chunk carries the remaining words; earlier chunks
	     always carry 8.  */
	  if (is_ia_nobang || is_ia_bang)
	    {
	      new_insn = create_instruction_vldmia
		(base_reg,
		 is_dp,
		 /*wback= .  */1,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }
	  else if (is_db_bang)
	    {
	      new_insn = create_instruction_vldmdb
		(base_reg,
		 is_dp,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }

	  if (new_insn)
	    current_stub_contents =
	      push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				  new_insn);
	}

      /* Only this case requires the base register compensation
	 subtract.  */
      if (is_ia_nobang)
	{
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_sub
				(base_reg, base_reg, 4*num_words));
	}

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
}
17118
17119 static void
17120 stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table * htab,
17121 bfd * output_bfd,
17122 const insn32 wrong_insn,
17123 const bfd_byte *const wrong_insn_addr,
17124 bfd_byte *const stub_contents)
17125 {
17126 if (is_thumb2_ldmia (wrong_insn))
17127 stm32l4xx_create_replacing_stub_ldmia (htab, output_bfd,
17128 wrong_insn, wrong_insn_addr,
17129 stub_contents);
17130 else if (is_thumb2_ldmdb (wrong_insn))
17131 stm32l4xx_create_replacing_stub_ldmdb (htab, output_bfd,
17132 wrong_insn, wrong_insn_addr,
17133 stub_contents);
17134 else if (is_thumb2_vldm (wrong_insn))
17135 stm32l4xx_create_replacing_stub_vldm (htab, output_bfd,
17136 wrong_insn, wrong_insn_addr,
17137 stub_contents);
17138 }
17139
17140 /* End of stm32l4xx work-around. */
17141
17142
17143 static void
17144 elf32_arm_add_relocation (bfd *output_bfd, struct bfd_link_info *info,
17145 asection *output_sec, Elf_Internal_Rela *rel)
17146 {
17147 BFD_ASSERT (output_sec && rel);
17148 struct bfd_elf_section_reloc_data *output_reldata;
17149 struct elf32_arm_link_hash_table *htab;
17150 struct bfd_elf_section_data *oesd = elf_section_data (output_sec);
17151 Elf_Internal_Shdr *rel_hdr;
17152
17153
17154 if (oesd->rel.hdr)
17155 {
17156 rel_hdr = oesd->rel.hdr;
17157 output_reldata = &(oesd->rel);
17158 }
17159 else if (oesd->rela.hdr)
17160 {
17161 rel_hdr = oesd->rela.hdr;
17162 output_reldata = &(oesd->rela);
17163 }
17164 else
17165 {
17166 abort ();
17167 }
17168
17169 bfd_byte *erel = rel_hdr->contents;
17170 erel += output_reldata->count * rel_hdr->sh_entsize;
17171 htab = elf32_arm_hash_table (info);
17172 SWAP_RELOC_OUT (htab) (output_bfd, rel, erel);
17173 output_reldata->count++;
17174 }
17175
17176 /* Do code byteswapping. Return FALSE afterwards so that the section is
17177 written out as normal. */
17178
17179 static bfd_boolean
17180 elf32_arm_write_section (bfd *output_bfd,
17181 struct bfd_link_info *link_info,
17182 asection *sec,
17183 bfd_byte *contents)
17184 {
17185 unsigned int mapcount, errcount;
17186 _arm_elf_section_data *arm_data;
17187 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
17188 elf32_arm_section_map *map;
17189 elf32_vfp11_erratum_list *errnode;
17190 elf32_stm32l4xx_erratum_list *stm32l4xx_errnode;
17191 bfd_vma ptr;
17192 bfd_vma end;
17193 bfd_vma offset = sec->output_section->vma + sec->output_offset;
17194 bfd_byte tmp;
17195 unsigned int i;
17196
17197 if (globals == NULL)
17198 return FALSE;
17199
17200 /* If this section has not been allocated an _arm_elf_section_data
17201 structure then we cannot record anything. */
17202 arm_data = get_arm_elf_section_data (sec);
17203 if (arm_data == NULL)
17204 return FALSE;
17205
17206 mapcount = arm_data->mapcount;
17207 map = arm_data->map;
17208 errcount = arm_data->erratumcount;
17209
17210 if (errcount != 0)
17211 {
17212 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
17213
17214 for (errnode = arm_data->erratumlist; errnode != 0;
17215 errnode = errnode->next)
17216 {
17217 bfd_vma target = errnode->vma - offset;
17218
17219 switch (errnode->type)
17220 {
17221 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
17222 {
17223 bfd_vma branch_to_veneer;
17224 /* Original condition code of instruction, plus bit mask for
17225 ARM B instruction. */
17226 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
17227 | 0x0a000000;
17228
17229 /* The instruction is before the label. */
17230 target -= 4;
17231
17232 /* Above offset included in -4 below. */
17233 branch_to_veneer = errnode->u.b.veneer->vma
17234 - errnode->vma - 4;
17235
17236 if ((signed) branch_to_veneer < -(1 << 25)
17237 || (signed) branch_to_veneer >= (1 << 25))
17238 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
17239 "range"), output_bfd);
17240
17241 insn |= (branch_to_veneer >> 2) & 0xffffff;
17242 contents[endianflip ^ target] = insn & 0xff;
17243 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
17244 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
17245 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
17246 }
17247 break;
17248
17249 case VFP11_ERRATUM_ARM_VENEER:
17250 {
17251 bfd_vma branch_from_veneer;
17252 unsigned int insn;
17253
17254 /* Take size of veneer into account. */
17255 branch_from_veneer = errnode->u.v.branch->vma
17256 - errnode->vma - 12;
17257
17258 if ((signed) branch_from_veneer < -(1 << 25)
17259 || (signed) branch_from_veneer >= (1 << 25))
17260 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
17261 "range"), output_bfd);
17262
17263 /* Original instruction. */
17264 insn = errnode->u.v.branch->u.b.vfp_insn;
17265 contents[endianflip ^ target] = insn & 0xff;
17266 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
17267 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
17268 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
17269
17270 /* Branch back to insn after original insn. */
17271 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
17272 contents[endianflip ^ (target + 4)] = insn & 0xff;
17273 contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
17274 contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
17275 contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
17276 }
17277 break;
17278
17279 default:
17280 abort ();
17281 }
17282 }
17283 }
17284
17285 if (arm_data->stm32l4xx_erratumcount != 0)
17286 {
17287 for (stm32l4xx_errnode = arm_data->stm32l4xx_erratumlist;
17288 stm32l4xx_errnode != 0;
17289 stm32l4xx_errnode = stm32l4xx_errnode->next)
17290 {
17291 bfd_vma target = stm32l4xx_errnode->vma - offset;
17292
17293 switch (stm32l4xx_errnode->type)
17294 {
17295 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
17296 {
17297 unsigned int insn;
17298 bfd_vma branch_to_veneer =
17299 stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma;
17300
17301 if ((signed) branch_to_veneer < -(1 << 24)
17302 || (signed) branch_to_veneer >= (1 << 24))
17303 {
17304 bfd_vma out_of_range =
17305 ((signed) branch_to_veneer < -(1 << 24)) ?
17306 - branch_to_veneer - (1 << 24) :
17307 ((signed) branch_to_veneer >= (1 << 24)) ?
17308 branch_to_veneer - (1 << 24) : 0;
17309
17310 (*_bfd_error_handler)
17311 (_("%B(%#x): error: Cannot create STM32L4XX veneer. "
17312 "Jump out of range by %ld bytes. "
17313 "Cannot encode branch instruction. "),
17314 output_bfd,
17315 (long) (stm32l4xx_errnode->vma - 4),
17316 out_of_range);
17317 continue;
17318 }
17319
17320 insn = create_instruction_branch_absolute
17321 (stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma);
17322
17323 /* The instruction is before the label. */
17324 target -= 4;
17325
17326 put_thumb2_insn (globals, output_bfd,
17327 (bfd_vma) insn, contents + target);
17328 }
17329 break;
17330
17331 case STM32L4XX_ERRATUM_VENEER:
17332 {
17333 bfd_byte * veneer;
17334 bfd_byte * veneer_r;
17335 unsigned int insn;
17336
17337 veneer = contents + target;
17338 veneer_r = veneer
17339 + stm32l4xx_errnode->u.b.veneer->vma
17340 - stm32l4xx_errnode->vma - 4;
17341
17342 if ((signed) (veneer_r - veneer -
17343 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE >
17344 STM32L4XX_ERRATUM_LDM_VENEER_SIZE ?
17345 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE :
17346 STM32L4XX_ERRATUM_LDM_VENEER_SIZE) < -(1 << 24)
17347 || (signed) (veneer_r - veneer) >= (1 << 24))
17348 {
17349 (*_bfd_error_handler) (_("%B: error: Cannot create STM32L4XX "
17350 "veneer."), output_bfd);
17351 continue;
17352 }
17353
17354 /* Original instruction. */
17355 insn = stm32l4xx_errnode->u.v.branch->u.b.insn;
17356
17357 stm32l4xx_create_replacing_stub
17358 (globals, output_bfd, insn, (void*)veneer_r, (void*)veneer);
17359 }
17360 break;
17361
17362 default:
17363 abort ();
17364 }
17365 }
17366 }
17367
17368 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
17369 {
17370 arm_unwind_table_edit *edit_node
17371 = arm_data->u.exidx.unwind_edit_list;
17372 /* Now, sec->size is the size of the section we will write. The original
17373 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
17374 markers) was sec->rawsize. (This isn't the case if we perform no
17375 edits, then rawsize will be zero and we should use size). */
17376 bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
17377 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
17378 unsigned int in_index, out_index;
17379 bfd_vma add_to_offsets = 0;
17380
17381 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
17382 {
17383 if (edit_node)
17384 {
17385 unsigned int edit_index = edit_node->index;
17386
17387 if (in_index < edit_index && in_index * 8 < input_size)
17388 {
17389 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
17390 contents + in_index * 8, add_to_offsets);
17391 out_index++;
17392 in_index++;
17393 }
17394 else if (in_index == edit_index
17395 || (in_index * 8 >= input_size
17396 && edit_index == UINT_MAX))
17397 {
17398 switch (edit_node->type)
17399 {
17400 case DELETE_EXIDX_ENTRY:
17401 in_index++;
17402 add_to_offsets += 8;
17403 break;
17404
17405 case INSERT_EXIDX_CANTUNWIND_AT_END:
17406 {
17407 asection *text_sec = edit_node->linked_section;
17408 bfd_vma text_offset = text_sec->output_section->vma
17409 + text_sec->output_offset
17410 + text_sec->size;
17411 bfd_vma exidx_offset = offset + out_index * 8;
17412 unsigned long prel31_offset;
17413
17414 /* Note: this is meant to be equivalent to an
17415 R_ARM_PREL31 relocation. These synthetic
17416 EXIDX_CANTUNWIND markers are not relocated by the
17417 usual BFD method. */
17418 prel31_offset = (text_offset - exidx_offset)
17419 & 0x7ffffffful;
17420 if (bfd_link_relocatable (link_info))
17421 {
17422 /* Here relocation for new EXIDX_CANTUNWIND is
17423 created, so there is no need to
17424 adjust offset by hand. */
17425 prel31_offset = text_sec->output_offset
17426 + text_sec->size;
17427
17428 /* New relocation entity. */
17429 asection *text_out = text_sec->output_section;
17430 Elf_Internal_Rela rel;
17431 rel.r_addend = 0;
17432 rel.r_offset = exidx_offset;
17433 rel.r_info = ELF32_R_INFO (text_out->target_index,
17434 R_ARM_PREL31);
17435
17436 elf32_arm_add_relocation (output_bfd, link_info,
17437 sec->output_section,
17438 &rel);
17439 }
17440
17441 /* First address we can't unwind. */
17442 bfd_put_32 (output_bfd, prel31_offset,
17443 &edited_contents[out_index * 8]);
17444
17445 /* Code for EXIDX_CANTUNWIND. */
17446 bfd_put_32 (output_bfd, 0x1,
17447 &edited_contents[out_index * 8 + 4]);
17448
17449 out_index++;
17450 add_to_offsets -= 8;
17451 }
17452 break;
17453 }
17454
17455 edit_node = edit_node->next;
17456 }
17457 }
17458 else
17459 {
17460 /* No more edits, copy remaining entries verbatim. */
17461 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
17462 contents + in_index * 8, add_to_offsets);
17463 out_index++;
17464 in_index++;
17465 }
17466 }
17467
17468 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
17469 bfd_set_section_contents (output_bfd, sec->output_section,
17470 edited_contents,
17471 (file_ptr) sec->output_offset, sec->size);
17472
17473 return TRUE;
17474 }
17475
17476 /* Fix code to point to Cortex-A8 erratum stubs. */
17477 if (globals->fix_cortex_a8)
17478 {
17479 struct a8_branch_to_stub_data data;
17480
17481 data.writing_section = sec;
17482 data.contents = contents;
17483
17484 bfd_hash_traverse (& globals->stub_hash_table, make_branch_to_a8_stub,
17485 & data);
17486 }
17487
17488 if (mapcount == 0)
17489 return FALSE;
17490
17491 if (globals->byteswap_code)
17492 {
17493 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
17494
17495 ptr = map[0].vma;
17496 for (i = 0; i < mapcount; i++)
17497 {
17498 if (i == mapcount - 1)
17499 end = sec->size;
17500 else
17501 end = map[i + 1].vma;
17502
17503 switch (map[i].type)
17504 {
17505 case 'a':
17506 /* Byte swap code words. */
17507 while (ptr + 3 < end)
17508 {
17509 tmp = contents[ptr];
17510 contents[ptr] = contents[ptr + 3];
17511 contents[ptr + 3] = tmp;
17512 tmp = contents[ptr + 1];
17513 contents[ptr + 1] = contents[ptr + 2];
17514 contents[ptr + 2] = tmp;
17515 ptr += 4;
17516 }
17517 break;
17518
17519 case 't':
17520 /* Byte swap code halfwords. */
17521 while (ptr + 1 < end)
17522 {
17523 tmp = contents[ptr];
17524 contents[ptr] = contents[ptr + 1];
17525 contents[ptr + 1] = tmp;
17526 ptr += 2;
17527 }
17528 break;
17529
17530 case 'd':
17531 /* Leave data alone. */
17532 break;
17533 }
17534 ptr = end;
17535 }
17536 }
17537
17538 free (map);
17539 arm_data->mapcount = -1;
17540 arm_data->mapsize = 0;
17541 arm_data->map = NULL;
17542
17543 return FALSE;
17544 }
17545
17546 /* Mangle thumb function symbols as we read them in. */
17547
17548 static bfd_boolean
17549 elf32_arm_swap_symbol_in (bfd * abfd,
17550 const void *psrc,
17551 const void *pshn,
17552 Elf_Internal_Sym *dst)
17553 {
17554 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
17555 return FALSE;
17556 dst->st_target_internal = 0;
17557
17558 /* New EABI objects mark thumb function symbols by setting the low bit of
17559 the address. */
17560 if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
17561 || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
17562 {
17563 if (dst->st_value & 1)
17564 {
17565 dst->st_value &= ~(bfd_vma) 1;
17566 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal,
17567 ST_BRANCH_TO_THUMB);
17568 }
17569 else
17570 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_ARM);
17571 }
17572 else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
17573 {
17574 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
17575 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_THUMB);
17576 }
17577 else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
17578 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_LONG);
17579 else
17580 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_UNKNOWN);
17581
17582 return TRUE;
17583 }
17584
17585
17586 /* Mangle thumb function symbols as we write them out. */
17587
17588 static void
17589 elf32_arm_swap_symbol_out (bfd *abfd,
17590 const Elf_Internal_Sym *src,
17591 void *cdst,
17592 void *shndx)
17593 {
17594 Elf_Internal_Sym newsym;
17595
17596 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
17597 of the address set, as per the new EABI. We do this unconditionally
17598 because objcopy does not set the elf header flags until after
17599 it writes out the symbol table. */
17600 if (ARM_GET_SYM_BRANCH_TYPE (src->st_target_internal) == ST_BRANCH_TO_THUMB)
17601 {
17602 newsym = *src;
17603 if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
17604 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
17605 if (newsym.st_shndx != SHN_UNDEF)
17606 {
17607 /* Do this only for defined symbols. At link type, the static
17608 linker will simulate the work of dynamic linker of resolving
17609 symbols and will carry over the thumbness of found symbols to
17610 the output symbol table. It's not clear how it happens, but
17611 the thumbness of undefined symbols can well be different at
17612 runtime, and writing '1' for them will be confusing for users
17613 and possibly for dynamic linker itself.
17614 */
17615 newsym.st_value |= 1;
17616 }
17617
17618 src = &newsym;
17619 }
17620 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
17621 }
17622
17623 /* Add the PT_ARM_EXIDX program header. */
17624
17625 static bfd_boolean
17626 elf32_arm_modify_segment_map (bfd *abfd,
17627 struct bfd_link_info *info ATTRIBUTE_UNUSED)
17628 {
17629 struct elf_segment_map *m;
17630 asection *sec;
17631
17632 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
17633 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
17634 {
17635 /* If there is already a PT_ARM_EXIDX header, then we do not
17636 want to add another one. This situation arises when running
17637 "strip"; the input binary already has the header. */
17638 m = elf_seg_map (abfd);
17639 while (m && m->p_type != PT_ARM_EXIDX)
17640 m = m->next;
17641 if (!m)
17642 {
17643 m = (struct elf_segment_map *)
17644 bfd_zalloc (abfd, sizeof (struct elf_segment_map));
17645 if (m == NULL)
17646 return FALSE;
17647 m->p_type = PT_ARM_EXIDX;
17648 m->count = 1;
17649 m->sections[0] = sec;
17650
17651 m->next = elf_seg_map (abfd);
17652 elf_seg_map (abfd) = m;
17653 }
17654 }
17655
17656 return TRUE;
17657 }
17658
17659 /* We may add a PT_ARM_EXIDX program header. */
17660
17661 static int
17662 elf32_arm_additional_program_headers (bfd *abfd,
17663 struct bfd_link_info *info ATTRIBUTE_UNUSED)
17664 {
17665 asection *sec;
17666
17667 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
17668 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
17669 return 1;
17670 else
17671 return 0;
17672 }
17673
17674 /* Hook called by the linker routine which adds symbols from an object
17675 file. */
17676
17677 static bfd_boolean
17678 elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
17679 Elf_Internal_Sym *sym, const char **namep,
17680 flagword *flagsp, asection **secp, bfd_vma *valp)
17681 {
17682 if ((ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
17683 || ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE)
17684 && (abfd->flags & DYNAMIC) == 0
17685 && bfd_get_flavour (info->output_bfd) == bfd_target_elf_flavour)
17686 elf_tdata (info->output_bfd)->has_gnu_symbols = elf_gnu_symbol_any;
17687
17688 if (elf32_arm_hash_table (info) == NULL)
17689 return FALSE;
17690
17691 if (elf32_arm_hash_table (info)->vxworks_p
17692 && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
17693 flagsp, secp, valp))
17694 return FALSE;
17695
17696 return TRUE;
17697 }
17698
17699 /* We use this to override swap_symbol_in and swap_symbol_out. */
const struct elf_size_info elf32_arm_size_info =
{
  sizeof (Elf32_External_Ehdr),
  sizeof (Elf32_External_Phdr),
  sizeof (Elf32_External_Shdr),
  sizeof (Elf32_External_Rel),
  sizeof (Elf32_External_Rela),
  sizeof (Elf32_External_Sym),
  sizeof (Elf32_External_Dyn),
  sizeof (Elf_External_Note),
  4,		/* Hash table entry size.  */
  1,		/* Internal relocs per external reloc.  */
  32, 2,	/* Arch size, log2 of file alignment.  */
  ELFCLASS32, EV_CURRENT,
  bfd_elf32_write_out_phdrs,
  bfd_elf32_write_shdrs_and_ehdr,
  bfd_elf32_checksum_contents,
  bfd_elf32_write_relocs,
  elf32_arm_swap_symbol_in,	/* Overridden: mangles Thumb symbols.  */
  elf32_arm_swap_symbol_out,	/* Likewise.  */
  bfd_elf32_slurp_reloc_table,
  bfd_elf32_slurp_symbol_table,
  bfd_elf32_swap_dyn_in,
  bfd_elf32_swap_dyn_out,
  bfd_elf32_swap_reloc_in,
  bfd_elf32_swap_reloc_out,
  bfd_elf32_swap_reloca_in,
  bfd_elf32_swap_reloca_out
};
17729
17730 static bfd_vma
17731 read_code32 (const bfd *abfd, const bfd_byte *addr)
17732 {
17733 /* V7 BE8 code is always little endian. */
17734 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
17735 return bfd_getl32 (addr);
17736
17737 return bfd_get_32 (abfd, addr);
17738 }
17739
17740 static bfd_vma
17741 read_code16 (const bfd *abfd, const bfd_byte *addr)
17742 {
17743 /* V7 BE8 code is always little endian. */
17744 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
17745 return bfd_getl16 (addr);
17746
17747 return bfd_get_16 (abfd, addr);
17748 }
17749
/* Return size of plt0 entry starting at ADDR
   or (bfd_vma) -1 if the size cannot be determined.  */
17752
17753 static bfd_vma
17754 elf32_arm_plt0_size (const bfd *abfd, const bfd_byte *addr)
17755 {
17756 bfd_vma first_word;
17757 bfd_vma plt0_size;
17758
17759 first_word = read_code32 (abfd, addr);
17760
17761 if (first_word == elf32_arm_plt0_entry[0])
17762 plt0_size = 4 * ARRAY_SIZE (elf32_arm_plt0_entry);
17763 else if (first_word == elf32_thumb2_plt0_entry[0])
17764 plt0_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
17765 else
17766 /* We don't yet handle this PLT format. */
17767 return (bfd_vma) -1;
17768
17769 return plt0_size;
17770 }
17771
/* Return size of plt entry starting at offset OFFSET
   of plt section located at address START
   or (bfd_vma) -1 if the size cannot be determined.  */
17775
static bfd_vma
elf32_arm_plt_size (const bfd *abfd, const bfd_byte *start, bfd_vma offset)
{
  bfd_vma first_insn;
  bfd_vma plt_size = 0;
  const bfd_byte *addr = start + offset;

  /* PLT entry size is fixed on Thumb-only platforms.  */
  if (read_code32 (abfd, start) == elf32_thumb2_plt0_entry[0])
    return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);

  /* Respect Thumb stub if necessary.  */
  if (read_code16 (abfd, addr) == elf32_arm_plt_thumb_stub[0])
    {
      plt_size += 2 * ARRAY_SIZE(elf32_arm_plt_thumb_stub);
    }

  /* Strip immediate from first add.  */
  first_insn = read_code32 (abfd, addr + plt_size) & 0xffffff00;

  /* Match against the ARM PLT entry templates.  The candidate set
     depends on whether BFD was configured with four-word PLT
     entries; the trailing `else' pairs with the last `if' of
     whichever preprocessor branch is active.  */
#ifdef FOUR_WORD_PLT
  if (first_insn == elf32_arm_plt_entry[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry);
#else
  if (first_insn == elf32_arm_plt_entry_long[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_long);
  else if (first_insn == elf32_arm_plt_entry_short[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_short);
#endif
  else
    /* We don't yet handle this PLT format.  */
    return (bfd_vma) -1;

  return plt_size;
}
17811
17812 /* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab. */
17813
static long
elf32_arm_get_synthetic_symtab (bfd *abfd,
			       long symcount ATTRIBUTE_UNUSED,
			       asymbol **syms ATTRIBUTE_UNUSED,
			       long dynsymcount,
			       asymbol **dynsyms,
			       asymbol **ret)
{
  asection *relplt;
  asymbol *s;
  arelent *p;
  long count, i, n;
  size_t size;
  Elf_Internal_Shdr *hdr;
  char *names;
  asection *plt;
  bfd_vma offset;
  bfd_byte *data;

  *ret = NULL;

  /* Only executables and shared objects can have a PLT.  */
  if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
    return 0;

  if (dynsymcount <= 0)
    return 0;

  relplt = bfd_get_section_by_name (abfd, ".rel.plt");
  if (relplt == NULL)
    return 0;

  /* Sanity-check that .rel.plt really holds dynamic relocations.  */
  hdr = &elf_section_data (relplt)->this_hdr;
  if (hdr->sh_link != elf_dynsymtab (abfd)
      || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA))
    return 0;

  plt = bfd_get_section_by_name (abfd, ".plt");
  if (plt == NULL)
    return 0;

  if (!elf32_arm_size_info.slurp_reloc_table (abfd, relplt, dynsyms, TRUE))
    return -1;

  /* Read the PLT code so entry sizes can be determined from the
     instruction templates; cache the contents on the section.  */
  data = plt->contents;
  if (data == NULL)
    {
      if (!bfd_get_full_section_contents(abfd, (asection *) plt, &data) || data == NULL)
	return -1;
      bfd_cache_section_contents((asection *) plt, data);
    }

  /* First pass: compute the space needed for the symbols plus their
     "name@plt" (and possibly "+0xADDEND") strings.  */
  count = relplt->size / hdr->sh_entsize;
  size = count * sizeof (asymbol);
  p = relplt->relocation;
  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
    {
      size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
      if (p->addend != 0)
	size += sizeof ("+0x") - 1 + 8;
    }

  /* Single allocation: the asymbol array, followed by the string pool.
     Caller owns and frees *RET.  */
  s = *ret = (asymbol *) bfd_malloc (size);
  if (s == NULL)
    return -1;

  /* Skip the PLT header; give up if its format is unrecognised.  */
  offset = elf32_arm_plt0_size (abfd, data);
  if (offset == (bfd_vma) -1)
    return -1;

  /* Second pass: build one synthetic symbol per PLT entry.  NAMES
     walks the string pool that lives just past the symbol array.  */
  names = (char *) (s + count);
  p = relplt->relocation;
  n = 0;
  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
    {
      size_t len;

      bfd_vma plt_size = elf32_arm_plt_size (abfd, data, offset);
      if (plt_size == (bfd_vma) -1)
	break;

      *s = **p->sym_ptr_ptr;
      /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set.  Since
	 we are defining a symbol, ensure one of them is set.  */
      if ((s->flags & BSF_LOCAL) == 0)
	s->flags |= BSF_GLOBAL;
      s->flags |= BSF_SYNTHETIC;
      s->section = plt;
      s->value = offset;
      s->name = names;
      s->udata.p = NULL;
      len = strlen ((*p->sym_ptr_ptr)->name);
      memcpy (names, (*p->sym_ptr_ptr)->name, len);
      names += len;
      if (p->addend != 0)
	{
	  char buf[30], *a;

	  memcpy (names, "+0x", sizeof ("+0x") - 1);
	  names += sizeof ("+0x") - 1;
	  bfd_sprintf_vma (abfd, buf, p->addend);
	  /* Skip leading zeros of the formatted addend.  */
	  for (a = buf; *a == '0'; ++a)
	    ;
	  len = strlen (a);
	  memcpy (names, a, len);
	  names += len;
	}
      memcpy (names, "@plt", sizeof ("@plt"));
      names += sizeof ("@plt");
      ++s, ++n;
      offset += plt_size;
    }

  return n;
}
17928
17929 static bfd_boolean
17930 elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr * hdr)
17931 {
17932 if (hdr->sh_flags & SHF_ARM_NOREAD)
17933 *flags |= SEC_ELF_NOREAD;
17934 return TRUE;
17935 }
17936
17937 static flagword
17938 elf32_arm_lookup_section_flags (char *flag_name)
17939 {
17940 if (!strcmp (flag_name, "SHF_ARM_NOREAD"))
17941 return SHF_ARM_NOREAD;
17942
17943 return SEC_NO_FLAGS;
17944 }
17945
17946 static unsigned int
17947 elf32_arm_count_additional_relocs (asection *sec)
17948 {
17949 struct _arm_elf_section_data *arm_data;
17950 arm_data = get_arm_elf_section_data (sec);
17951 return arm_data->additional_reloc_count;
17952 }
17953
17954 /* Called to set the sh_flags, sh_link and sh_info fields of OSECTION which
17955 has a type >= SHT_LOOS. Returns TRUE if these fields were initialised
17956 FALSE otherwise. ISECTION is the best guess matching section from the
17957 input bfd IBFD, but it might be NULL. */
17958
static bfd_boolean
elf32_arm_copy_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
				       bfd *obfd ATTRIBUTE_UNUSED,
				       const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
				       Elf_Internal_Shdr *osection)
{
  switch (osection->sh_type)
    {
    case SHT_ARM_EXIDX:
      {
	Elf_Internal_Shdr **oheaders = elf_elfsections (obfd);
	Elf_Internal_Shdr **iheaders = elf_elfsections (ibfd);
	unsigned i = 0;

	osection->sh_flags = SHF_ALLOC | SHF_LINK_ORDER;
	osection->sh_info = 0;

	/* The sh_link field must be set to the text section associated with
	   this index section.  Unfortunately the ARM EHABI does not specify
	   exactly how to determine this association.  Our caller does try
	   to match up OSECTION with its corresponding input section however
	   so that is a good first guess.  */
	if (isection != NULL
	    && osection->bfd_section != NULL
	    && isection->bfd_section != NULL
	    && isection->bfd_section->output_section != NULL
	    && isection->bfd_section->output_section == osection->bfd_section
	    && iheaders != NULL
	    && isection->sh_link > 0
	    && isection->sh_link < elf_numsections (ibfd)
	    && iheaders[isection->sh_link]->bfd_section != NULL
	    && iheaders[isection->sh_link]->bfd_section->output_section != NULL
	    )
	  {
	    /* Find the output header for the text section that the
	       input's sh_link pointed at.  I stays 0 on failure.  */
	    for (i = elf_numsections (obfd); i-- > 0;)
	      if (oheaders[i]->bfd_section
		  == iheaders[isection->sh_link]->bfd_section->output_section)
		break;
	  }

	if (i == 0)
	  {
	    /* Failing that we have to find a matching section ourselves.  If
	       we had the output section name available we could compare that
	       with input section names.  Unfortunately we don't.  So instead
	       we use a simple heuristic and look for the nearest executable
	       section before this one.  */
	    for (i = elf_numsections (obfd); i-- > 0;)
	      if (oheaders[i] == osection)
		break;
	    if (i == 0)
	      break;

	    /* Scan backwards for the nearest allocated, executable
	       PROGBITS section.  */
	    while (i-- > 0)
	      if (oheaders[i]->sh_type == SHT_PROGBITS
		  && (oheaders[i]->sh_flags & (SHF_ALLOC | SHF_EXECINSTR))
		  == (SHF_ALLOC | SHF_EXECINSTR))
		break;
	  }

	/* I == 0 means no text section was found; fall through and
	   report failure.  */
	if (i)
	  {
	    osection->sh_link = i;
	    /* If the text section was part of a group
	       then the index section should be too.  */
	    if (oheaders[i]->sh_flags & SHF_GROUP)
	      osection->sh_flags |= SHF_GROUP;
	    return TRUE;
	  }
      }
      break;

    case SHT_ARM_PREEMPTMAP:
      osection->sh_flags = SHF_ALLOC;
      break;

    case SHT_ARM_ATTRIBUTES:
    case SHT_ARM_DEBUGOVERLAY:
    case SHT_ARM_OVERLAYSECTION:
    default:
      break;
    }

  return FALSE;
}
18044
18045 #undef elf_backend_copy_special_section_fields
18046 #define elf_backend_copy_special_section_fields elf32_arm_copy_special_section_fields
18047
18048 #define ELF_ARCH bfd_arch_arm
18049 #define ELF_TARGET_ID ARM_ELF_DATA
18050 #define ELF_MACHINE_CODE EM_ARM
18051 #ifdef __QNXTARGET__
18052 #define ELF_MAXPAGESIZE 0x1000
18053 #else
18054 #define ELF_MAXPAGESIZE 0x10000
18055 #endif
18056 #define ELF_MINPAGESIZE 0x1000
18057 #define ELF_COMMONPAGESIZE 0x1000
18058
18059 #define bfd_elf32_mkobject elf32_arm_mkobject
18060
18061 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
18062 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
18063 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
18064 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
18065 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
18066 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
18067 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
18068 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
18069 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
18070 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
18071 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
18072 #define bfd_elf32_bfd_final_link elf32_arm_final_link
18073 #define bfd_elf32_get_synthetic_symtab elf32_arm_get_synthetic_symtab
18074
18075 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
18076 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
18077 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
18078 #define elf_backend_gc_sweep_hook elf32_arm_gc_sweep_hook
18079 #define elf_backend_check_relocs elf32_arm_check_relocs
18080 #define elf_backend_relocate_section elf32_arm_relocate_section
18081 #define elf_backend_write_section elf32_arm_write_section
18082 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
18083 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
18084 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
18085 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
18086 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
18087 #define elf_backend_always_size_sections elf32_arm_always_size_sections
18088 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
18089 #define elf_backend_post_process_headers elf32_arm_post_process_headers
18090 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
18091 #define elf_backend_object_p elf32_arm_object_p
18092 #define elf_backend_fake_sections elf32_arm_fake_sections
18093 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
18094 #define elf_backend_final_write_processing elf32_arm_final_write_processing
18095 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
18096 #define elf_backend_size_info elf32_arm_size_info
18097 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
18098 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
18099 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
18100 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
18101 #define elf_backend_add_symbol_hook elf32_arm_add_symbol_hook
18102 #define elf_backend_count_additional_relocs elf32_arm_count_additional_relocs
18103
18104 #define elf_backend_can_refcount 1
18105 #define elf_backend_can_gc_sections 1
18106 #define elf_backend_plt_readonly 1
18107 #define elf_backend_want_got_plt 1
18108 #define elf_backend_want_plt_sym 0
18109 #define elf_backend_may_use_rel_p 1
18110 #define elf_backend_may_use_rela_p 0
18111 #define elf_backend_default_use_rela_p 0
18112
18113 #define elf_backend_got_header_size 12
18114 #define elf_backend_extern_protected_data 1
18115
18116 #undef elf_backend_obj_attrs_vendor
18117 #define elf_backend_obj_attrs_vendor "aeabi"
18118 #undef elf_backend_obj_attrs_section
18119 #define elf_backend_obj_attrs_section ".ARM.attributes"
18120 #undef elf_backend_obj_attrs_arg_type
18121 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
18122 #undef elf_backend_obj_attrs_section_type
18123 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
18124 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
18125 #define elf_backend_obj_attrs_handle_unknown elf32_arm_obj_attrs_handle_unknown
18126
18127 #undef elf_backend_section_flags
18128 #define elf_backend_section_flags elf32_arm_section_flags
18129 #undef elf_backend_lookup_section_flags_hook
18130 #define elf_backend_lookup_section_flags_hook elf32_arm_lookup_section_flags
18131
18132 #include "elf32-target.h"
18133
18134 /* Native Client targets. */
18135
18136 #undef TARGET_LITTLE_SYM
18137 #define TARGET_LITTLE_SYM arm_elf32_nacl_le_vec
18138 #undef TARGET_LITTLE_NAME
18139 #define TARGET_LITTLE_NAME "elf32-littlearm-nacl"
18140 #undef TARGET_BIG_SYM
18141 #define TARGET_BIG_SYM arm_elf32_nacl_be_vec
18142 #undef TARGET_BIG_NAME
18143 #define TARGET_BIG_NAME "elf32-bigarm-nacl"
18144
18145 /* Like elf32_arm_link_hash_table_create -- but overrides
18146 appropriately for NaCl. */
18147
18148 static struct bfd_link_hash_table *
18149 elf32_arm_nacl_link_hash_table_create (bfd *abfd)
18150 {
18151 struct bfd_link_hash_table *ret;
18152
18153 ret = elf32_arm_link_hash_table_create (abfd);
18154 if (ret)
18155 {
18156 struct elf32_arm_link_hash_table *htab
18157 = (struct elf32_arm_link_hash_table *) ret;
18158
18159 htab->nacl_p = 1;
18160
18161 htab->plt_header_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry);
18162 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry);
18163 }
18164 return ret;
18165 }
18166
18167 /* Since NaCl doesn't use the ARM-specific unwind format, we don't
18168 really need to use elf32_arm_modify_segment_map. But we do it
18169 anyway just to reduce gratuitous differences with the stock ARM backend. */
18170
18171 static bfd_boolean
18172 elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
18173 {
18174 return (elf32_arm_modify_segment_map (abfd, info)
18175 && nacl_modify_segment_map (abfd, info));
18176 }
18177
static void
elf32_arm_nacl_final_write_processing (bfd *abfd, bfd_boolean linker)
{
  /* Run the standard ARM post-write fixups, then the NaCl-specific
     ones.  */
  elf32_arm_final_write_processing (abfd, linker);
  nacl_final_write_processing (abfd, linker);
}
18184
18185 static bfd_vma
18186 elf32_arm_nacl_plt_sym_val (bfd_vma i, const asection *plt,
18187 const arelent *rel ATTRIBUTE_UNUSED)
18188 {
18189 return plt->vma
18190 + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry) +
18191 i * ARRAY_SIZE (elf32_arm_nacl_plt_entry));
18192 }
18193
18194 #undef elf32_bed
18195 #define elf32_bed elf32_arm_nacl_bed
18196 #undef bfd_elf32_bfd_link_hash_table_create
18197 #define bfd_elf32_bfd_link_hash_table_create \
18198 elf32_arm_nacl_link_hash_table_create
18199 #undef elf_backend_plt_alignment
18200 #define elf_backend_plt_alignment 4
18201 #undef elf_backend_modify_segment_map
18202 #define elf_backend_modify_segment_map elf32_arm_nacl_modify_segment_map
18203 #undef elf_backend_modify_program_headers
18204 #define elf_backend_modify_program_headers nacl_modify_program_headers
18205 #undef elf_backend_final_write_processing
18206 #define elf_backend_final_write_processing elf32_arm_nacl_final_write_processing
18207 #undef bfd_elf32_get_synthetic_symtab
18208 #undef elf_backend_plt_sym_val
18209 #define elf_backend_plt_sym_val elf32_arm_nacl_plt_sym_val
18210 #undef elf_backend_copy_special_section_fields
18211
18212 #undef ELF_MINPAGESIZE
18213 #undef ELF_COMMONPAGESIZE
18214
18215
18216 #include "elf32-target.h"
18217
18218 /* Reset to defaults. */
18219 #undef elf_backend_plt_alignment
18220 #undef elf_backend_modify_segment_map
18221 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
18222 #undef elf_backend_modify_program_headers
18223 #undef elf_backend_final_write_processing
18224 #define elf_backend_final_write_processing elf32_arm_final_write_processing
18225 #undef ELF_MINPAGESIZE
18226 #define ELF_MINPAGESIZE 0x1000
18227 #undef ELF_COMMONPAGESIZE
18228 #define ELF_COMMONPAGESIZE 0x1000
18229
18230
18231 /* VxWorks Targets. */
18232
18233 #undef TARGET_LITTLE_SYM
18234 #define TARGET_LITTLE_SYM arm_elf32_vxworks_le_vec
18235 #undef TARGET_LITTLE_NAME
18236 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
18237 #undef TARGET_BIG_SYM
18238 #define TARGET_BIG_SYM arm_elf32_vxworks_be_vec
18239 #undef TARGET_BIG_NAME
18240 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
18241
18242 /* Like elf32_arm_link_hash_table_create -- but overrides
18243 appropriately for VxWorks. */
18244
18245 static struct bfd_link_hash_table *
18246 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
18247 {
18248 struct bfd_link_hash_table *ret;
18249
18250 ret = elf32_arm_link_hash_table_create (abfd);
18251 if (ret)
18252 {
18253 struct elf32_arm_link_hash_table *htab
18254 = (struct elf32_arm_link_hash_table *) ret;
18255 htab->use_rel = 0;
18256 htab->vxworks_p = 1;
18257 }
18258 return ret;
18259 }
18260
/* Final-write hook for VxWorks targets: perform the generic ARM
   processing first, then the VxWorks-specific fixups.  The order
   matters; both hooks mutate the output BFD.  */
static void
elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
{
  elf32_arm_final_write_processing (abfd, linker);
  elf_vxworks_final_write_processing (abfd, linker);
}
18267
#undef elf32_bed
#define elf32_bed elf32_arm_vxworks_bed

/* VxWorks-specific backend hook overrides.  */
#undef bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
#undef elf_backend_final_write_processing
#define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
#undef elf_backend_emit_relocs
#define elf_backend_emit_relocs elf_vxworks_emit_relocs

/* VxWorks uses RELA relocations exclusively (see use_rel = 0 in the
   hash-table create function above).  */
#undef elf_backend_may_use_rel_p
#define elf_backend_may_use_rel_p 0
#undef elf_backend_may_use_rela_p
#define elf_backend_may_use_rela_p 1
#undef elf_backend_default_use_rela_p
#define elf_backend_default_use_rela_p 1
#undef elf_backend_want_plt_sym
#define elf_backend_want_plt_sym 1
#undef ELF_MAXPAGESIZE
#define ELF_MAXPAGESIZE 0x1000

#include "elf32-target.h"
18290
18291
/* Merge backend specific data from an object file to the output
   object file when linking.

   Returns FALSE on a hard incompatibility (wrong endianness, EABI
   attribute clash, EABI version mismatch, or an input already in
   final BE8 form); otherwise accumulates per-flag complaints into
   FLAGS_COMPATIBLE and returns that, so that several mismatches can
   be reported in one pass.  */

static bfd_boolean
elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
{
  flagword out_flags;
  flagword in_flags;
  bfd_boolean flags_compatible = TRUE;
  asection *sec;

  /* Check if we have the same endianness.  */
  if (! _bfd_generic_verify_endian_match (ibfd, obfd))
    return FALSE;

  /* Non-ARM inputs (e.g. linker scripts, binary blobs) have nothing
     to merge.  */
  if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
    return TRUE;

  if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
    return FALSE;

  /* The input BFD must have had its flags initialised.  */
  /* The following seems bogus to me -- The flags are initialized in
     the assembler but I don't think an elf_flags_init field is
     written into the object.  */
  /* BFD_ASSERT (elf_flags_init (ibfd)); */

  in_flags = elf_elfheader (ibfd)->e_flags;
  out_flags = elf_elfheader (obfd)->e_flags;

  /* In theory there is no reason why we couldn't handle this.  However
     in practice it isn't even close to working and there is no real
     reason to want it.  */
  if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
      && !(ibfd->flags & DYNAMIC)
      && (in_flags & EF_ARM_BE8))
    {
      _bfd_error_handler (_("error: %B is already in final BE8 format"),
			  ibfd);
      return FALSE;
    }

  /* First merge into an output whose flags are still uninitialised:
     simply adopt the input's flags (and possibly arch/mach).  */
  if (!elf_flags_init (obfd))
    {
      /* If the input is the default architecture and had the default
	 flags then do not bother setting the flags for the output
	 architecture, instead allow future merges to do this.  If no
	 future merges ever set these flags then they will retain their
	 uninitialised values, which surprise surprise, correspond
	 to the default values.  */
      if (bfd_get_arch_info (ibfd)->the_default
	  && elf_elfheader (ibfd)->e_flags == 0)
	return TRUE;

      elf_flags_init (obfd) = TRUE;
      elf_elfheader (obfd)->e_flags = in_flags;

      if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
	  && bfd_get_arch_info (obfd)->the_default)
	return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));

      return TRUE;
    }

  /* Determine what should happen if the input ARM architecture
     does not match the output ARM architecture.  */
  if (! bfd_arm_merge_machines (ibfd, obfd))
    return FALSE;

  /* Identical flags must be compatible.  */
  if (in_flags == out_flags)
    return TRUE;

  /* Check to see if the input BFD actually contains any sections.  If
     not, its flags may not have been initialised either, but it
     cannot actually cause any incompatiblity.  Do not short-circuit
     dynamic objects; their section list may be emptied by
     elf_link_add_object_symbols.

     Also check to see if there are no code sections in the input.
     In this case there is no need to check for code specific flags.
     XXX - do we need to worry about floating-point format compatability
     in data sections ?  */
  if (!(ibfd->flags & DYNAMIC))
    {
      bfd_boolean null_input_bfd = TRUE;
      bfd_boolean only_data_sections = TRUE;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  /* Ignore synthetic glue sections.  */
	  if (strcmp (sec->name, ".glue_7")
	      && strcmp (sec->name, ".glue_7t"))
	    {
	      if ((bfd_get_section_flags (ibfd, sec)
		   & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		  == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		only_data_sections = FALSE;

	      null_input_bfd = FALSE;
	      /* NOTE(review): this break means only the FIRST non-glue
		 section decides only_data_sections; later code sections
		 are not examined.  Presumably intentional (matches
		 long-standing behaviour) -- confirm before changing.  */
	      break;
	    }
	}

      if (null_input_bfd || only_data_sections)
	return TRUE;
    }

  /* Complain about various flag mismatches.  */
  if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
				      EF_ARM_EABI_VERSION (out_flags)))
    {
      _bfd_error_handler
	(_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
	 ibfd, obfd,
	 (in_flags & EF_ARM_EABIMASK) >> 24,
	 (out_flags & EF_ARM_EABIMASK) >> 24);
      return FALSE;
    }

  /* Not sure what needs to be checked for EABI versions >= 1.  */
  /* VxWorks libraries do not use these flags.  */
  if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
      && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
      && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
    {
      /* APCS-26 vs APCS-32 code cannot be mixed.  */
      if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
	{
	  _bfd_error_handler
	    (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
	     ibfd, obfd,
	     in_flags & EF_ARM_APCS_26 ? 26 : 32,
	     out_flags & EF_ARM_APCS_26 ? 26 : 32);
	  flags_compatible = FALSE;
	}

      /* Float-argument passing convention must agree.  */
      if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
	{
	  if (in_flags & EF_ARM_APCS_FLOAT)
	    _bfd_error_handler
	      (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
	       ibfd, obfd);
	  else
	    _bfd_error_handler
	      (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
	       ibfd, obfd);

	  flags_compatible = FALSE;
	}

      /* VFP vs FPA instruction sets cannot be mixed.  */
      if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
	{
	  if (in_flags & EF_ARM_VFP_FLOAT)
	    _bfd_error_handler
	      (_("error: %B uses VFP instructions, whereas %B does not"),
	       ibfd, obfd);
	  else
	    _bfd_error_handler
	      (_("error: %B uses FPA instructions, whereas %B does not"),
	       ibfd, obfd);

	  flags_compatible = FALSE;
	}

      /* Cirrus Maverick coprocessor usage must agree.  */
      if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
	{
	  if (in_flags & EF_ARM_MAVERICK_FLOAT)
	    _bfd_error_handler
	      (_("error: %B uses Maverick instructions, whereas %B does not"),
	       ibfd, obfd);
	  else
	    _bfd_error_handler
	      (_("error: %B does not use Maverick instructions, whereas %B does"),
	       ibfd, obfd);

	  flags_compatible = FALSE;
	}

#ifdef EF_ARM_SOFT_FLOAT
      if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
	{
	  /* We can allow interworking between code that is VFP format
	     layout, and uses either soft float or integer regs for
	     passing floating point arguments and results.  We already
	     know that the APCS_FLOAT flags match; similarly for VFP
	     flags.  */
	  if ((in_flags & EF_ARM_APCS_FLOAT) != 0
	      || (in_flags & EF_ARM_VFP_FLOAT) == 0)
	    {
	      if (in_flags & EF_ARM_SOFT_FLOAT)
		_bfd_error_handler
		  (_("error: %B uses software FP, whereas %B uses hardware FP"),
		   ibfd, obfd);
	      else
		_bfd_error_handler
		  (_("error: %B uses hardware FP, whereas %B uses software FP"),
		   ibfd, obfd);

	      flags_compatible = FALSE;
	    }
	}
#endif

      /* Interworking mismatch is only a warning.  */
      if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
	{
	  if (in_flags & EF_ARM_INTERWORK)
	    {
	      _bfd_error_handler
		(_("Warning: %B supports interworking, whereas %B does not"),
		 ibfd, obfd);
	    }
	  else
	    {
	      _bfd_error_handler
		(_("Warning: %B does not support interworking, whereas %B does"),
		 ibfd, obfd);
	    }
	}
    }

  return flags_compatible;
}
18515
18516
/* Symbian OS Targets.  */

/* Target vectors and printable names for the little- and big-endian
   Symbian OS flavours of the 32-bit ARM ELF backend.  */
#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM arm_elf32_symbian_le_vec
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
#undef TARGET_BIG_SYM
#define TARGET_BIG_SYM arm_elf32_symbian_be_vec
#undef TARGET_BIG_NAME
#define TARGET_BIG_NAME "elf32-bigarm-symbian"
18527
18528 /* Like elf32_arm_link_hash_table_create -- but overrides
18529 appropriately for Symbian OS. */
18530
18531 static struct bfd_link_hash_table *
18532 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
18533 {
18534 struct bfd_link_hash_table *ret;
18535
18536 ret = elf32_arm_link_hash_table_create (abfd);
18537 if (ret)
18538 {
18539 struct elf32_arm_link_hash_table *htab
18540 = (struct elf32_arm_link_hash_table *)ret;
18541 /* There is no PLT header for Symbian OS. */
18542 htab->plt_header_size = 0;
18543 /* The PLT entries are each one instruction and one word. */
18544 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
18545 htab->symbian_p = 1;
18546 /* Symbian uses armv5t or above, so use_blx is always true. */
18547 htab->use_blx = 1;
18548 htab->root.is_relocatable_executable = 1;
18549 }
18550 return ret;
18551 }
18552
/* Special-section table for Symbian OS: overrides the default ELF
   section attributes for the dynamic-linking sections.  */
static const struct bfd_elf_special_section
elf32_arm_symbian_special_sections[] =
{
  /* In a BPABI executable, the dynamic linking sections do not go in
     the loadable read-only segment.  The post-linker may wish to
     refer to these sections, but they are not part of the final
     program image.  */
  { STRING_COMMA_LEN (".dynamic"), 0, SHT_DYNAMIC, 0 },
  { STRING_COMMA_LEN (".dynstr"), 0, SHT_STRTAB, 0 },
  { STRING_COMMA_LEN (".dynsym"), 0, SHT_DYNSYM, 0 },
  { STRING_COMMA_LEN (".got"), 0, SHT_PROGBITS, 0 },
  { STRING_COMMA_LEN (".hash"), 0, SHT_HASH, 0 },
  /* These sections do not need to be writable as the SymbianOS
     postlinker will arrange things so that no dynamic relocation is
     required.  */
  { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY, SHF_ALLOC },
  { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY, SHF_ALLOC },
  { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
  /* Sentinel terminating the table.  */
  { NULL, 0, 0, 0, 0 }
};
18573
/* Begin-write hook for Symbian (BPABI) targets: strip D_PAGED before
   deferring to the generic ARM begin-write processing.  */
static void
elf32_arm_symbian_begin_write_processing (bfd *abfd,
					  struct bfd_link_info *link_info)
{
  /* BPABI objects are never loaded directly by an OS kernel; they are
     processed by a postlinker first, into an OS-specific format.  If
     the D_PAGED bit is set on the file, BFD will align segments on
     page boundaries, so that an OS can directly map the file.  With
     BPABI objects, that just results in wasted space.  In addition,
     because we clear the D_PAGED bit, map_sections_to_segments will
     recognize that the program headers should not be mapped into any
     loadable segment.  */
  abfd->flags &= ~D_PAGED;
  elf32_arm_begin_write_processing (abfd, link_info);
}
18589
18590 static bfd_boolean
18591 elf32_arm_symbian_modify_segment_map (bfd *abfd,
18592 struct bfd_link_info *info)
18593 {
18594 struct elf_segment_map *m;
18595 asection *dynsec;
18596
18597 /* BPABI shared libraries and executables should have a PT_DYNAMIC
18598 segment. However, because the .dynamic section is not marked
18599 with SEC_LOAD, the generic ELF code will not create such a
18600 segment. */
18601 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
18602 if (dynsec)
18603 {
18604 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
18605 if (m->p_type == PT_DYNAMIC)
18606 break;
18607
18608 if (m == NULL)
18609 {
18610 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
18611 m->next = elf_seg_map (abfd);
18612 elf_seg_map (abfd) = m;
18613 }
18614 }
18615
18616 /* Also call the generic arm routine. */
18617 return elf32_arm_modify_segment_map (abfd, info);
18618 }
18619
18620 /* Return address for Ith PLT stub in section PLT, for relocation REL
18621 or (bfd_vma) -1 if it should not be included. */
18622
18623 static bfd_vma
18624 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
18625 const arelent *rel ATTRIBUTE_UNUSED)
18626 {
18627 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
18628 }
18629
#undef elf32_bed
#define elf32_bed elf32_arm_symbian_bed

/* The dynamic sections are not allocated on SymbianOS; the postlinker
   will process them and then discard them.  */
#undef ELF_DYNAMIC_SEC_FLAGS
#define ELF_DYNAMIC_SEC_FLAGS \
  (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)

/* Drop the VxWorks emit_relocs override; Symbian uses the default.  */
#undef elf_backend_emit_relocs

#undef bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
#undef elf_backend_special_sections
#define elf_backend_special_sections elf32_arm_symbian_special_sections
#undef elf_backend_begin_write_processing
#define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
#undef elf_backend_final_write_processing
#define elf_backend_final_write_processing elf32_arm_final_write_processing

#undef elf_backend_modify_segment_map
#define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map

/* There is no .got section for BPABI objects, and hence no header.  */
#undef elf_backend_got_header_size
#define elf_backend_got_header_size 0

/* Similarly, there is no .got.plt section.  */
#undef elf_backend_want_got_plt
#define elf_backend_want_got_plt 0

#undef elf_backend_plt_sym_val
#define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val

/* Symbian uses REL-style relocations (the post-linker relies on
   addends stored in the section contents).  */
#undef elf_backend_may_use_rel_p
#define elf_backend_may_use_rel_p 1
#undef elf_backend_may_use_rela_p
#define elf_backend_may_use_rela_p 0
#undef elf_backend_default_use_rela_p
#define elf_backend_default_use_rela_p 0
#undef elf_backend_want_plt_sym
#define elf_backend_want_plt_sym 0
#undef ELF_MAXPAGESIZE
#define ELF_MAXPAGESIZE 0x8000

#include "elf32-target.h"