2010-01-20 Doug Kwan <dougkwan@google.com>
[deliverable/binutils-gdb.git] / gold / arm.cc
1 // arm.cc -- arm target support for gold.
2
3 // Copyright 2009 Free Software Foundation, Inc.
4 // Written by Doug Kwan <dougkwan@google.com> based on the i386 code
5 // by Ian Lance Taylor <iant@google.com>.
6 // This file also contains borrowed and adapted code from
7 // bfd/elf32-arm.c.
8
9 // This file is part of gold.
10
11 // This program is free software; you can redistribute it and/or modify
12 // it under the terms of the GNU General Public License as published by
13 // the Free Software Foundation; either version 3 of the License, or
14 // (at your option) any later version.
15
16 // This program is distributed in the hope that it will be useful,
17 // but WITHOUT ANY WARRANTY; without even the implied warranty of
18 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 // GNU General Public License for more details.
20
21 // You should have received a copy of the GNU General Public License
22 // along with this program; if not, write to the Free Software
23 // Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
24 // MA 02110-1301, USA.
25
26 #include "gold.h"
27
28 #include <cstring>
29 #include <limits>
30 #include <cstdio>
31 #include <string>
32 #include <algorithm>
33 #include <map>
34 #include <utility>
35
36 #include "elfcpp.h"
37 #include "parameters.h"
38 #include "reloc.h"
39 #include "arm.h"
40 #include "object.h"
41 #include "symtab.h"
42 #include "layout.h"
43 #include "output.h"
44 #include "copy-relocs.h"
45 #include "target.h"
46 #include "target-reloc.h"
47 #include "target-select.h"
48 #include "tls.h"
49 #include "defstd.h"
50 #include "gc.h"
51 #include "attributes.h"
52
53 namespace
54 {
55
56 using namespace gold;
57
58 template<bool big_endian>
59 class Output_data_plt_arm;
60
61 template<bool big_endian>
62 class Stub_table;
63
64 template<bool big_endian>
65 class Arm_input_section;
66
67 template<bool big_endian>
68 class Arm_output_section;
69
70 template<bool big_endian>
71 class Arm_relobj;
72
73 template<bool big_endian>
74 class Target_arm;
75
// For convenience.
typedef elfcpp::Elf_types<32>::Elf_Addr Arm_address;

// Maximum branch offsets for ARM, THUMB and THUMB2.
// NOTE(review): the trailing "+ 8" / "+ 4" terms appear to account for the
// PC-read bias (the PC reads as the instruction address plus 8 in ARM state
// and plus 4 in THUMB state) -- confirm against the branch-range checks
// that use these constants.
const int32_t ARM_MAX_FWD_BRANCH_OFFSET = ((((1 << 23) - 1) << 2) + 8);
const int32_t ARM_MAX_BWD_BRANCH_OFFSET = ((-((1 << 23) << 2)) + 8);
const int32_t THM_MAX_FWD_BRANCH_OFFSET = ((1 << 22) -2 + 4);
const int32_t THM_MAX_BWD_BRANCH_OFFSET = (-(1 << 22) + 4);
const int32_t THM2_MAX_FWD_BRANCH_OFFSET = (((1 << 24) - 2) + 4);
const int32_t THM2_MAX_BWD_BRANCH_OFFSET = (-(1 << 24) + 4);
86
87 // The arm target class.
88 //
89 // This is a very simple port of gold for ARM-EABI. It is intended for
90 // supporting Android only for the time being. Only these relocation types
91 // are supported.
92 //
93 // R_ARM_NONE
94 // R_ARM_ABS32
95 // R_ARM_ABS32_NOI
96 // R_ARM_ABS16
97 // R_ARM_ABS12
98 // R_ARM_ABS8
99 // R_ARM_THM_ABS5
100 // R_ARM_BASE_ABS
101 // R_ARM_REL32
102 // R_ARM_THM_CALL
103 // R_ARM_COPY
104 // R_ARM_GLOB_DAT
105 // R_ARM_BASE_PREL
106 // R_ARM_JUMP_SLOT
107 // R_ARM_RELATIVE
108 // R_ARM_GOTOFF32
109 // R_ARM_GOT_BREL
110 // R_ARM_GOT_PREL
111 // R_ARM_PLT32
112 // R_ARM_CALL
113 // R_ARM_JUMP24
114 // R_ARM_TARGET1
115 // R_ARM_PREL31
116 // R_ARM_ABS8 (duplicate entry; already listed above)
117 // R_ARM_MOVW_ABS_NC
118 // R_ARM_MOVT_ABS
119 // R_ARM_THM_MOVW_ABS_NC
120 // R_ARM_THM_MOVT_ABS
121 // R_ARM_MOVW_PREL_NC
122 // R_ARM_MOVT_PREL
123 // R_ARM_THM_MOVW_PREL_NC
124 // R_ARM_THM_MOVT_PREL
125 // R_ARM_V4BX
126 // R_ARM_THM_JUMP6
127 // R_ARM_THM_JUMP8
128 // R_ARM_THM_JUMP11
129 //
130 // TODOs:
131 // - Support more relocation types as needed.
132 // - Make PLTs more flexible for different architecture features like
133 // Thumb-2 and BE8.
134 // There are probably a lot more.
135
// Instruction template class.  This class is similar to the insn_sequence
// struct in bfd/elf32-arm.c.  An instance describes one instruction (or
// literal data word) of a stub: its encoding bits, its format, and the
// relocation, if any, to be applied to it when the stub is built.

class Insn_template
{
 public:
  // Types of instruction templates.
  enum Type
  {
    THUMB16_TYPE = 1,
    // THUMB16_SPECIAL_TYPE is used by sub-classes of Stub for instruction
    // templates with class-specific semantics.  Currently this is used
    // only by the Cortex_a8_stub class for handling condition codes in
    // conditional branches.
    THUMB16_SPECIAL_TYPE,
    THUMB32_TYPE,
    ARM_TYPE,
    DATA_TYPE
  };

  // Factory methods to create instruction templates in different formats.

  // A 16-bit THUMB instruction with no relocation.
  static const Insn_template
  thumb16_insn(uint32_t data)
  { return Insn_template(data, THUMB16_TYPE, elfcpp::R_ARM_NONE, 0); }

  // A Thumb conditional branch, in which the proper condition is inserted
  // when we build the stub.
  static const Insn_template
  thumb16_bcond_insn(uint32_t data)
  { return Insn_template(data, THUMB16_SPECIAL_TYPE, elfcpp::R_ARM_NONE, 1); }

  // A 32-bit THUMB instruction with no relocation.
  static const Insn_template
  thumb32_insn(uint32_t data)
  { return Insn_template(data, THUMB32_TYPE, elfcpp::R_ARM_NONE, 0); }

  // A 32-bit THUMB branch carrying an R_ARM_THM_JUMP24 relocation with
  // addend RELOC_ADDEND.
  static const Insn_template
  thumb32_b_insn(uint32_t data, int reloc_addend)
  {
    return Insn_template(data, THUMB32_TYPE, elfcpp::R_ARM_THM_JUMP24,
			 reloc_addend);
  }

  // An ARM instruction with no relocation.
  static const Insn_template
  arm_insn(uint32_t data)
  { return Insn_template(data, ARM_TYPE, elfcpp::R_ARM_NONE, 0); }

  // An ARM branch carrying an R_ARM_JUMP24 relocation with addend
  // RELOC_ADDEND.
  static const Insn_template
  arm_rel_insn(unsigned data, int reloc_addend)
  { return Insn_template(data, ARM_TYPE, elfcpp::R_ARM_JUMP24, reloc_addend); }

  // A literal data word carrying a relocation of type R_TYPE with addend
  // RELOC_ADDEND.
  static const Insn_template
  data_word(unsigned data, unsigned int r_type, int reloc_addend)
  { return Insn_template(data, DATA_TYPE, r_type, reloc_addend); }

  // Accessors.  This class is used for read-only objects so no modifiers
  // are provided.

  // Return the instruction bits or data word.
  uint32_t
  data() const
  { return this->data_; }

  // Return the instruction sequence type of this.
  Type
  type() const
  { return this->type_; }

  // Return the ARM relocation type of this.
  unsigned int
  r_type() const
  { return this->r_type_; }

  // Return the relocation addend.
  int32_t
  reloc_addend() const
  { return this->reloc_addend_; }

  // Return size of instruction template in bytes.  (Defined out of line.)
  size_t
  size() const;

  // Return byte-alignment of instruction template.  (Defined out of line.)
  unsigned
  alignment() const;

 private:
  // We make the constructor private to ensure that only the factory
  // methods are used.
  inline
  Insn_template(unsigned data, Type type, unsigned int r_type, int reloc_addend)
    : data_(data), type_(type), r_type_(r_type), reloc_addend_(reloc_addend)
  { }

  // Instruction specific data.  This is used to store information like
  // some of the instruction bits.
  uint32_t data_;
  // Instruction template type.
  Type type_;
  // Relocation type if there is a relocation or R_ARM_NONE otherwise.
  unsigned int r_type_;
  // Relocation addend.
  int32_t reloc_addend_;
};
238
// Macro for generating code for stub types.  One DEF_STUB entry per
// long/short branch stub.  The list is expanded with different definitions
// of DEF_STUB; here it is used to build the Stub_type enum.

#define DEF_STUBS \
  DEF_STUB(long_branch_any_any) \
  DEF_STUB(long_branch_v4t_arm_thumb) \
  DEF_STUB(long_branch_thumb_only) \
  DEF_STUB(long_branch_v4t_thumb_thumb) \
  DEF_STUB(long_branch_v4t_thumb_arm) \
  DEF_STUB(short_branch_v4t_thumb_arm) \
  DEF_STUB(long_branch_any_arm_pic) \
  DEF_STUB(long_branch_any_thumb_pic) \
  DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
  DEF_STUB(long_branch_v4t_arm_thumb_pic) \
  DEF_STUB(long_branch_v4t_thumb_arm_pic) \
  DEF_STUB(long_branch_thumb_only_pic) \
  DEF_STUB(a8_veneer_b_cond) \
  DEF_STUB(a8_veneer_b) \
  DEF_STUB(a8_veneer_bl) \
  DEF_STUB(a8_veneer_blx) \
  DEF_STUB(v4_veneer_bx)

// Stub types.  Each DEF_STUB(x) expands to "arm_stub_##x," -- including
// the trailing comma -- so the named range aliases below can directly
// follow the DEF_STUBS expansion inside the enumerator list.

#define DEF_STUB(x) arm_stub_##x,
typedef enum
  {
    arm_stub_none,
    DEF_STUBS

    // First reloc stub type.
    arm_stub_reloc_first = arm_stub_long_branch_any_any,
    // Last reloc stub type.
    arm_stub_reloc_last = arm_stub_long_branch_thumb_only_pic,

    // First Cortex-A8 stub type.
    arm_stub_cortex_a8_first = arm_stub_a8_veneer_b_cond,
    // Last Cortex-A8 stub type.
    arm_stub_cortex_a8_last = arm_stub_a8_veneer_blx,

    // Last stub type.
    arm_stub_type_last = arm_stub_v4_veneer_bx
  } Stub_type;
#undef DEF_STUB
283
// Stub template class.  Templates are meant to be read-only objects.
// A stub template for a stub type contains all read-only attributes
// common to all stubs of the same type.

class Stub_template
{
 public:
  // Construct a template of the given stub type from an array of
  // instruction templates and its length.  (Defined out of line.)
  Stub_template(Stub_type, const Insn_template*, size_t);

  ~Stub_template()
  { }

  // Return stub type.
  Stub_type
  type() const
  { return this->type_; }

  // Return an array of instruction templates.
  const Insn_template*
  insns() const
  { return this->insns_; }

  // Return size of template in number of instructions.
  size_t
  insn_count() const
  { return this->insn_count_; }

  // Return size of template in bytes.
  size_t
  size() const
  { return this->size_; }

  // Return alignment of the stub template.
  unsigned
  alignment() const
  { return this->alignment_; }

  // Return whether the entry point is in THUMB mode.
  bool
  entry_in_thumb_mode() const
  { return this->entry_in_thumb_mode_; }

  // Return number of relocations in this template.
  size_t
  reloc_count() const
  { return this->relocs_.size(); }

  // Return index of the I-th instruction with relocation.
  size_t
  reloc_insn_index(size_t i) const
  {
    gold_assert(i < this->relocs_.size());
    return this->relocs_[i].first;
  }

  // Return the offset of the I-th instruction with relocation from the
  // beginning of the stub.
  section_size_type
  reloc_offset(size_t i) const
  {
    gold_assert(i < this->relocs_.size());
    return this->relocs_[i].second;
  }

 private:
  // This contains information about an instruction template with a relocation
  // and its offset from start of stub.
  typedef std::pair<size_t, section_size_type> Reloc;

  // A Stub_template may not be copied.  We want to share templates as much
  // as possible.
  Stub_template(const Stub_template&);
  Stub_template& operator=(const Stub_template&);

  // Stub type.
  Stub_type type_;
  // Points to an array of Insn_templates.
  const Insn_template* insns_;
  // Number of Insn_templates in insns_[].
  size_t insn_count_;
  // Size of templated instructions in bytes.
  size_t size_;
  // Alignment of templated instructions.
  unsigned alignment_;
  // Flag to indicate if entry is in THUMB mode.
  bool entry_in_thumb_mode_;
  // A table of reloc instruction indices and offsets.  We can find these by
  // looking at the instruction templates but we pre-compute and then stash
  // them here for speed.
  std::vector<Reloc> relocs_;
};
375
376 //
377 // A class for code stubs. This is a base class for different type of
378 // stubs used in the ARM target.
379 //
380
381 class Stub
382 {
383 private:
384 static const section_offset_type invalid_offset =
385 static_cast<section_offset_type>(-1);
386
387 public:
388 Stub(const Stub_template* stub_template)
389 : stub_template_(stub_template), offset_(invalid_offset)
390 { }
391
392 virtual
393 ~Stub()
394 { }
395
396 // Return the stub template.
397 const Stub_template*
398 stub_template() const
399 { return this->stub_template_; }
400
401 // Return offset of code stub from beginning of its containing stub table.
402 section_offset_type
403 offset() const
404 {
405 gold_assert(this->offset_ != invalid_offset);
406 return this->offset_;
407 }
408
409 // Set offset of code stub from beginning of its containing stub table.
410 void
411 set_offset(section_offset_type offset)
412 { this->offset_ = offset; }
413
414 // Return the relocation target address of the i-th relocation in the
415 // stub. This must be defined in a child class.
416 Arm_address
417 reloc_target(size_t i)
418 { return this->do_reloc_target(i); }
419
420 // Write a stub at output VIEW. BIG_ENDIAN select how a stub is written.
421 void
422 write(unsigned char* view, section_size_type view_size, bool big_endian)
423 { this->do_write(view, view_size, big_endian); }
424
425 // Return the instruction for THUMB16_SPECIAL_TYPE instruction template
426 // for the i-th instruction.
427 uint16_t
428 thumb16_special(size_t i)
429 { return this->do_thumb16_special(i); }
430
431 protected:
432 // This must be defined in the child class.
433 virtual Arm_address
434 do_reloc_target(size_t) = 0;
435
436 // This may be overridden in the child class.
437 virtual void
438 do_write(unsigned char* view, section_size_type view_size, bool big_endian)
439 {
440 if (big_endian)
441 this->do_fixed_endian_write<true>(view, view_size);
442 else
443 this->do_fixed_endian_write<false>(view, view_size);
444 }
445
446 // This must be overridden if a child class uses the THUMB16_SPECIAL_TYPE
447 // instruction template.
448 virtual uint16_t
449 do_thumb16_special(size_t)
450 { gold_unreachable(); }
451
452 private:
453 // A template to implement do_write.
454 template<bool big_endian>
455 void inline
456 do_fixed_endian_write(unsigned char*, section_size_type);
457
458 // Its template.
459 const Stub_template* stub_template_;
460 // Offset within the section of containing this stub.
461 section_offset_type offset_;
462 };
463
// Reloc stub class.  These are stubs we use to fix up relocations because
// of limited branch ranges.

class Reloc_stub : public Stub
{
 public:
  // Marker for "no local symbol index"; a Key whose r_sym_ equals this
  // holds a global Symbol instead of a (relobj, r_sym) pair.
  static const unsigned int invalid_index = static_cast<unsigned int>(-1);
  // We assume we never jump to this address.
  static const Arm_address invalid_address = static_cast<Arm_address>(-1);

  // Return destination address.
  Arm_address
  destination_address() const
  {
    gold_assert(this->destination_address_ != this->invalid_address);
    return this->destination_address_;
  }

  // Set destination address.
  void
  set_destination_address(Arm_address address)
  {
    gold_assert(address != this->invalid_address);
    this->destination_address_ = address;
  }

  // Reset destination address.
  void
  reset_destination_address()
  { this->destination_address_ = this->invalid_address; }

  // Determine the stub type for a branch of a relocation of R_TYPE going
  // from BRANCH_ADDRESS to BRANCH_TARGET.  If TARGET_IS_THUMB is set,
  // the branch target is a THUMB instruction.
  // NOTE(review): the original comment also mentioned a TARGET parameter
  // "used to look up ARM-specific linker settings", but this declaration
  // takes no such parameter.
  static Stub_type
  stub_type_for_reloc(unsigned int r_type, Arm_address branch_address,
		      Arm_address branch_target, bool target_is_thumb);

  // Reloc_stub key.  A key is logically a triplet of a stub type, a symbol
  // and an addend.  Since we treat global and local symbols differently, we
  // use a Symbol object for a global symbol and an object-index pair for
  // a local symbol.
  class Key
  {
   public:
    // If SYMBOL is not null, this is a global symbol and we ignore RELOBJ
    // and R_SYM.  Otherwise, this is a local symbol and RELOBJ must be
    // non-NULL and R_SYM must not be invalid_index.
    Key(Stub_type stub_type, const Symbol* symbol, const Relobj* relobj,
	unsigned int r_sym, int32_t addend)
      : stub_type_(stub_type), addend_(addend)
    {
      if (symbol != NULL)
	{
	  this->r_sym_ = Reloc_stub::invalid_index;
	  this->u_.symbol = symbol;
	}
      else
	{
	  gold_assert(relobj != NULL && r_sym != invalid_index);
	  this->r_sym_ = r_sym;
	  this->u_.relobj = relobj;
	}
    }

    ~Key()
    { }

    // Accessors: Keys are meant to be read-only objects so no modifiers are
    // provided.

    // Return stub type.
    Stub_type
    stub_type() const
    { return this->stub_type_; }

    // Return the local symbol index or invalid_index.
    unsigned int
    r_sym() const
    { return this->r_sym_; }

    // Return the symbol if there is one.
    const Symbol*
    symbol() const
    { return this->r_sym_ == invalid_index ? this->u_.symbol : NULL; }

    // Return the relobj if there is one.
    const Relobj*
    relobj() const
    { return this->r_sym_ != invalid_index ? this->u_.relobj : NULL; }

    // Whether this equals another key K.
    bool
    eq(const Key& k) const
    {
      return ((this->stub_type_ == k.stub_type_)
	      && (this->r_sym_ == k.r_sym_)
	      && ((this->r_sym_ != Reloc_stub::invalid_index)
		  ? (this->u_.relobj == k.u_.relobj)
		  : (this->u_.symbol == k.u_.symbol))
	      && (this->addend_ == k.addend_));
    }

    // Return a hash value.  This combines the stub type, the symbol index,
    // a hash of the object name (local symbol) or symbol name (global
    // symbol), and the addend.
    size_t
    hash_value() const
    {
      return (this->stub_type_
	      ^ this->r_sym_
	      ^ gold::string_hash<char>(
		    (this->r_sym_ != Reloc_stub::invalid_index)
		    ? this->u_.relobj->name().c_str()
		    : this->u_.symbol->name())
	      ^ this->addend_);
    }

    // Functors for STL associative containers.
    struct hash
    {
      size_t
      operator()(const Key& k) const
      { return k.hash_value(); }
    };

    struct equal_to
    {
      bool
      operator()(const Key& k1, const Key& k2) const
      { return k1.eq(k2); }
    };

    // Name of key.  This is mainly for debugging.
    std::string
    name() const;

   private:
    // Stub type.
    Stub_type stub_type_;
    // If this is a local symbol, this is the index in the defining object.
    // Otherwise, it is invalid_index for a global symbol.
    unsigned int r_sym_;
    // If r_sym_ is invalid_index, this points to a global symbol.
    // Otherwise, this points to a relobj.  We use the unsized and target
    // independent Symbol and Relobj classes instead of Sized_symbol<32> and
    // Arm_relobj.  This is done to avoid making the stub class a template
    // as most of the stub machinery is endianity-neutral.  However, it
    // may require a bit of casting done by users of this class.
    union
    {
      const Symbol* symbol;
      const Relobj* relobj;
    } u_;
    // Addend associated with a reloc.
    int32_t addend_;
  };

 protected:
  // Reloc_stubs are created via a stub factory.  So these are protected.
  Reloc_stub(const Stub_template* stub_template)
    : Stub(stub_template), destination_address_(invalid_address)
  { }

  ~Reloc_stub()
  { }

  friend class Stub_factory;

  // Return the relocation target address of the i-th relocation in the
  // stub.
  Arm_address
  do_reloc_target(size_t i)
  {
    // All reloc stubs have only one relocation.
    gold_assert(i == 0);
    return this->destination_address_;
  }

 private:
  // Address of destination.
  Arm_address destination_address_;
};
646
647 // Cortex-A8 stub class. We need a Cortex-A8 stub to redirect any 32-bit
648 // THUMB branch that meets the following conditions:
649 //
650 // 1. The branch straddles across a page boundary. i.e. lower 12-bit of
651 // branch address is 0xffe.
652 // 2. The branch target address is in the same page as the first word of the
653 // branch.
654 // 3. The branch follows a 32-bit instruction which is not a branch.
655 //
656 // To do the fix up, we need to store the address of the branch instruction
657 // and its target at least. We also need to store the original branch
658 // instruction bits for the condition code in a conditional branch. The
659 // condition code is used in a special instruction template. We also want
660 // to identify input sections needing Cortex-A8 workaround quickly. We store
661 // extra information about object and section index of the code section
662 // containing a branch being fixed up. The information is used to mark
663 // the code section when we finalize the Cortex-A8 stubs.
664 //
665
666 class Cortex_a8_stub : public Stub
667 {
668 public:
669 ~Cortex_a8_stub()
670 { }
671
672 // Return the object of the code section containing the branch being fixed
673 // up.
674 Relobj*
675 relobj() const
676 { return this->relobj_; }
677
678 // Return the section index of the code section containing the branch being
679 // fixed up.
680 unsigned int
681 shndx() const
682 { return this->shndx_; }
683
684 // Return the source address of stub. This is the address of the original
685 // branch instruction. LSB is 1 always set to indicate that it is a THUMB
686 // instruction.
687 Arm_address
688 source_address() const
689 { return this->source_address_; }
690
691 // Return the destination address of the stub. This is the branch taken
692 // address of the original branch instruction. LSB is 1 if it is a THUMB
693 // instruction address.
694 Arm_address
695 destination_address() const
696 { return this->destination_address_; }
697
698 // Return the instruction being fixed up.
699 uint32_t
700 original_insn() const
701 { return this->original_insn_; }
702
703 protected:
704 // Cortex_a8_stubs are created via a stub factory. So these are protected.
705 Cortex_a8_stub(const Stub_template* stub_template, Relobj* relobj,
706 unsigned int shndx, Arm_address source_address,
707 Arm_address destination_address, uint32_t original_insn)
708 : Stub(stub_template), relobj_(relobj), shndx_(shndx),
709 source_address_(source_address | 1U),
710 destination_address_(destination_address),
711 original_insn_(original_insn)
712 { }
713
714 friend class Stub_factory;
715
716 // Return the relocation target address of the i-th relocation in the
717 // stub.
718 Arm_address
719 do_reloc_target(size_t i)
720 {
721 if (this->stub_template()->type() == arm_stub_a8_veneer_b_cond)
722 {
723 // The conditional branch veneer has two relocations.
724 gold_assert(i < 2);
725 return i == 0 ? this->source_address_ + 4 : this->destination_address_;
726 }
727 else
728 {
729 // All other Cortex-A8 stubs have only one relocation.
730 gold_assert(i == 0);
731 return this->destination_address_;
732 }
733 }
734
735 // Return an instruction for the THUMB16_SPECIAL_TYPE instruction template.
736 uint16_t
737 do_thumb16_special(size_t);
738
739 private:
740 // Object of the code section containing the branch being fixed up.
741 Relobj* relobj_;
742 // Section index of the code section containing the branch begin fixed up.
743 unsigned int shndx_;
744 // Source address of original branch.
745 Arm_address source_address_;
746 // Destination address of the original branch.
747 Arm_address destination_address_;
748 // Original branch instruction. This is needed for copying the condition
749 // code from a condition branch to its stub.
750 uint32_t original_insn_;
751 };
752
753 // ARMv4 BX Rx branch relocation stub class.
754 class Arm_v4bx_stub : public Stub
755 {
756 public:
757 ~Arm_v4bx_stub()
758 { }
759
760 // Return the associated register.
761 uint32_t
762 reg() const
763 { return this->reg_; }
764
765 protected:
766 // Arm V4BX stubs are created via a stub factory. So these are protected.
767 Arm_v4bx_stub(const Stub_template* stub_template, const uint32_t reg)
768 : Stub(stub_template), reg_(reg)
769 { }
770
771 friend class Stub_factory;
772
773 // Return the relocation target address of the i-th relocation in the
774 // stub.
775 Arm_address
776 do_reloc_target(size_t)
777 { gold_unreachable(); }
778
779 // This may be overridden in the child class.
780 virtual void
781 do_write(unsigned char* view, section_size_type view_size, bool big_endian)
782 {
783 if (big_endian)
784 this->do_fixed_endian_v4bx_write<true>(view, view_size);
785 else
786 this->do_fixed_endian_v4bx_write<false>(view, view_size);
787 }
788
789 private:
790 // A template to implement do_write.
791 template<bool big_endian>
792 void inline
793 do_fixed_endian_v4bx_write(unsigned char* view, section_size_type)
794 {
795 const Insn_template* insns = this->stub_template()->insns();
796 elfcpp::Swap<32, big_endian>::writeval(view,
797 (insns[0].data()
798 + (this->reg_ << 16)));
799 view += insns[0].size();
800 elfcpp::Swap<32, big_endian>::writeval(view,
801 (insns[1].data() + this->reg_));
802 view += insns[1].size();
803 elfcpp::Swap<32, big_endian>::writeval(view,
804 (insns[2].data() + this->reg_));
805 }
806
807 // A register index (r0-r14), which is associated with the stub.
808 uint32_t reg_;
809 };
810
811 // Stub factory class.
812
813 class Stub_factory
814 {
815 public:
816 // Return the unique instance of this class.
817 static const Stub_factory&
818 get_instance()
819 {
820 static Stub_factory singleton;
821 return singleton;
822 }
823
824 // Make a relocation stub.
825 Reloc_stub*
826 make_reloc_stub(Stub_type stub_type) const
827 {
828 gold_assert(stub_type >= arm_stub_reloc_first
829 && stub_type <= arm_stub_reloc_last);
830 return new Reloc_stub(this->stub_templates_[stub_type]);
831 }
832
833 // Make a Cortex-A8 stub.
834 Cortex_a8_stub*
835 make_cortex_a8_stub(Stub_type stub_type, Relobj* relobj, unsigned int shndx,
836 Arm_address source, Arm_address destination,
837 uint32_t original_insn) const
838 {
839 gold_assert(stub_type >= arm_stub_cortex_a8_first
840 && stub_type <= arm_stub_cortex_a8_last);
841 return new Cortex_a8_stub(this->stub_templates_[stub_type], relobj, shndx,
842 source, destination, original_insn);
843 }
844
845 // Make an ARM V4BX relocation stub.
846 // This method creates a stub from the arm_stub_v4_veneer_bx template only.
847 Arm_v4bx_stub*
848 make_arm_v4bx_stub(uint32_t reg) const
849 {
850 gold_assert(reg < 0xf);
851 return new Arm_v4bx_stub(this->stub_templates_[arm_stub_v4_veneer_bx],
852 reg);
853 }
854
855 private:
856 // Constructor and destructor are protected since we only return a single
857 // instance created in Stub_factory::get_instance().
858
859 Stub_factory();
860
861 // A Stub_factory may not be copied since it is a singleton.
862 Stub_factory(const Stub_factory&);
863 Stub_factory& operator=(Stub_factory&);
864
865 // Stub templates. These are initialized in the constructor.
866 const Stub_template* stub_templates_[arm_stub_type_last+1];
867 };
868
869 // A class to hold stubs for the ARM target.
870
871 template<bool big_endian>
872 class Stub_table : public Output_data
873 {
874 public:
875 Stub_table(Arm_input_section<big_endian>* owner)
876 : Output_data(), owner_(owner), reloc_stubs_(), cortex_a8_stubs_(),
877 arm_v4bx_stubs_(0xf), prev_data_size_(0), prev_addralign_(1)
878 { }
879
880 ~Stub_table()
881 { }
882
883 // Owner of this stub table.
884 Arm_input_section<big_endian>*
885 owner() const
886 { return this->owner_; }
887
888 // Whether this stub table is empty.
889 bool
890 empty() const
891 {
892 return (this->reloc_stubs_.empty()
893 && this->cortex_a8_stubs_.empty()
894 && this->arm_v4bx_stubs_.empty());
895 }
896
897 // Return the current data size.
898 off_t
899 current_data_size() const
900 { return this->current_data_size_for_child(); }
901
902 // Add a STUB with using KEY. Caller is reponsible for avoid adding
903 // if already a STUB with the same key has been added.
904 void
905 add_reloc_stub(Reloc_stub* stub, const Reloc_stub::Key& key)
906 {
907 const Stub_template* stub_template = stub->stub_template();
908 gold_assert(stub_template->type() == key.stub_type());
909 this->reloc_stubs_[key] = stub;
910 }
911
912 // Add a Cortex-A8 STUB that fixes up a THUMB branch at ADDRESS.
913 // Caller is reponsible for avoid adding if already a STUB with the same
914 // address has been added.
915 void
916 add_cortex_a8_stub(Arm_address address, Cortex_a8_stub* stub)
917 {
918 std::pair<Arm_address, Cortex_a8_stub*> value(address, stub);
919 this->cortex_a8_stubs_.insert(value);
920 }
921
922 // Add an ARM V4BX relocation stub. A register index will be retrieved
923 // from the stub.
924 void
925 add_arm_v4bx_stub(Arm_v4bx_stub* stub)
926 {
927 gold_assert(stub != NULL && this->arm_v4bx_stubs_[stub->reg()] == NULL);
928 this->arm_v4bx_stubs_[stub->reg()] = stub;
929 }
930
931 // Remove all Cortex-A8 stubs.
932 void
933 remove_all_cortex_a8_stubs();
934
935 // Look up a relocation stub using KEY. Return NULL if there is none.
936 Reloc_stub*
937 find_reloc_stub(const Reloc_stub::Key& key) const
938 {
939 typename Reloc_stub_map::const_iterator p = this->reloc_stubs_.find(key);
940 return (p != this->reloc_stubs_.end()) ? p->second : NULL;
941 }
942
943 // Look up an arm v4bx relocation stub using the register index.
944 // Return NULL if there is none.
945 Arm_v4bx_stub*
946 find_arm_v4bx_stub(const uint32_t reg) const
947 {
948 gold_assert(reg < 0xf);
949 return this->arm_v4bx_stubs_[reg];
950 }
951
952 // Relocate stubs in this stub table.
953 void
954 relocate_stubs(const Relocate_info<32, big_endian>*,
955 Target_arm<big_endian>*, Output_section*,
956 unsigned char*, Arm_address, section_size_type);
957
958 // Update data size and alignment at the end of a relaxation pass. Return
959 // true if either data size or alignment is different from that of the
960 // previous relaxation pass.
961 bool
962 update_data_size_and_addralign();
963
964 // Finalize stubs. Set the offsets of all stubs and mark input sections
965 // needing the Cortex-A8 workaround.
966 void
967 finalize_stubs();
968
969 // Apply Cortex-A8 workaround to an address range.
970 void
971 apply_cortex_a8_workaround_to_address_range(Target_arm<big_endian>*,
972 unsigned char*, Arm_address,
973 section_size_type);
974
975 protected:
976 // Write out section contents.
977 void
978 do_write(Output_file*);
979
980 // Return the required alignment.
981 uint64_t
982 do_addralign() const
983 { return this->prev_addralign_; }
984
985 // Reset address and file offset.
986 void
987 do_reset_address_and_file_offset()
988 { this->set_current_data_size_for_child(this->prev_data_size_); }
989
990 // Set final data size.
991 void
992 set_final_data_size()
993 { this->set_data_size(this->current_data_size()); }
994
995 private:
996 // Relocate one stub.
997 void
998 relocate_stub(Stub*, const Relocate_info<32, big_endian>*,
999 Target_arm<big_endian>*, Output_section*,
1000 unsigned char*, Arm_address, section_size_type);
1001
1002 // Unordered map of relocation stubs.
1003 typedef
1004 Unordered_map<Reloc_stub::Key, Reloc_stub*, Reloc_stub::Key::hash,
1005 Reloc_stub::Key::equal_to>
1006 Reloc_stub_map;
1007
1008 // List of Cortex-A8 stubs ordered by addresses of branches being
1009 // fixed up in output.
1010 typedef std::map<Arm_address, Cortex_a8_stub*> Cortex_a8_stub_list;
1011 // List of Arm V4BX relocation stubs ordered by associated registers.
1012 typedef std::vector<Arm_v4bx_stub*> Arm_v4bx_stub_list;
1013
1014 // Owner of this stub table.
1015 Arm_input_section<big_endian>* owner_;
1016 // The relocation stubs.
1017 Reloc_stub_map reloc_stubs_;
1018 // The cortex_a8_stubs.
1019 Cortex_a8_stub_list cortex_a8_stubs_;
1020 // The Arm V4BX relocation stubs.
1021 Arm_v4bx_stub_list arm_v4bx_stubs_;
1022 // data size of this in the previous pass.
1023 off_t prev_data_size_;
1024 // address alignment of this in the previous pass.
1025 uint64_t prev_addralign_;
1026 };
1027
1028 // A class to wrap an ordinary input section containing executable code.
1029
template<bool big_endian>
class Arm_input_section : public Output_relaxed_input_section
{
 public:
  // We wrap the input section with a placeholder alignment of 1; the
  // real alignment and size are recorded by init().
  Arm_input_section(Relobj* relobj, unsigned int shndx)
    : Output_relaxed_input_section(relobj, shndx, 1),
      original_addralign_(1), original_size_(0), stub_table_(NULL)
  { }

  ~Arm_input_section()
  { }

  // Initialize.
  void
  init();

  // Whether this is a stub table owner.  A stub table may be attached to
  // several input sections but only one of them owns it.
  bool
  is_stub_table_owner() const
  { return this->stub_table_ != NULL && this->stub_table_->owner() == this; }

  // Return the stub table, or NULL if none has been attached.
  Stub_table<big_endian>*
  stub_table() const
  { return this->stub_table_; }

  // Set the stub_table.
  void
  set_stub_table(Stub_table<big_endian>* stub_table)
  { this->stub_table_ = stub_table; }

  // Downcast a base pointer to an Arm_input_section pointer.  This is
  // not type-safe but we only use Arm_input_section not the base class.
  static Arm_input_section<big_endian>*
  as_arm_input_section(Output_relaxed_input_section* poris)
  { return static_cast<Arm_input_section<big_endian>*>(poris); }

 protected:
  // Write data to output file.
  void
  do_write(Output_file*);

  // Return required alignment of this.  If we own a stub table, its
  // alignment requirement is merged with the original section's.
  uint64_t
  do_addralign() const
  {
    if (this->is_stub_table_owner())
      return std::max(this->stub_table_->addralign(),
                      this->original_addralign_);
    else
      return this->original_addralign_;
  }

  // Finalize data size.
  void
  set_final_data_size();

  // Reset address and file offset.
  void
  do_reset_address_and_file_offset();

  // Output offset.  Offsets within the original input section map
  // unchanged; offsets past the original section size are rejected.
  bool
  do_output_offset(const Relobj* object, unsigned int shndx,
                   section_offset_type offset,
                   section_offset_type* poutput) const
  {
    if ((object == this->relobj())
        && (shndx == this->shndx())
        && (offset >= 0)
        && (convert_types<uint64_t, section_offset_type>(offset)
            <= this->original_size_))
      {
        *poutput = offset;
        return true;
      }
    else
      return false;
  }

 private:
  // Copying is not allowed.
  Arm_input_section(const Arm_input_section&);
  Arm_input_section& operator=(const Arm_input_section&);

  // Address alignment of the original input section.
  uint64_t original_addralign_;
  // Section size of the original input section.
  uint64_t original_size_;
  // Stub table attached to this input section, or NULL.
  Stub_table<big_endian>* stub_table_;
};
1122
1123 // Arm output section class. This is defined mainly to add a number of
1124 // stub generation methods.
1125
template<bool big_endian>
class Arm_output_section : public Output_section
{
 public:
  Arm_output_section(const char* name, elfcpp::Elf_Word type,
                     elfcpp::Elf_Xword flags)
    : Output_section(name, type, flags)
  { }

  ~Arm_output_section()
  { }

  // Group input sections for stub generation.
  void
  group_sections(section_size_type, bool, Target_arm<big_endian>*);

  // Downcast a base pointer to an Arm_output_section pointer.  This is
  // not type-safe but we only use Arm_output_section not the base class.
  static Arm_output_section<big_endian>*
  as_arm_output_section(Output_section* os)
  { return static_cast<Arm_output_section<big_endian>*>(os); }

 private:
  // For convenience.
  typedef Output_section::Input_section Input_section;
  typedef Output_section::Input_section_list Input_section_list;

  // Create a stub group spanning a range of input sections; new relaxed
  // sections created in the process are appended to the vector argument.
  void create_stub_group(Input_section_list::const_iterator,
                         Input_section_list::const_iterator,
                         Input_section_list::const_iterator,
                         Target_arm<big_endian>*,
                         std::vector<Output_relaxed_input_section*>*);
};
1160
1161 // Arm_relobj class.
1162
1163 template<bool big_endian>
1164 class Arm_relobj : public Sized_relobj<32, big_endian>
1165 {
1166 public:
1167 static const Arm_address invalid_address = static_cast<Arm_address>(-1);
1168
1169 Arm_relobj(const std::string& name, Input_file* input_file, off_t offset,
1170 const typename elfcpp::Ehdr<32, big_endian>& ehdr)
1171 : Sized_relobj<32, big_endian>(name, input_file, offset, ehdr),
1172 stub_tables_(), local_symbol_is_thumb_function_(),
1173 attributes_section_data_(NULL), mapping_symbols_info_(),
1174 section_has_cortex_a8_workaround_(NULL)
1175 { }
1176
1177 ~Arm_relobj()
1178 { delete this->attributes_section_data_; }
1179
1180 // Return the stub table of the SHNDX-th section if there is one.
1181 Stub_table<big_endian>*
1182 stub_table(unsigned int shndx) const
1183 {
1184 gold_assert(shndx < this->stub_tables_.size());
1185 return this->stub_tables_[shndx];
1186 }
1187
1188 // Set STUB_TABLE to be the stub_table of the SHNDX-th section.
1189 void
1190 set_stub_table(unsigned int shndx, Stub_table<big_endian>* stub_table)
1191 {
1192 gold_assert(shndx < this->stub_tables_.size());
1193 this->stub_tables_[shndx] = stub_table;
1194 }
1195
1196 // Whether a local symbol is a THUMB function. R_SYM is the symbol table
1197 // index. This is only valid after do_count_local_symbol is called.
1198 bool
1199 local_symbol_is_thumb_function(unsigned int r_sym) const
1200 {
1201 gold_assert(r_sym < this->local_symbol_is_thumb_function_.size());
1202 return this->local_symbol_is_thumb_function_[r_sym];
1203 }
1204
1205 // Scan all relocation sections for stub generation.
1206 void
1207 scan_sections_for_stubs(Target_arm<big_endian>*, const Symbol_table*,
1208 const Layout*);
1209
1210 // Convert regular input section with index SHNDX to a relaxed section.
1211 void
1212 convert_input_section_to_relaxed_section(unsigned shndx)
1213 {
1214 // The stubs have relocations and we need to process them after writing
1215 // out the stubs. So relocation now must follow section write.
1216 this->invalidate_section_offset(shndx);
1217 this->set_relocs_must_follow_section_writes();
1218 }
1219
1220 // Downcast a base pointer to an Arm_relobj pointer. This is
1221 // not type-safe but we only use Arm_relobj not the base class.
1222 static Arm_relobj<big_endian>*
1223 as_arm_relobj(Relobj* relobj)
1224 { return static_cast<Arm_relobj<big_endian>*>(relobj); }
1225
1226 // Processor-specific flags in ELF file header. This is valid only after
1227 // reading symbols.
1228 elfcpp::Elf_Word
1229 processor_specific_flags() const
1230 { return this->processor_specific_flags_; }
1231
1232 // Attribute section data This is the contents of the .ARM.attribute section
1233 // if there is one.
1234 const Attributes_section_data*
1235 attributes_section_data() const
1236 { return this->attributes_section_data_; }
1237
1238 // Mapping symbol location.
1239 typedef std::pair<unsigned int, Arm_address> Mapping_symbol_position;
1240
1241 // Functor for STL container.
1242 struct Mapping_symbol_position_less
1243 {
1244 bool
1245 operator()(const Mapping_symbol_position& p1,
1246 const Mapping_symbol_position& p2) const
1247 {
1248 return (p1.first < p2.first
1249 || (p1.first == p2.first && p1.second < p2.second));
1250 }
1251 };
1252
1253 // We only care about the first character of a mapping symbol, so
1254 // we only store that instead of the whole symbol name.
1255 typedef std::map<Mapping_symbol_position, char,
1256 Mapping_symbol_position_less> Mapping_symbols_info;
1257
1258 // Whether a section contains any Cortex-A8 workaround.
1259 bool
1260 section_has_cortex_a8_workaround(unsigned int shndx) const
1261 {
1262 return (this->section_has_cortex_a8_workaround_ != NULL
1263 && (*this->section_has_cortex_a8_workaround_)[shndx]);
1264 }
1265
1266 // Mark a section that has Cortex-A8 workaround.
1267 void
1268 mark_section_for_cortex_a8_workaround(unsigned int shndx)
1269 {
1270 if (this->section_has_cortex_a8_workaround_ == NULL)
1271 this->section_has_cortex_a8_workaround_ =
1272 new std::vector<bool>(this->shnum(), false);
1273 (*this->section_has_cortex_a8_workaround_)[shndx] = true;
1274 }
1275
1276 protected:
1277 // Post constructor setup.
1278 void
1279 do_setup()
1280 {
1281 // Call parent's setup method.
1282 Sized_relobj<32, big_endian>::do_setup();
1283
1284 // Initialize look-up tables.
1285 Stub_table_list empty_stub_table_list(this->shnum(), NULL);
1286 this->stub_tables_.swap(empty_stub_table_list);
1287 }
1288
1289 // Count the local symbols.
1290 void
1291 do_count_local_symbols(Stringpool_template<char>*,
1292 Stringpool_template<char>*);
1293
1294 void
1295 do_relocate_sections(const Symbol_table* symtab, const Layout* layout,
1296 const unsigned char* pshdrs,
1297 typename Sized_relobj<32, big_endian>::Views* pivews);
1298
1299 // Read the symbol information.
1300 void
1301 do_read_symbols(Read_symbols_data* sd);
1302
1303 // Process relocs for garbage collection.
1304 void
1305 do_gc_process_relocs(Symbol_table*, Layout*, Read_relocs_data*);
1306
1307 private:
1308
1309 // Whether a section needs to be scanned for relocation stubs.
1310 bool
1311 section_needs_reloc_stub_scanning(const elfcpp::Shdr<32, big_endian>&,
1312 const Relobj::Output_sections&,
1313 const Symbol_table *);
1314
1315 // Whether a section needs to be scanned for the Cortex-A8 erratum.
1316 bool
1317 section_needs_cortex_a8_stub_scanning(const elfcpp::Shdr<32, big_endian>&,
1318 unsigned int, Output_section*,
1319 const Symbol_table *);
1320
1321 // Scan a section for the Cortex-A8 erratum.
1322 void
1323 scan_section_for_cortex_a8_erratum(const elfcpp::Shdr<32, big_endian>&,
1324 unsigned int, Output_section*,
1325 Target_arm<big_endian>*);
1326
1327 // List of stub tables.
1328 typedef std::vector<Stub_table<big_endian>*> Stub_table_list;
1329 Stub_table_list stub_tables_;
1330 // Bit vector to tell if a local symbol is a thumb function or not.
1331 // This is only valid after do_count_local_symbol is called.
1332 std::vector<bool> local_symbol_is_thumb_function_;
1333 // processor-specific flags in ELF file header.
1334 elfcpp::Elf_Word processor_specific_flags_;
1335 // Object attributes if there is an .ARM.attributes section or NULL.
1336 Attributes_section_data* attributes_section_data_;
1337 // Mapping symbols information.
1338 Mapping_symbols_info mapping_symbols_info_;
1339 // Bitmap to indicate sections with Cortex-A8 workaround or NULL.
1340 std::vector<bool>* section_has_cortex_a8_workaround_;
1341 };
1342
1343 // Arm_dynobj class.
1344
template<bool big_endian>
class Arm_dynobj : public Sized_dynobj<32, big_endian>
{
 public:
  Arm_dynobj(const std::string& name, Input_file* input_file, off_t offset,
             const elfcpp::Ehdr<32, big_endian>& ehdr)
    : Sized_dynobj<32, big_endian>(name, input_file, offset, ehdr),
      processor_specific_flags_(0), attributes_section_data_(NULL)
  { }

  ~Arm_dynobj()
  { delete this->attributes_section_data_; }

  // Downcast a base pointer to an Arm_dynobj pointer.  This is
  // not type-safe but we only use Arm_dynobj not the base class.
  static Arm_dynobj<big_endian>*
  as_arm_dynobj(Dynobj* dynobj)
  { return static_cast<Arm_dynobj<big_endian>*>(dynobj); }

  // Processor-specific flags in ELF file header.  This is valid only after
  // reading symbols.
  elfcpp::Elf_Word
  processor_specific_flags() const
  { return this->processor_specific_flags_; }

  // Attributes section data, or NULL if there is no .ARM.attributes
  // section.
  const Attributes_section_data*
  attributes_section_data() const
  { return this->attributes_section_data_; }

 protected:
  // Read the symbol information.
  void
  do_read_symbols(Read_symbols_data* sd);

 private:
  // processor-specific flags in ELF file header.
  elfcpp::Elf_Word processor_specific_flags_;
  // Object attributes if there is an .ARM.attributes section or NULL.
  Attributes_section_data* attributes_section_data_;
};
1386
1387 // Functor to read reloc addends during stub generation.
1388
template<int sh_type, bool big_endian>
struct Stub_addend_reader
{
  // Return the addend for a relocation of a particular type.  Depending
  // on whether this is a REL or RELA relocation, read the addend from a
  // view or from a Reloc object.  Only declared here; the SHT_REL and
  // SHT_RELA specializations below supply the implementations.
  elfcpp::Elf_types<32>::Elf_Swxword
  operator()(
      unsigned int /* r_type */,
      const unsigned char* /* view */,
      const typename Reloc_types<sh_type,
                                 32, big_endian>::Reloc& /* reloc */) const;
};
1402
1403 // Specialized Stub_addend_reader for SHT_REL type relocation sections.
1404
template<bool big_endian>
struct Stub_addend_reader<elfcpp::SHT_REL, big_endian>
{
  // For SHT_REL sections the addend is implicit in the section contents,
  // so it must be extracted from the view.  Defined out of line.
  elfcpp::Elf_types<32>::Elf_Swxword
  operator()(
      unsigned int,
      const unsigned char*,
      const typename Reloc_types<elfcpp::SHT_REL, 32, big_endian>::Reloc&) const;
};
1414
1415 // Specialized Stub_addend_reader for RELA type relocation sections.
1416 // We currently do not handle RELA type relocation sections but it is trivial
1417 // to implement the addend reader. This is provided for completeness and to
1418 // make it easier to add support for RELA relocation sections in the future.
1419
template<bool big_endian>
struct Stub_addend_reader<elfcpp::SHT_RELA, big_endian>
{
  // For SHT_RELA sections the addend is stored explicitly in the
  // relocation entry, so simply return it.
  elfcpp::Elf_types<32>::Elf_Swxword
  operator()(
      unsigned int,
      const unsigned char*,
      const typename Reloc_types<elfcpp::SHT_RELA, 32,
                                 big_endian>::Reloc& reloc) const
  { return reloc.get_r_addend(); }
};
1431
1432 // Cortex_a8_reloc class. We keep record of relocation that may need
1433 // the Cortex-A8 erratum workaround.
1434
1435 class Cortex_a8_reloc
1436 {
1437 public:
1438 Cortex_a8_reloc(Reloc_stub* reloc_stub, unsigned r_type,
1439 Arm_address destination)
1440 : reloc_stub_(reloc_stub), r_type_(r_type), destination_(destination)
1441 { }
1442
1443 ~Cortex_a8_reloc()
1444 { }
1445
1446 // Accessors: This is a read-only class.
1447
1448 // Return the relocation stub associated with this relocation if there is
1449 // one.
1450 const Reloc_stub*
1451 reloc_stub() const
1452 { return this->reloc_stub_; }
1453
1454 // Return the relocation type.
1455 unsigned int
1456 r_type() const
1457 { return this->r_type_; }
1458
1459 // Return the destination address of the relocation. LSB stores the THUMB
1460 // bit.
1461 Arm_address
1462 destination() const
1463 { return this->destination_; }
1464
1465 private:
1466 // Associated relocation stub if there is one, or NULL.
1467 const Reloc_stub* reloc_stub_;
1468 // Relocation type.
1469 unsigned int r_type_;
1470 // Destination address of this relocation. LSB is used to distinguish
1471 // ARM/THUMB mode.
1472 Arm_address destination_;
1473 };
1474
1475 // Utilities for manipulating integers of up to 32-bits
1476
1477 namespace utils
1478 {
1479 // Sign extend an n-bit unsigned integer stored in an uint32_t into
1480 // an int32_t. NO_BITS must be between 1 to 32.
1481 template<int no_bits>
1482 static inline int32_t
1483 sign_extend(uint32_t bits)
1484 {
1485 gold_assert(no_bits >= 0 && no_bits <= 32);
1486 if (no_bits == 32)
1487 return static_cast<int32_t>(bits);
1488 uint32_t mask = (~((uint32_t) 0)) >> (32 - no_bits);
1489 bits &= mask;
1490 uint32_t top_bit = 1U << (no_bits - 1);
1491 int32_t as_signed = static_cast<int32_t>(bits);
1492 return (bits & top_bit) ? as_signed + (-top_bit * 2) : as_signed;
1493 }
1494
1495 // Detects overflow of an NO_BITS integer stored in a uint32_t.
1496 template<int no_bits>
1497 static inline bool
1498 has_overflow(uint32_t bits)
1499 {
1500 gold_assert(no_bits >= 0 && no_bits <= 32);
1501 if (no_bits == 32)
1502 return false;
1503 int32_t max = (1 << (no_bits - 1)) - 1;
1504 int32_t min = -(1 << (no_bits - 1));
1505 int32_t as_signed = static_cast<int32_t>(bits);
1506 return as_signed > max || as_signed < min;
1507 }
1508
1509 // Detects overflow of an NO_BITS integer stored in a uint32_t when it
1510 // fits in the given number of bits as either a signed or unsigned value.
1511 // For example, has_signed_unsigned_overflow<8> would check
1512 // -128 <= bits <= 255
1513 template<int no_bits>
1514 static inline bool
1515 has_signed_unsigned_overflow(uint32_t bits)
1516 {
1517 gold_assert(no_bits >= 2 && no_bits <= 32);
1518 if (no_bits == 32)
1519 return false;
1520 int32_t max = static_cast<int32_t>((1U << no_bits) - 1);
1521 int32_t min = -(1 << (no_bits - 1));
1522 int32_t as_signed = static_cast<int32_t>(bits);
1523 return as_signed > max || as_signed < min;
1524 }
1525
1526 // Select bits from A and B using bits in MASK. For each n in [0..31],
1527 // the n-th bit in the result is chosen from the n-th bits of A and B.
1528 // A zero selects A and a one selects B.
1529 static inline uint32_t
1530 bit_select(uint32_t a, uint32_t b, uint32_t mask)
1531 { return (a & ~mask) | (b & mask); }
1532 };
1533
1534 template<bool big_endian>
1535 class Target_arm : public Sized_target<32, big_endian>
1536 {
1537 public:
1538 typedef Output_data_reloc<elfcpp::SHT_REL, true, 32, big_endian>
1539 Reloc_section;
1540
1541 // When were are relocating a stub, we pass this as the relocation number.
1542 static const size_t fake_relnum_for_stubs = static_cast<size_t>(-1);
1543
1544 Target_arm()
1545 : Sized_target<32, big_endian>(&arm_info),
1546 got_(NULL), plt_(NULL), got_plt_(NULL), rel_dyn_(NULL),
1547 copy_relocs_(elfcpp::R_ARM_COPY), dynbss_(NULL), stub_tables_(),
1548 stub_factory_(Stub_factory::get_instance()), may_use_blx_(false),
1549 should_force_pic_veneer_(false), arm_input_section_map_(),
1550 attributes_section_data_(NULL), fix_cortex_a8_(false),
1551 cortex_a8_relocs_info_(), fix_v4bx_(0)
1552 { }
1553
1554 // Whether we can use BLX.
1555 bool
1556 may_use_blx() const
1557 { return this->may_use_blx_; }
1558
1559 // Set use-BLX flag.
1560 void
1561 set_may_use_blx(bool value)
1562 { this->may_use_blx_ = value; }
1563
1564 // Whether we force PCI branch veneers.
1565 bool
1566 should_force_pic_veneer() const
1567 { return this->should_force_pic_veneer_; }
1568
1569 // Set PIC veneer flag.
1570 void
1571 set_should_force_pic_veneer(bool value)
1572 { this->should_force_pic_veneer_ = value; }
1573
1574 // Whether we use THUMB-2 instructions.
1575 bool
1576 using_thumb2() const
1577 {
1578 Object_attribute* attr =
1579 this->get_aeabi_object_attribute(elfcpp::Tag_CPU_arch);
1580 int arch = attr->int_value();
1581 return arch == elfcpp::TAG_CPU_ARCH_V6T2 || arch >= elfcpp::TAG_CPU_ARCH_V7;
1582 }
1583
1584 // Whether we use THUMB/THUMB-2 instructions only.
1585 bool
1586 using_thumb_only() const
1587 {
1588 Object_attribute* attr =
1589 this->get_aeabi_object_attribute(elfcpp::Tag_CPU_arch);
1590 if (attr->int_value() != elfcpp::TAG_CPU_ARCH_V7
1591 && attr->int_value() != elfcpp::TAG_CPU_ARCH_V7E_M)
1592 return false;
1593 attr = this->get_aeabi_object_attribute(elfcpp::Tag_CPU_arch_profile);
1594 return attr->int_value() == 'M';
1595 }
1596
1597 // Whether we have an NOP instruction. If not, use mov r0, r0 instead.
1598 bool
1599 may_use_arm_nop() const
1600 {
1601 Object_attribute* attr =
1602 this->get_aeabi_object_attribute(elfcpp::Tag_CPU_arch);
1603 int arch = attr->int_value();
1604 return (arch == elfcpp::TAG_CPU_ARCH_V6T2
1605 || arch == elfcpp::TAG_CPU_ARCH_V6K
1606 || arch == elfcpp::TAG_CPU_ARCH_V7
1607 || arch == elfcpp::TAG_CPU_ARCH_V7E_M);
1608 }
1609
1610 // Whether we have THUMB-2 NOP.W instruction.
1611 bool
1612 may_use_thumb2_nop() const
1613 {
1614 Object_attribute* attr =
1615 this->get_aeabi_object_attribute(elfcpp::Tag_CPU_arch);
1616 int arch = attr->int_value();
1617 return (arch == elfcpp::TAG_CPU_ARCH_V6T2
1618 || arch == elfcpp::TAG_CPU_ARCH_V7
1619 || arch == elfcpp::TAG_CPU_ARCH_V7E_M);
1620 }
1621
1622 // Process the relocations to determine unreferenced sections for
1623 // garbage collection.
1624 void
1625 gc_process_relocs(Symbol_table* symtab,
1626 Layout* layout,
1627 Sized_relobj<32, big_endian>* object,
1628 unsigned int data_shndx,
1629 unsigned int sh_type,
1630 const unsigned char* prelocs,
1631 size_t reloc_count,
1632 Output_section* output_section,
1633 bool needs_special_offset_handling,
1634 size_t local_symbol_count,
1635 const unsigned char* plocal_symbols);
1636
1637 // Scan the relocations to look for symbol adjustments.
1638 void
1639 scan_relocs(Symbol_table* symtab,
1640 Layout* layout,
1641 Sized_relobj<32, big_endian>* object,
1642 unsigned int data_shndx,
1643 unsigned int sh_type,
1644 const unsigned char* prelocs,
1645 size_t reloc_count,
1646 Output_section* output_section,
1647 bool needs_special_offset_handling,
1648 size_t local_symbol_count,
1649 const unsigned char* plocal_symbols);
1650
1651 // Finalize the sections.
1652 void
1653 do_finalize_sections(Layout*, const Input_objects*, Symbol_table*);
1654
1655 // Return the value to use for a dynamic symbol which requires special
1656 // treatment.
1657 uint64_t
1658 do_dynsym_value(const Symbol*) const;
1659
1660 // Relocate a section.
1661 void
1662 relocate_section(const Relocate_info<32, big_endian>*,
1663 unsigned int sh_type,
1664 const unsigned char* prelocs,
1665 size_t reloc_count,
1666 Output_section* output_section,
1667 bool needs_special_offset_handling,
1668 unsigned char* view,
1669 Arm_address view_address,
1670 section_size_type view_size,
1671 const Reloc_symbol_changes*);
1672
1673 // Scan the relocs during a relocatable link.
1674 void
1675 scan_relocatable_relocs(Symbol_table* symtab,
1676 Layout* layout,
1677 Sized_relobj<32, big_endian>* object,
1678 unsigned int data_shndx,
1679 unsigned int sh_type,
1680 const unsigned char* prelocs,
1681 size_t reloc_count,
1682 Output_section* output_section,
1683 bool needs_special_offset_handling,
1684 size_t local_symbol_count,
1685 const unsigned char* plocal_symbols,
1686 Relocatable_relocs*);
1687
1688 // Relocate a section during a relocatable link.
1689 void
1690 relocate_for_relocatable(const Relocate_info<32, big_endian>*,
1691 unsigned int sh_type,
1692 const unsigned char* prelocs,
1693 size_t reloc_count,
1694 Output_section* output_section,
1695 off_t offset_in_output_section,
1696 const Relocatable_relocs*,
1697 unsigned char* view,
1698 Arm_address view_address,
1699 section_size_type view_size,
1700 unsigned char* reloc_view,
1701 section_size_type reloc_view_size);
1702
1703 // Return whether SYM is defined by the ABI.
1704 bool
1705 do_is_defined_by_abi(Symbol* sym) const
1706 { return strcmp(sym->name(), "__tls_get_addr") == 0; }
1707
1708 // Return the size of the GOT section.
1709 section_size_type
1710 got_size()
1711 {
1712 gold_assert(this->got_ != NULL);
1713 return this->got_->data_size();
1714 }
1715
1716 // Map platform-specific reloc types
1717 static unsigned int
1718 get_real_reloc_type (unsigned int r_type);
1719
1720 //
1721 // Methods to support stub-generations.
1722 //
1723
1724 // Return the stub factory
1725 const Stub_factory&
1726 stub_factory() const
1727 { return this->stub_factory_; }
1728
1729 // Make a new Arm_input_section object.
1730 Arm_input_section<big_endian>*
1731 new_arm_input_section(Relobj*, unsigned int);
1732
1733 // Find the Arm_input_section object corresponding to the SHNDX-th input
1734 // section of RELOBJ.
1735 Arm_input_section<big_endian>*
1736 find_arm_input_section(Relobj* relobj, unsigned int shndx) const;
1737
1738 // Make a new Stub_table
1739 Stub_table<big_endian>*
1740 new_stub_table(Arm_input_section<big_endian>*);
1741
1742 // Scan a section for stub generation.
1743 void
1744 scan_section_for_stubs(const Relocate_info<32, big_endian>*, unsigned int,
1745 const unsigned char*, size_t, Output_section*,
1746 bool, const unsigned char*, Arm_address,
1747 section_size_type);
1748
1749 // Relocate a stub.
1750 void
1751 relocate_stub(Stub*, const Relocate_info<32, big_endian>*,
1752 Output_section*, unsigned char*, Arm_address,
1753 section_size_type);
1754
1755 // Get the default ARM target.
1756 static Target_arm<big_endian>*
1757 default_target()
1758 {
1759 gold_assert(parameters->target().machine_code() == elfcpp::EM_ARM
1760 && parameters->target().is_big_endian() == big_endian);
1761 return static_cast<Target_arm<big_endian>*>(
1762 parameters->sized_target<32, big_endian>());
1763 }
1764
1765 // Whether relocation type uses LSB to distinguish THUMB addresses.
1766 static bool
1767 reloc_uses_thumb_bit(unsigned int r_type);
1768
1769 // Whether NAME belongs to a mapping symbol.
1770 static bool
1771 is_mapping_symbol_name(const char* name)
1772 {
1773 return (name
1774 && name[0] == '$'
1775 && (name[1] == 'a' || name[1] == 't' || name[1] == 'd')
1776 && (name[2] == '\0' || name[2] == '.'));
1777 }
1778
1779 // Whether we work around the Cortex-A8 erratum.
1780 bool
1781 fix_cortex_a8() const
1782 { return this->fix_cortex_a8_; }
1783
1784 // Whether we fix R_ARM_V4BX relocation.
1785 // 0 - do not fix
1786 // 1 - replace with MOV instruction (armv4 target)
1787 // 2 - make interworking veneer (>= armv4t targets only)
1788 int
1789 fix_v4bx() const
1790 { return this->fix_v4bx_; }
1791
1792 // Scan a span of THUMB code section for Cortex-A8 erratum.
1793 void
1794 scan_span_for_cortex_a8_erratum(Arm_relobj<big_endian>*, unsigned int,
1795 section_size_type, section_size_type,
1796 const unsigned char*, Arm_address);
1797
1798 // Apply Cortex-A8 workaround to a branch.
1799 void
1800 apply_cortex_a8_workaround(const Cortex_a8_stub*, Arm_address,
1801 unsigned char*, Arm_address);
1802
1803 protected:
1804 // Make an ELF object.
1805 Object*
1806 do_make_elf_object(const std::string&, Input_file*, off_t,
1807 const elfcpp::Ehdr<32, big_endian>& ehdr);
1808
1809 Object*
1810 do_make_elf_object(const std::string&, Input_file*, off_t,
1811 const elfcpp::Ehdr<32, !big_endian>&)
1812 { gold_unreachable(); }
1813
1814 Object*
1815 do_make_elf_object(const std::string&, Input_file*, off_t,
1816 const elfcpp::Ehdr<64, false>&)
1817 { gold_unreachable(); }
1818
1819 Object*
1820 do_make_elf_object(const std::string&, Input_file*, off_t,
1821 const elfcpp::Ehdr<64, true>&)
1822 { gold_unreachable(); }
1823
1824 // Make an output section.
1825 Output_section*
1826 do_make_output_section(const char* name, elfcpp::Elf_Word type,
1827 elfcpp::Elf_Xword flags)
1828 { return new Arm_output_section<big_endian>(name, type, flags); }
1829
1830 void
1831 do_adjust_elf_header(unsigned char* view, int len) const;
1832
1833 // We only need to generate stubs, and hence perform relaxation if we are
1834 // not doing relocatable linking.
1835 bool
1836 do_may_relax() const
1837 { return !parameters->options().relocatable(); }
1838
1839 bool
1840 do_relax(int, const Input_objects*, Symbol_table*, Layout*);
1841
1842 // Determine whether an object attribute tag takes an integer, a
1843 // string or both.
1844 int
1845 do_attribute_arg_type(int tag) const;
1846
1847 // Reorder tags during output.
1848 int
1849 do_attributes_order(int num) const;
1850
1851 private:
1852 // The class which scans relocations.
1853 class Scan
1854 {
1855 public:
1856 Scan()
1857 : issued_non_pic_error_(false)
1858 { }
1859
1860 inline void
1861 local(Symbol_table* symtab, Layout* layout, Target_arm* target,
1862 Sized_relobj<32, big_endian>* object,
1863 unsigned int data_shndx,
1864 Output_section* output_section,
1865 const elfcpp::Rel<32, big_endian>& reloc, unsigned int r_type,
1866 const elfcpp::Sym<32, big_endian>& lsym);
1867
1868 inline void
1869 global(Symbol_table* symtab, Layout* layout, Target_arm* target,
1870 Sized_relobj<32, big_endian>* object,
1871 unsigned int data_shndx,
1872 Output_section* output_section,
1873 const elfcpp::Rel<32, big_endian>& reloc, unsigned int r_type,
1874 Symbol* gsym);
1875
1876 private:
1877 static void
1878 unsupported_reloc_local(Sized_relobj<32, big_endian>*,
1879 unsigned int r_type);
1880
1881 static void
1882 unsupported_reloc_global(Sized_relobj<32, big_endian>*,
1883 unsigned int r_type, Symbol*);
1884
1885 void
1886 check_non_pic(Relobj*, unsigned int r_type);
1887
1888 // Almost identical to Symbol::needs_plt_entry except that it also
1889 // handles STT_ARM_TFUNC.
1890 static bool
1891 symbol_needs_plt_entry(const Symbol* sym)
1892 {
1893 // An undefined symbol from an executable does not need a PLT entry.
1894 if (sym->is_undefined() && !parameters->options().shared())
1895 return false;
1896
1897 return (!parameters->doing_static_link()
1898 && (sym->type() == elfcpp::STT_FUNC
1899 || sym->type() == elfcpp::STT_ARM_TFUNC)
1900 && (sym->is_from_dynobj()
1901 || sym->is_undefined()
1902 || sym->is_preemptible()));
1903 }
1904
1905 // Whether we have issued an error about a non-PIC compilation.
1906 bool issued_non_pic_error_;
1907 };
1908
1909 // The class which implements relocation.
1910 class Relocate
1911 {
1912 public:
1913 Relocate()
1914 { }
1915
1916 ~Relocate()
1917 { }
1918
1919 // Return whether the static relocation needs to be applied.
1920 inline bool
1921 should_apply_static_reloc(const Sized_symbol<32>* gsym,
1922 int ref_flags,
1923 bool is_32bit,
1924 Output_section* output_section);
1925
1926 // Do a relocation. Return false if the caller should not issue
1927 // any warnings about this relocation.
1928 inline bool
1929 relocate(const Relocate_info<32, big_endian>*, Target_arm*,
1930 Output_section*, size_t relnum,
1931 const elfcpp::Rel<32, big_endian>&,
1932 unsigned int r_type, const Sized_symbol<32>*,
1933 const Symbol_value<32>*,
1934 unsigned char*, Arm_address,
1935 section_size_type);
1936
1937 // Return whether we want to pass flag NON_PIC_REF for this
1938 // reloc. This means the relocation type accesses a symbol not via
1939 // GOT or PLT.
1940 static inline bool
1941 reloc_is_non_pic (unsigned int r_type)
1942 {
1943 switch (r_type)
1944 {
1945 // These relocation types reference GOT or PLT entries explicitly.
1946 case elfcpp::R_ARM_GOT_BREL:
1947 case elfcpp::R_ARM_GOT_ABS:
1948 case elfcpp::R_ARM_GOT_PREL:
1949 case elfcpp::R_ARM_GOT_BREL12:
1950 case elfcpp::R_ARM_PLT32_ABS:
1951 case elfcpp::R_ARM_TLS_GD32:
1952 case elfcpp::R_ARM_TLS_LDM32:
1953 case elfcpp::R_ARM_TLS_IE32:
1954 case elfcpp::R_ARM_TLS_IE12GP:
1955
1956 // These relocate types may use PLT entries.
1957 case elfcpp::R_ARM_CALL:
1958 case elfcpp::R_ARM_THM_CALL:
1959 case elfcpp::R_ARM_JUMP24:
1960 case elfcpp::R_ARM_THM_JUMP24:
1961 case elfcpp::R_ARM_THM_JUMP19:
1962 case elfcpp::R_ARM_PLT32:
1963 case elfcpp::R_ARM_THM_XPC22:
1964 return false;
1965
1966 default:
1967 return true;
1968 }
1969 }
1970 };
1971
1972 // A class which returns the size required for a relocation type,
1973 // used while scanning relocs during a relocatable link.
1974 class Relocatable_size_for_reloc
1975 {
1976 public:
1977 unsigned int
1978 get_size_for_reloc(unsigned int, Relobj*);
1979 };
1980
1981 // Get the GOT section, creating it if necessary.
1982 Output_data_got<32, big_endian>*
1983 got_section(Symbol_table*, Layout*);
1984
1985 // Get the GOT PLT section.
1986 Output_data_space*
1987 got_plt_section() const
1988 {
1989 gold_assert(this->got_plt_ != NULL);
1990 return this->got_plt_;
1991 }
1992
1993 // Create a PLT entry for a global symbol.
1994 void
1995 make_plt_entry(Symbol_table*, Layout*, Symbol*);
1996
1997 // Get the PLT section.
1998 const Output_data_plt_arm<big_endian>*
1999 plt_section() const
2000 {
2001 gold_assert(this->plt_ != NULL);
2002 return this->plt_;
2003 }
2004
2005 // Get the dynamic reloc section, creating it if necessary.
2006 Reloc_section*
2007 rel_dyn_section(Layout*);
2008
2009 // Return true if the symbol may need a COPY relocation.
2010 // References from an executable object to non-function symbols
2011 // defined in a dynamic object may need a COPY relocation.
2012 bool
2013 may_need_copy_reloc(Symbol* gsym)
2014 {
2015 return (gsym->type() != elfcpp::STT_ARM_TFUNC
2016 && gsym->may_need_copy_reloc());
2017 }
2018
2019 // Add a potential copy relocation.
2020 void
2021 copy_reloc(Symbol_table* symtab, Layout* layout,
2022 Sized_relobj<32, big_endian>* object,
2023 unsigned int shndx, Output_section* output_section,
2024 Symbol* sym, const elfcpp::Rel<32, big_endian>& reloc)
2025 {
2026 this->copy_relocs_.copy_reloc(symtab, layout,
2027 symtab->get_sized_symbol<32>(sym),
2028 object, shndx, output_section, reloc,
2029 this->rel_dyn_section(layout));
2030 }
2031
2032 // Whether two EABI versions are compatible.
2033 static bool
2034 are_eabi_versions_compatible(elfcpp::Elf_Word v1, elfcpp::Elf_Word v2);
2035
2036 // Merge processor-specific flags from input object and those in the ELF
2037 // header of the output.
2038 void
2039 merge_processor_specific_flags(const std::string&, elfcpp::Elf_Word);
2040
2041 // Get the secondary compatible architecture.
2042 static int
2043 get_secondary_compatible_arch(const Attributes_section_data*);
2044
2045 // Set the secondary compatible architecture.
2046 static void
2047 set_secondary_compatible_arch(Attributes_section_data*, int);
2048
2049 static int
2050 tag_cpu_arch_combine(const char*, int, int*, int, int);
2051
2052 // Helper to print AEABI enum tag value.
2053 static std::string
2054 aeabi_enum_name(unsigned int);
2055
2056 // Return string value for TAG_CPU_name.
2057 static std::string
2058 tag_cpu_name_value(unsigned int);
2059
2060 // Merge object attributes from input object and those in the output.
2061 void
2062 merge_object_attributes(const char*, const Attributes_section_data*);
2063
2064 // Helper to get an AEABI object attribute
2065 Object_attribute*
2066 get_aeabi_object_attribute(int tag) const
2067 {
2068 Attributes_section_data* pasd = this->attributes_section_data_;
2069 gold_assert(pasd != NULL);
2070 Object_attribute* attr =
2071 pasd->get_attribute(Object_attribute::OBJ_ATTR_PROC, tag);
2072 gold_assert(attr != NULL);
2073 return attr;
2074 }
2075
2076 //
2077 // Methods to support stub-generations.
2078 //
2079
2080 // Group input sections for stub generation.
2081 void
2082 group_sections(Layout*, section_size_type, bool);
2083
2084 // Scan a relocation for stub generation.
2085 void
2086 scan_reloc_for_stub(const Relocate_info<32, big_endian>*, unsigned int,
2087 const Sized_symbol<32>*, unsigned int,
2088 const Symbol_value<32>*,
2089 elfcpp::Elf_types<32>::Elf_Swxword, Arm_address);
2090
2091 // Scan a relocation section for stub.
2092 template<int sh_type>
2093 void
2094 scan_reloc_section_for_stubs(
2095 const Relocate_info<32, big_endian>* relinfo,
2096 const unsigned char* prelocs,
2097 size_t reloc_count,
2098 Output_section* output_section,
2099 bool needs_special_offset_handling,
2100 const unsigned char* view,
2101 elfcpp::Elf_types<32>::Elf_Addr view_address,
2102 section_size_type);
2103
2104 // Information about this specific target which we pass to the
2105 // general Target structure.
2106 static const Target::Target_info arm_info;
2107
2108 // The types of GOT entries needed for this platform.
2109 enum Got_type
2110 {
2111 GOT_TYPE_STANDARD = 0 // GOT entry for a regular symbol
2112 };
2113
2114 typedef typename std::vector<Stub_table<big_endian>*> Stub_table_list;
2115
2116 // Map input section to Arm_input_section.
2117 typedef Unordered_map<Section_id,
2118 Arm_input_section<big_endian>*,
2119 Section_id_hash>
2120 Arm_input_section_map;
2121
2122 // Map output addresses to relocs for Cortex-A8 erratum.
2123 typedef Unordered_map<Arm_address, const Cortex_a8_reloc*>
2124 Cortex_a8_relocs_info;
2125
2126 // The GOT section.
2127 Output_data_got<32, big_endian>* got_;
2128 // The PLT section.
2129 Output_data_plt_arm<big_endian>* plt_;
2130 // The GOT PLT section.
2131 Output_data_space* got_plt_;
2132 // The dynamic reloc section.
2133 Reloc_section* rel_dyn_;
2134 // Relocs saved to avoid a COPY reloc.
2135 Copy_relocs<elfcpp::SHT_REL, 32, big_endian> copy_relocs_;
2136 // Space for variables copied with a COPY reloc.
2137 Output_data_space* dynbss_;
2138 // Vector of Stub_tables created.
2139 Stub_table_list stub_tables_;
2140 // Stub factory.
2141 const Stub_factory &stub_factory_;
2142 // Whether we can use BLX.
2143 bool may_use_blx_;
2144 // Whether we force PIC branch veneers.
2145 bool should_force_pic_veneer_;
2146 // Map for locating Arm_input_sections.
2147 Arm_input_section_map arm_input_section_map_;
2148 // Attributes section data in output.
2149 Attributes_section_data* attributes_section_data_;
2150 // Whether we want to fix code for Cortex-A8 erratum.
2151 bool fix_cortex_a8_;
2152 // Map addresses to relocs for Cortex-A8 erratum.
2153 Cortex_a8_relocs_info cortex_a8_relocs_info_;
2154 // Whether we need to fix code for V4BX relocations.
2155 int fix_v4bx_;
2156 };
2157
// Target-specific properties of the ARM target, handed to the generic
// gold Target machinery.

template<bool big_endian>
const Target::Target_info Target_arm<big_endian>::arm_info =
{
  32,			// size
  big_endian,		// is_big_endian
  elfcpp::EM_ARM,	// machine_code
  false,		// has_make_symbol
  false,		// has_resolve
  false,		// has_code_fill
  true,			// is_default_stack_executable
  '\0',			// wrap_char
  "/usr/lib/libc.so.1",	// dynamic_linker
  0x8000,		// default_text_segment_address
  0x1000,		// abi_pagesize (overridable by -z max-page-size)
  0x1000,		// common_pagesize (overridable by -z common-page-size)
  elfcpp::SHN_UNDEF,	// small_common_shndx
  elfcpp::SHN_UNDEF,	// large_common_shndx
  0,			// small_common_section_flags
  0,			// large_common_section_flags
  ".ARM.attributes",	// attributes_section
  "aeabi"		// attributes_vendor
};
2180
2181 // Arm relocate functions class
2182 //
2183
2184 template<bool big_endian>
2185 class Arm_relocate_functions : public Relocate_functions<32, big_endian>
2186 {
2187 public:
2188 typedef enum
2189 {
2190 STATUS_OKAY, // No error during relocation.
2191 STATUS_OVERFLOW, // Relocation oveflow.
2192 STATUS_BAD_RELOC // Relocation cannot be applied.
2193 } Status;
2194
2195 private:
2196 typedef Relocate_functions<32, big_endian> Base;
2197 typedef Arm_relocate_functions<big_endian> This;
2198
2199 // Encoding of imm16 argument for movt and movw ARM instructions
2200 // from ARM ARM:
2201 //
2202 // imm16 := imm4 | imm12
2203 //
2204 // f e d c b a 9 8 7 6 5 4 3 2 1 0 f e d c b a 9 8 7 6 5 4 3 2 1 0
2205 // +-------+---------------+-------+-------+-----------------------+
2206 // | | |imm4 | |imm12 |
2207 // +-------+---------------+-------+-------+-----------------------+
2208
2209 // Extract the relocation addend from VAL based on the ARM
2210 // instruction encoding described above.
2211 static inline typename elfcpp::Swap<32, big_endian>::Valtype
2212 extract_arm_movw_movt_addend(
2213 typename elfcpp::Swap<32, big_endian>::Valtype val)
2214 {
2215 // According to the Elf ABI for ARM Architecture the immediate
2216 // field is sign-extended to form the addend.
2217 return utils::sign_extend<16>(((val >> 4) & 0xf000) | (val & 0xfff));
2218 }
2219
2220 // Insert X into VAL based on the ARM instruction encoding described
2221 // above.
2222 static inline typename elfcpp::Swap<32, big_endian>::Valtype
2223 insert_val_arm_movw_movt(
2224 typename elfcpp::Swap<32, big_endian>::Valtype val,
2225 typename elfcpp::Swap<32, big_endian>::Valtype x)
2226 {
2227 val &= 0xfff0f000;
2228 val |= x & 0x0fff;
2229 val |= (x & 0xf000) << 4;
2230 return val;
2231 }
2232
2233 // Encoding of imm16 argument for movt and movw Thumb2 instructions
2234 // from ARM ARM:
2235 //
2236 // imm16 := imm4 | i | imm3 | imm8
2237 //
2238 // f e d c b a 9 8 7 6 5 4 3 2 1 0 f e d c b a 9 8 7 6 5 4 3 2 1 0
2239 // +---------+-+-----------+-------++-+-----+-------+---------------+
2240 // | |i| |imm4 || |imm3 | |imm8 |
2241 // +---------+-+-----------+-------++-+-----+-------+---------------+
2242
2243 // Extract the relocation addend from VAL based on the Thumb2
2244 // instruction encoding described above.
2245 static inline typename elfcpp::Swap<32, big_endian>::Valtype
2246 extract_thumb_movw_movt_addend(
2247 typename elfcpp::Swap<32, big_endian>::Valtype val)
2248 {
2249 // According to the Elf ABI for ARM Architecture the immediate
2250 // field is sign-extended to form the addend.
2251 return utils::sign_extend<16>(((val >> 4) & 0xf000)
2252 | ((val >> 15) & 0x0800)
2253 | ((val >> 4) & 0x0700)
2254 | (val & 0x00ff));
2255 }
2256
2257 // Insert X into VAL based on the Thumb2 instruction encoding
2258 // described above.
2259 static inline typename elfcpp::Swap<32, big_endian>::Valtype
2260 insert_val_thumb_movw_movt(
2261 typename elfcpp::Swap<32, big_endian>::Valtype val,
2262 typename elfcpp::Swap<32, big_endian>::Valtype x)
2263 {
2264 val &= 0xfbf08f00;
2265 val |= (x & 0xf000) << 4;
2266 val |= (x & 0x0800) << 15;
2267 val |= (x & 0x0700) << 4;
2268 val |= (x & 0x00ff);
2269 return val;
2270 }
2271
2272 // Handle ARM long branches.
2273 static typename This::Status
2274 arm_branch_common(unsigned int, const Relocate_info<32, big_endian>*,
2275 unsigned char *, const Sized_symbol<32>*,
2276 const Arm_relobj<big_endian>*, unsigned int,
2277 const Symbol_value<32>*, Arm_address, Arm_address, bool);
2278
2279 // Handle THUMB long branches.
2280 static typename This::Status
2281 thumb_branch_common(unsigned int, const Relocate_info<32, big_endian>*,
2282 unsigned char *, const Sized_symbol<32>*,
2283 const Arm_relobj<big_endian>*, unsigned int,
2284 const Symbol_value<32>*, Arm_address, Arm_address, bool);
2285
2286 public:
2287
  // Return the branch offset of a 32-bit THUMB branch.
  static inline int32_t
  thumb32_branch_offset(uint16_t upper_insn, uint16_t lower_insn)
  {
    // We use the Thumb-2 encoding (backwards compatible with Thumb-1)
    // involving the J1 and J2 bits.
    uint32_t s = (upper_insn & (1U << 10)) >> 10;
    uint32_t upper = upper_insn & 0x3ffU;
    uint32_t lower = lower_insn & 0x7ffU;
    uint32_t j1 = (lower_insn & (1U << 13)) >> 13;
    uint32_t j2 = (lower_insn & (1U << 11)) >> 11;
    // I1 = NOT(J1 XOR S), I2 = NOT(J2 XOR S).
    uint32_t i1 = j1 ^ s ? 0 : 1;
    uint32_t i2 = j2 ^ s ? 0 : 1;

    // Reassemble the offset as S:I1:I2:imm10:imm11:'0' and
    // sign-extend the 25-bit result.
    return utils::sign_extend<25>((s << 24) | (i1 << 23) | (i2 << 22)
				  | (upper << 12) | (lower << 1));
  }
2305
2306 // Insert OFFSET to a 32-bit THUMB branch and return the upper instruction.
2307 // UPPER_INSN is the original upper instruction of the branch. Caller is
2308 // responsible for overflow checking and BLX offset adjustment.
2309 static inline uint16_t
2310 thumb32_branch_upper(uint16_t upper_insn, int32_t offset)
2311 {
2312 uint32_t s = offset < 0 ? 1 : 0;
2313 uint32_t bits = static_cast<uint32_t>(offset);
2314 return (upper_insn & ~0x7ffU) | ((bits >> 12) & 0x3ffU) | (s << 10);
2315 }
2316
2317 // Insert OFFSET to a 32-bit THUMB branch and return the lower instruction.
2318 // LOWER_INSN is the original lower instruction of the branch. Caller is
2319 // responsible for overflow checking and BLX offset adjustment.
2320 static inline uint16_t
2321 thumb32_branch_lower(uint16_t lower_insn, int32_t offset)
2322 {
2323 uint32_t s = offset < 0 ? 1 : 0;
2324 uint32_t bits = static_cast<uint32_t>(offset);
2325 return ((lower_insn & ~0x2fffU)
2326 | ((((bits >> 23) & 1) ^ !s) << 13)
2327 | ((((bits >> 22) & 1) ^ !s) << 11)
2328 | ((bits >> 1) & 0x7ffU));
2329 }
2330
  // Return the branch offset of a 32-bit THUMB conditional branch.
  static inline int32_t
  thumb32_cond_branch_offset(uint16_t upper_insn, uint16_t lower_insn)
  {
    uint32_t s = (upper_insn & 0x0400U) >> 10;
    uint32_t j1 = (lower_insn & 0x2000U) >> 13;
    uint32_t j2 = (lower_insn & 0x0800U) >> 11;
    uint32_t lower = (lower_insn & 0x07ffU);
    // Reassemble the offset as S:J2:J1:imm6:imm11:'0' and
    // sign-extend the 21-bit result.
    uint32_t upper = (s << 8) | (j2 << 7) | (j1 << 6) | (upper_insn & 0x003fU);

    return utils::sign_extend<21>((upper << 12) | (lower << 1));
  }
2343
2344 // Insert OFFSET to a 32-bit THUMB conditional branch and return the upper
2345 // instruction. UPPER_INSN is the original upper instruction of the branch.
2346 // Caller is responsible for overflow checking.
2347 static inline uint16_t
2348 thumb32_cond_branch_upper(uint16_t upper_insn, int32_t offset)
2349 {
2350 uint32_t s = offset < 0 ? 1 : 0;
2351 uint32_t bits = static_cast<uint32_t>(offset);
2352 return (upper_insn & 0xfbc0U) | (s << 10) | ((bits & 0x0003f000U) >> 12);
2353 }
2354
2355 // Insert OFFSET to a 32-bit THUMB conditional branch and return the lower
2356 // instruction. LOWER_INSN is the original lower instruction of the branch.
2357 // Caller is reponsible for overflow checking.
2358 static inline uint16_t
2359 thumb32_cond_branch_lower(uint16_t lower_insn, int32_t offset)
2360 {
2361 uint32_t bits = static_cast<uint32_t>(offset);
2362 uint32_t j2 = (bits & 0x00080000U) >> 19;
2363 uint32_t j1 = (bits & 0x00040000U) >> 18;
2364 uint32_t lo = (bits & 0x00000ffeU) >> 1;
2365
2366 return (lower_insn & 0xd000U) | (j1 << 13) | (j2 << 11) | lo;
2367 }
2368
  // R_ARM_ABS8: S + A
  static inline typename This::Status
  abs8(unsigned char *view,
       const Sized_relobj<32, big_endian>* object,
       const Symbol_value<32>* psymval)
  {
    typedef typename elfcpp::Swap<8, big_endian>::Valtype Valtype;
    typedef typename elfcpp::Swap<32, big_endian>::Valtype Reltype;
    Valtype* wv = reinterpret_cast<Valtype*>(view);
    Valtype val = elfcpp::Swap<8, big_endian>::readval(wv);
    // The addend is the sign-extended byte currently in the field.
    Reltype addend = utils::sign_extend<8>(val);
    Reltype x = psymval->value(object, addend);
    // Merge the low 8 bits of the result into the field.
    val = utils::bit_select(val, x, 0xffU);
    elfcpp::Swap<8, big_endian>::writeval(wv, val);
    // The field may hold either a signed or an unsigned 8-bit value.
    return (utils::has_signed_unsigned_overflow<8>(x)
	    ? This::STATUS_OVERFLOW
	    : This::STATUS_OKAY);
  }
2387
2388 // R_ARM_THM_ABS5: S + A
2389 static inline typename This::Status
2390 thm_abs5(unsigned char *view,
2391 const Sized_relobj<32, big_endian>* object,
2392 const Symbol_value<32>* psymval)
2393 {
2394 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
2395 typedef typename elfcpp::Swap<32, big_endian>::Valtype Reltype;
2396 Valtype* wv = reinterpret_cast<Valtype*>(view);
2397 Valtype val = elfcpp::Swap<16, big_endian>::readval(wv);
2398 Reltype addend = (val & 0x7e0U) >> 6;
2399 Reltype x = psymval->value(object, addend);
2400 val = utils::bit_select(val, x << 6, 0x7e0U);
2401 elfcpp::Swap<16, big_endian>::writeval(wv, val);
2402 return (utils::has_overflow<5>(x)
2403 ? This::STATUS_OVERFLOW
2404 : This::STATUS_OKAY);
2405 }
2406
2407 // R_ARM_ABS12: S + A
2408 static inline typename This::Status
2409 abs12(unsigned char *view,
2410 const Sized_relobj<32, big_endian>* object,
2411 const Symbol_value<32>* psymval)
2412 {
2413 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
2414 typedef typename elfcpp::Swap<32, big_endian>::Valtype Reltype;
2415 Valtype* wv = reinterpret_cast<Valtype*>(view);
2416 Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
2417 Reltype addend = val & 0x0fffU;
2418 Reltype x = psymval->value(object, addend);
2419 val = utils::bit_select(val, x, 0x0fffU);
2420 elfcpp::Swap<32, big_endian>::writeval(wv, val);
2421 return (utils::has_overflow<12>(x)
2422 ? This::STATUS_OVERFLOW
2423 : This::STATUS_OKAY);
2424 }
2425
2426 // R_ARM_ABS16: S + A
2427 static inline typename This::Status
2428 abs16(unsigned char *view,
2429 const Sized_relobj<32, big_endian>* object,
2430 const Symbol_value<32>* psymval)
2431 {
2432 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
2433 typedef typename elfcpp::Swap<32, big_endian>::Valtype Reltype;
2434 Valtype* wv = reinterpret_cast<Valtype*>(view);
2435 Valtype val = elfcpp::Swap<16, big_endian>::readval(wv);
2436 Reltype addend = utils::sign_extend<16>(val);
2437 Reltype x = psymval->value(object, addend);
2438 val = utils::bit_select(val, x, 0xffffU);
2439 elfcpp::Swap<16, big_endian>::writeval(wv, val);
2440 return (utils::has_signed_unsigned_overflow<16>(x)
2441 ? This::STATUS_OVERFLOW
2442 : This::STATUS_OKAY);
2443 }
2444
2445 // R_ARM_ABS32: (S + A) | T
2446 static inline typename This::Status
2447 abs32(unsigned char *view,
2448 const Sized_relobj<32, big_endian>* object,
2449 const Symbol_value<32>* psymval,
2450 Arm_address thumb_bit)
2451 {
2452 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
2453 Valtype* wv = reinterpret_cast<Valtype*>(view);
2454 Valtype addend = elfcpp::Swap<32, big_endian>::readval(wv);
2455 Valtype x = psymval->value(object, addend) | thumb_bit;
2456 elfcpp::Swap<32, big_endian>::writeval(wv, x);
2457 return This::STATUS_OKAY;
2458 }
2459
2460 // R_ARM_REL32: (S + A) | T - P
2461 static inline typename This::Status
2462 rel32(unsigned char *view,
2463 const Sized_relobj<32, big_endian>* object,
2464 const Symbol_value<32>* psymval,
2465 Arm_address address,
2466 Arm_address thumb_bit)
2467 {
2468 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
2469 Valtype* wv = reinterpret_cast<Valtype*>(view);
2470 Valtype addend = elfcpp::Swap<32, big_endian>::readval(wv);
2471 Valtype x = (psymval->value(object, addend) | thumb_bit) - address;
2472 elfcpp::Swap<32, big_endian>::writeval(wv, x);
2473 return This::STATUS_OKAY;
2474 }
2475
2476 // R_ARM_THM_CALL: (S + A) | T - P
2477 static inline typename This::Status
2478 thm_call(const Relocate_info<32, big_endian>* relinfo, unsigned char *view,
2479 const Sized_symbol<32>* gsym, const Arm_relobj<big_endian>* object,
2480 unsigned int r_sym, const Symbol_value<32>* psymval,
2481 Arm_address address, Arm_address thumb_bit,
2482 bool is_weakly_undefined_without_plt)
2483 {
2484 return thumb_branch_common(elfcpp::R_ARM_THM_CALL, relinfo, view, gsym,
2485 object, r_sym, psymval, address, thumb_bit,
2486 is_weakly_undefined_without_plt);
2487 }
2488
2489 // R_ARM_THM_JUMP24: (S + A) | T - P
2490 static inline typename This::Status
2491 thm_jump24(const Relocate_info<32, big_endian>* relinfo, unsigned char *view,
2492 const Sized_symbol<32>* gsym, const Arm_relobj<big_endian>* object,
2493 unsigned int r_sym, const Symbol_value<32>* psymval,
2494 Arm_address address, Arm_address thumb_bit,
2495 bool is_weakly_undefined_without_plt)
2496 {
2497 return thumb_branch_common(elfcpp::R_ARM_THM_JUMP24, relinfo, view, gsym,
2498 object, r_sym, psymval, address, thumb_bit,
2499 is_weakly_undefined_without_plt);
2500 }
2501
2502 // R_ARM_THM_JUMP24: (S + A) | T - P
2503 static typename This::Status
2504 thm_jump19(unsigned char *view, const Arm_relobj<big_endian>* object,
2505 const Symbol_value<32>* psymval, Arm_address address,
2506 Arm_address thumb_bit);
2507
2508 // R_ARM_THM_XPC22: (S + A) | T - P
2509 static inline typename This::Status
2510 thm_xpc22(const Relocate_info<32, big_endian>* relinfo, unsigned char *view,
2511 const Sized_symbol<32>* gsym, const Arm_relobj<big_endian>* object,
2512 unsigned int r_sym, const Symbol_value<32>* psymval,
2513 Arm_address address, Arm_address thumb_bit,
2514 bool is_weakly_undefined_without_plt)
2515 {
2516 return thumb_branch_common(elfcpp::R_ARM_THM_XPC22, relinfo, view, gsym,
2517 object, r_sym, psymval, address, thumb_bit,
2518 is_weakly_undefined_without_plt);
2519 }
2520
2521 // R_ARM_THM_JUMP6: S + A – P
2522 static inline typename This::Status
2523 thm_jump6(unsigned char *view,
2524 const Sized_relobj<32, big_endian>* object,
2525 const Symbol_value<32>* psymval,
2526 Arm_address address)
2527 {
2528 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
2529 typedef typename elfcpp::Swap<16, big_endian>::Valtype Reltype;
2530 Valtype* wv = reinterpret_cast<Valtype*>(view);
2531 Valtype val = elfcpp::Swap<16, big_endian>::readval(wv);
2532 // bit[9]:bit[7:3]:’0’ (mask: 0x02f8)
2533 Reltype addend = (((val & 0x0200) >> 3) | ((val & 0x00f8) >> 2));
2534 Reltype x = (psymval->value(object, addend) - address);
2535 val = (val & 0xfd07) | ((x & 0x0040) << 3) | ((val & 0x003e) << 2);
2536 elfcpp::Swap<16, big_endian>::writeval(wv, val);
2537 // CZB does only forward jumps.
2538 return ((x > 0x007e)
2539 ? This::STATUS_OVERFLOW
2540 : This::STATUS_OKAY);
2541 }
2542
  // R_ARM_THM_JUMP8: S + A - P
2544 static inline typename This::Status
2545 thm_jump8(unsigned char *view,
2546 const Sized_relobj<32, big_endian>* object,
2547 const Symbol_value<32>* psymval,
2548 Arm_address address)
2549 {
2550 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
2551 typedef typename elfcpp::Swap<16, big_endian>::Valtype Reltype;
2552 Valtype* wv = reinterpret_cast<Valtype*>(view);
2553 Valtype val = elfcpp::Swap<16, big_endian>::readval(wv);
2554 Reltype addend = utils::sign_extend<8>((val & 0x00ff) << 1);
2555 Reltype x = (psymval->value(object, addend) - address);
2556 elfcpp::Swap<16, big_endian>::writeval(wv, (val & 0xff00) | ((x & 0x01fe) >> 1));
2557 return (utils::has_overflow<8>(x)
2558 ? This::STATUS_OVERFLOW
2559 : This::STATUS_OKAY);
2560 }
2561
  // R_ARM_THM_JUMP11: S + A - P
2563 static inline typename This::Status
2564 thm_jump11(unsigned char *view,
2565 const Sized_relobj<32, big_endian>* object,
2566 const Symbol_value<32>* psymval,
2567 Arm_address address)
2568 {
2569 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
2570 typedef typename elfcpp::Swap<16, big_endian>::Valtype Reltype;
2571 Valtype* wv = reinterpret_cast<Valtype*>(view);
2572 Valtype val = elfcpp::Swap<16, big_endian>::readval(wv);
2573 Reltype addend = utils::sign_extend<11>((val & 0x07ff) << 1);
2574 Reltype x = (psymval->value(object, addend) - address);
2575 elfcpp::Swap<16, big_endian>::writeval(wv, (val & 0xf800) | ((x & 0x0ffe) >> 1));
2576 return (utils::has_overflow<11>(x)
2577 ? This::STATUS_OVERFLOW
2578 : This::STATUS_OKAY);
2579 }
2580
2581 // R_ARM_BASE_PREL: B(S) + A - P
2582 static inline typename This::Status
2583 base_prel(unsigned char* view,
2584 Arm_address origin,
2585 Arm_address address)
2586 {
2587 Base::rel32(view, origin - address);
2588 return STATUS_OKAY;
2589 }
2590
2591 // R_ARM_BASE_ABS: B(S) + A
2592 static inline typename This::Status
2593 base_abs(unsigned char* view,
2594 Arm_address origin)
2595 {
2596 Base::rel32(view, origin);
2597 return STATUS_OKAY;
2598 }
2599
2600 // R_ARM_GOT_BREL: GOT(S) + A - GOT_ORG
2601 static inline typename This::Status
2602 got_brel(unsigned char* view,
2603 typename elfcpp::Swap<32, big_endian>::Valtype got_offset)
2604 {
2605 Base::rel32(view, got_offset);
2606 return This::STATUS_OKAY;
2607 }
2608
2609 // R_ARM_GOT_PREL: GOT(S) + A - P
2610 static inline typename This::Status
2611 got_prel(unsigned char *view,
2612 Arm_address got_entry,
2613 Arm_address address)
2614 {
2615 Base::rel32(view, got_entry - address);
2616 return This::STATUS_OKAY;
2617 }
2618
2619 // R_ARM_PLT32: (S + A) | T - P
2620 static inline typename This::Status
2621 plt32(const Relocate_info<32, big_endian>* relinfo,
2622 unsigned char *view,
2623 const Sized_symbol<32>* gsym,
2624 const Arm_relobj<big_endian>* object,
2625 unsigned int r_sym,
2626 const Symbol_value<32>* psymval,
2627 Arm_address address,
2628 Arm_address thumb_bit,
2629 bool is_weakly_undefined_without_plt)
2630 {
2631 return arm_branch_common(elfcpp::R_ARM_PLT32, relinfo, view, gsym,
2632 object, r_sym, psymval, address, thumb_bit,
2633 is_weakly_undefined_without_plt);
2634 }
2635
2636 // R_ARM_XPC25: (S + A) | T - P
2637 static inline typename This::Status
2638 xpc25(const Relocate_info<32, big_endian>* relinfo,
2639 unsigned char *view,
2640 const Sized_symbol<32>* gsym,
2641 const Arm_relobj<big_endian>* object,
2642 unsigned int r_sym,
2643 const Symbol_value<32>* psymval,
2644 Arm_address address,
2645 Arm_address thumb_bit,
2646 bool is_weakly_undefined_without_plt)
2647 {
2648 return arm_branch_common(elfcpp::R_ARM_XPC25, relinfo, view, gsym,
2649 object, r_sym, psymval, address, thumb_bit,
2650 is_weakly_undefined_without_plt);
2651 }
2652
2653 // R_ARM_CALL: (S + A) | T - P
2654 static inline typename This::Status
2655 call(const Relocate_info<32, big_endian>* relinfo,
2656 unsigned char *view,
2657 const Sized_symbol<32>* gsym,
2658 const Arm_relobj<big_endian>* object,
2659 unsigned int r_sym,
2660 const Symbol_value<32>* psymval,
2661 Arm_address address,
2662 Arm_address thumb_bit,
2663 bool is_weakly_undefined_without_plt)
2664 {
2665 return arm_branch_common(elfcpp::R_ARM_CALL, relinfo, view, gsym,
2666 object, r_sym, psymval, address, thumb_bit,
2667 is_weakly_undefined_without_plt);
2668 }
2669
2670 // R_ARM_JUMP24: (S + A) | T - P
2671 static inline typename This::Status
2672 jump24(const Relocate_info<32, big_endian>* relinfo,
2673 unsigned char *view,
2674 const Sized_symbol<32>* gsym,
2675 const Arm_relobj<big_endian>* object,
2676 unsigned int r_sym,
2677 const Symbol_value<32>* psymval,
2678 Arm_address address,
2679 Arm_address thumb_bit,
2680 bool is_weakly_undefined_without_plt)
2681 {
2682 return arm_branch_common(elfcpp::R_ARM_JUMP24, relinfo, view, gsym,
2683 object, r_sym, psymval, address, thumb_bit,
2684 is_weakly_undefined_without_plt);
2685 }
2686
  // R_ARM_PREL31: (S + A) | T - P
  static inline typename This::Status
  prel31(unsigned char *view,
	 const Sized_relobj<32, big_endian>* object,
	 const Symbol_value<32>* psymval,
	 Arm_address address,
	 Arm_address thumb_bit)
  {
    typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
    Valtype* wv = reinterpret_cast<Valtype*>(view);
    Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
    // The addend is the sign-extended low 31 bits of the field.
    Valtype addend = utils::sign_extend<31>(val);
    Valtype x = (psymval->value(object, addend) | thumb_bit) - address;
    // Keep the top bit of the field; replace only the low 31 bits.
    val = utils::bit_select(val, x, 0x7fffffffU);
    elfcpp::Swap<32, big_endian>::writeval(wv, val);
    return (utils::has_overflow<31>(x) ?
	    This::STATUS_OVERFLOW : This::STATUS_OKAY);
  }
2705
2706 // R_ARM_MOVW_ABS_NC: (S + A) | T
2707 static inline typename This::Status
2708 movw_abs_nc(unsigned char *view,
2709 const Sized_relobj<32, big_endian>* object,
2710 const Symbol_value<32>* psymval,
2711 Arm_address thumb_bit)
2712 {
2713 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
2714 Valtype* wv = reinterpret_cast<Valtype*>(view);
2715 Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
2716 Valtype addend = This::extract_arm_movw_movt_addend(val);
2717 Valtype x = psymval->value(object, addend) | thumb_bit;
2718 val = This::insert_val_arm_movw_movt(val, x);
2719 elfcpp::Swap<32, big_endian>::writeval(wv, val);
2720 return This::STATUS_OKAY;
2721 }
2722
2723 // R_ARM_MOVT_ABS: S + A
2724 static inline typename This::Status
2725 movt_abs(unsigned char *view,
2726 const Sized_relobj<32, big_endian>* object,
2727 const Symbol_value<32>* psymval)
2728 {
2729 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
2730 Valtype* wv = reinterpret_cast<Valtype*>(view);
2731 Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
2732 Valtype addend = This::extract_arm_movw_movt_addend(val);
2733 Valtype x = psymval->value(object, addend) >> 16;
2734 val = This::insert_val_arm_movw_movt(val, x);
2735 elfcpp::Swap<32, big_endian>::writeval(wv, val);
2736 return This::STATUS_OKAY;
2737 }
2738
2739 // R_ARM_THM_MOVW_ABS_NC: S + A | T
2740 static inline typename This::Status
2741 thm_movw_abs_nc(unsigned char *view,
2742 const Sized_relobj<32, big_endian>* object,
2743 const Symbol_value<32>* psymval,
2744 Arm_address thumb_bit)
2745 {
2746 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
2747 typedef typename elfcpp::Swap<32, big_endian>::Valtype Reltype;
2748 Valtype* wv = reinterpret_cast<Valtype*>(view);
2749 Reltype val = ((elfcpp::Swap<16, big_endian>::readval(wv) << 16)
2750 | elfcpp::Swap<16, big_endian>::readval(wv + 1));
2751 Reltype addend = extract_thumb_movw_movt_addend(val);
2752 Reltype x = psymval->value(object, addend) | thumb_bit;
2753 val = This::insert_val_thumb_movw_movt(val, x);
2754 elfcpp::Swap<16, big_endian>::writeval(wv, val >> 16);
2755 elfcpp::Swap<16, big_endian>::writeval(wv + 1, val & 0xffff);
2756 return This::STATUS_OKAY;
2757 }
2758
2759 // R_ARM_THM_MOVT_ABS: S + A
2760 static inline typename This::Status
2761 thm_movt_abs(unsigned char *view,
2762 const Sized_relobj<32, big_endian>* object,
2763 const Symbol_value<32>* psymval)
2764 {
2765 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
2766 typedef typename elfcpp::Swap<32, big_endian>::Valtype Reltype;
2767 Valtype* wv = reinterpret_cast<Valtype*>(view);
2768 Reltype val = ((elfcpp::Swap<16, big_endian>::readval(wv) << 16)
2769 | elfcpp::Swap<16, big_endian>::readval(wv + 1));
2770 Reltype addend = This::extract_thumb_movw_movt_addend(val);
2771 Reltype x = psymval->value(object, addend) >> 16;
2772 val = This::insert_val_thumb_movw_movt(val, x);
2773 elfcpp::Swap<16, big_endian>::writeval(wv, val >> 16);
2774 elfcpp::Swap<16, big_endian>::writeval(wv + 1, val & 0xffff);
2775 return This::STATUS_OKAY;
2776 }
2777
2778 // R_ARM_MOVW_PREL_NC: (S + A) | T - P
2779 static inline typename This::Status
2780 movw_prel_nc(unsigned char *view,
2781 const Sized_relobj<32, big_endian>* object,
2782 const Symbol_value<32>* psymval,
2783 Arm_address address,
2784 Arm_address thumb_bit)
2785 {
2786 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
2787 Valtype* wv = reinterpret_cast<Valtype*>(view);
2788 Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
2789 Valtype addend = This::extract_arm_movw_movt_addend(val);
2790 Valtype x = (psymval->value(object, addend) | thumb_bit) - address;
2791 val = This::insert_val_arm_movw_movt(val, x);
2792 elfcpp::Swap<32, big_endian>::writeval(wv, val);
2793 return This::STATUS_OKAY;
2794 }
2795
2796 // R_ARM_MOVT_PREL: S + A - P
2797 static inline typename This::Status
2798 movt_prel(unsigned char *view,
2799 const Sized_relobj<32, big_endian>* object,
2800 const Symbol_value<32>* psymval,
2801 Arm_address address)
2802 {
2803 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
2804 Valtype* wv = reinterpret_cast<Valtype*>(view);
2805 Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
2806 Valtype addend = This::extract_arm_movw_movt_addend(val);
2807 Valtype x = (psymval->value(object, addend) - address) >> 16;
2808 val = This::insert_val_arm_movw_movt(val, x);
2809 elfcpp::Swap<32, big_endian>::writeval(wv, val);
2810 return This::STATUS_OKAY;
2811 }
2812
2813 // R_ARM_THM_MOVW_PREL_NC: (S + A) | T - P
2814 static inline typename This::Status
2815 thm_movw_prel_nc(unsigned char *view,
2816 const Sized_relobj<32, big_endian>* object,
2817 const Symbol_value<32>* psymval,
2818 Arm_address address,
2819 Arm_address thumb_bit)
2820 {
2821 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
2822 typedef typename elfcpp::Swap<32, big_endian>::Valtype Reltype;
2823 Valtype* wv = reinterpret_cast<Valtype*>(view);
2824 Reltype val = (elfcpp::Swap<16, big_endian>::readval(wv) << 16)
2825 | elfcpp::Swap<16, big_endian>::readval(wv + 1);
2826 Reltype addend = This::extract_thumb_movw_movt_addend(val);
2827 Reltype x = (psymval->value(object, addend) | thumb_bit) - address;
2828 val = This::insert_val_thumb_movw_movt(val, x);
2829 elfcpp::Swap<16, big_endian>::writeval(wv, val >> 16);
2830 elfcpp::Swap<16, big_endian>::writeval(wv + 1, val & 0xffff);
2831 return This::STATUS_OKAY;
2832 }
2833
2834 // R_ARM_THM_MOVT_PREL: S + A - P
2835 static inline typename This::Status
2836 thm_movt_prel(unsigned char *view,
2837 const Sized_relobj<32, big_endian>* object,
2838 const Symbol_value<32>* psymval,
2839 Arm_address address)
2840 {
2841 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
2842 typedef typename elfcpp::Swap<32, big_endian>::Valtype Reltype;
2843 Valtype* wv = reinterpret_cast<Valtype*>(view);
2844 Reltype val = (elfcpp::Swap<16, big_endian>::readval(wv) << 16)
2845 | elfcpp::Swap<16, big_endian>::readval(wv + 1);
2846 Reltype addend = This::extract_thumb_movw_movt_addend(val);
2847 Reltype x = (psymval->value(object, addend) - address) >> 16;
2848 val = This::insert_val_thumb_movw_movt(val, x);
2849 elfcpp::Swap<16, big_endian>::writeval(wv, val >> 16);
2850 elfcpp::Swap<16, big_endian>::writeval(wv + 1, val & 0xffff);
2851 return This::STATUS_OKAY;
2852 }
2853
  // R_ARM_V4BX
  // Rewrite a "BX Rm" instruction for pre-ARMv4T targets, which lack
  // BX.  When interworking is requested (and Rm is not PC), branch to
  // a previously created v4bx veneer stub for register Rm; otherwise
  // rewrite the instruction in place as "MOV PC, Rm".
  static inline typename This::Status
  v4bx(const Relocate_info<32, big_endian>* relinfo,
       unsigned char *view,
       const Arm_relobj<big_endian>* object,
       const Arm_address address,
       const bool is_interworking)
  {

    typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
    Valtype* wv = reinterpret_cast<Valtype*>(view);
    Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);

    // Ensure that we have a BX instruction.
    gold_assert((val & 0x0ffffff0) == 0x012fff10);
    const uint32_t reg = (val & 0xf);
    if (is_interworking && reg != 0xf)
      {
	// Locate the stub table for this input section and the v4bx
	// veneer stub generated for register REG.
	Stub_table<big_endian>* stub_table =
	  object->stub_table(relinfo->data_shndx);
	gold_assert(stub_table != NULL);

	Arm_v4bx_stub* stub = stub_table->find_arm_v4bx_stub(reg);
	gold_assert(stub != NULL);

	// Branch displacement to the veneer; the -8 accounts for the
	// ARM PC reading two instructions ahead of the one executing.
	int32_t veneer_address =
	  stub_table->address() + stub->offset() - 8 - address;
	gold_assert((veneer_address <= ARM_MAX_FWD_BRANCH_OFFSET)
		    && (veneer_address >= ARM_MAX_BWD_BRANCH_OFFSET));
	// Replace with a branch to veneer (B <addr>)
	val = (val & 0xf0000000) | 0x0a000000
	      | ((veneer_address >> 2) & 0x00ffffff);
      }
    else
      {
	// Preserve Rm (lowest four bits) and the condition code
	// (highest four bits). Other bits encode MOV PC,Rm.
	val = (val & 0xf000000f) | 0x01a0f000;
      }
    elfcpp::Swap<32, big_endian>::writeval(wv, val);
    return This::STATUS_OKAY;
  }
2896 };
2897
// Relocate ARM long branches.  This handles relocation types
// R_ARM_CALL, R_ARM_JUMP24, R_ARM_PLT32 and R_ARM_XPC25.
// If IS_WEAKLY_UNDEFINED_WITHOUT_PLT is true, the target symbol is
// weakly undefined and we do not use a PLT in this relocation.  In
// such a case, the branch is converted into a NOP.
2903
template<bool big_endian>
typename Arm_relocate_functions<big_endian>::Status
Arm_relocate_functions<big_endian>::arm_branch_common(
    unsigned int r_type,
    const Relocate_info<32, big_endian>* relinfo,
    unsigned char *view,
    const Sized_symbol<32>* gsym,
    const Arm_relobj<big_endian>* object,
    unsigned int r_sym,
    const Symbol_value<32>* psymval,
    Arm_address address,
    Arm_address thumb_bit,
    bool is_weakly_undefined_without_plt)
{
  typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
  Valtype* wv = reinterpret_cast<Valtype*>(view);
  Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);

  // Classify the instruction from its encoding.  Condition field 0xf
  // is excluded from B/BL because it encodes BLX (among others).
  bool insn_is_b = (((val >> 28) & 0xf) <= 0xe)
                   && ((val & 0x0f000000UL) == 0x0a000000UL);
  bool insn_is_uncond_bl = (val & 0xff000000UL) == 0xeb000000UL;
  bool insn_is_cond_bl = (((val >> 28) & 0xf) < 0xe)
                         && ((val & 0x0f000000UL) == 0x0b000000UL);
  bool insn_is_blx = (val & 0xfe000000UL) == 0xfa000000UL;
  bool insn_is_any_branch = (val & 0x0e000000UL) == 0x0a000000UL;

  // Check that the instruction is valid for the relocation type.
  if (r_type == elfcpp::R_ARM_CALL)
    {
      if (!insn_is_uncond_bl && !insn_is_blx)
	return This::STATUS_BAD_RELOC;
    }
  else if (r_type == elfcpp::R_ARM_JUMP24)
    {
      if (!insn_is_b && !insn_is_cond_bl)
	return This::STATUS_BAD_RELOC;
    }
  else if (r_type == elfcpp::R_ARM_PLT32)
    {
      if (!insn_is_any_branch)
	return This::STATUS_BAD_RELOC;
    }
  else if (r_type == elfcpp::R_ARM_XPC25)
    {
      // FIXME: AAELF document IH0044C does not say much about it other
      // than it being obsolete.
      if (!insn_is_any_branch)
	return This::STATUS_BAD_RELOC;
    }
  else
    gold_unreachable();

  // A branch to an undefined weak symbol is turned into a jump to
  // the next instruction unless a PLT entry will be created.
  // Do the same for local undefined symbols.
  // The jump to the next instruction is optimized as a NOP depending
  // on the architecture.
  const Target_arm<big_endian>* arm_target =
    Target_arm<big_endian>::default_target();
  if (is_weakly_undefined_without_plt)
    {
      // Keep the original condition code so a conditional branch stays
      // harmless either way.
      Valtype cond = val & 0xf0000000U;
      if (arm_target->may_use_arm_nop())
	val = cond | 0x0320f000;
      else
	val = cond | 0x01a00000;	// Using pre-UAL nop: mov r0, r0.
      elfcpp::Swap<32, big_endian>::writeval(wv, val);
      return This::STATUS_OKAY;
    }

  // The 24-bit immediate holds a word offset; sign-extend it to get
  // the addend A, then compute the branch target S + A.
  Valtype addend = utils::sign_extend<26>(val << 2);
  Valtype branch_target = psymval->value(object, addend);
  int32_t branch_offset = branch_target - address;

  // We need a stub if the branch offset is too large or if we need
  // to switch mode.
  bool may_use_blx = arm_target->may_use_blx();
  Reloc_stub* stub = NULL;
  if ((branch_offset > ARM_MAX_FWD_BRANCH_OFFSET)
      || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
      || ((thumb_bit != 0) && !(may_use_blx && r_type == elfcpp::R_ARM_CALL)))
    {
      Stub_type stub_type =
	Reloc_stub::stub_type_for_reloc(r_type, address, branch_target,
					(thumb_bit != 0));
      if (stub_type != arm_stub_none)
	{
	  // The stub was created in an earlier pass; just look it up
	  // and retarget the branch at it.
	  Stub_table<big_endian>* stub_table =
	    object->stub_table(relinfo->data_shndx);
	  gold_assert(stub_table != NULL);

	  Reloc_stub::Key stub_key(stub_type, gsym, object, r_sym, addend);
	  stub = stub_table->find_reloc_stub(stub_key);
	  gold_assert(stub != NULL);
	  thumb_bit = stub->stub_template()->entry_in_thumb_mode() ? 1 : 0;
	  branch_target = stub_table->address() + stub->offset() + addend;
	  branch_offset = branch_target - address;
	  gold_assert((branch_offset <= ARM_MAX_FWD_BRANCH_OFFSET)
		      && (branch_offset >= ARM_MAX_BWD_BRANCH_OFFSET));
	}
    }

  // At this point, if we still need to switch mode, the instruction
  // must either be a BLX or a BL that can be converted to a BLX.
  if (thumb_bit != 0)
    {
      // Turn BL to BLX.
      gold_assert(may_use_blx && r_type == elfcpp::R_ARM_CALL);
      val = (val & 0xffffff) | 0xfa000000 | ((branch_offset & 2) << 23);
    }

  // Splice the word offset into the low 24 bits and write back.
  val = utils::bit_select(val, (branch_offset >> 2), 0xffffffUL);
  elfcpp::Swap<32, big_endian>::writeval(wv, val);
  return (utils::has_overflow<26>(branch_offset)
	  ? This::STATUS_OVERFLOW : This::STATUS_OKAY);
}
3020
// Relocate THUMB long branches.  This handles relocation types
// R_ARM_THM_CALL, R_ARM_THM_JUMP24 and R_ARM_THM_XPC22.
// If IS_WEAKLY_UNDEFINED_WITHOUT_PLT is true, the target symbol is
// weakly undefined and we do not use a PLT in this relocation.  In
// such a case, the branch is converted into a NOP.
3026
template<bool big_endian>
typename Arm_relocate_functions<big_endian>::Status
Arm_relocate_functions<big_endian>::thumb_branch_common(
    unsigned int r_type,
    const Relocate_info<32, big_endian>* relinfo,
    unsigned char *view,
    const Sized_symbol<32>* gsym,
    const Arm_relobj<big_endian>* object,
    unsigned int r_sym,
    const Symbol_value<32>* psymval,
    Arm_address address,
    Arm_address thumb_bit,
    bool is_weakly_undefined_without_plt)
{
  // The 32-bit Thumb branch is stored as two 16-bit halfwords.
  typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
  Valtype* wv = reinterpret_cast<Valtype*>(view);
  uint32_t upper_insn = elfcpp::Swap<16, big_endian>::readval(wv);
  uint32_t lower_insn = elfcpp::Swap<16, big_endian>::readval(wv + 1);

  // FIXME: These tests are too loose and do not take THUMB/THUMB-2 difference
  // into account.
  bool is_bl_insn = (lower_insn & 0x1000U) == 0x1000U;
  bool is_blx_insn = (lower_insn & 0x1000U) == 0x0000U;

  // Check that the instruction is valid for the relocation type.
  if (r_type == elfcpp::R_ARM_THM_CALL)
    {
      if (!is_bl_insn && !is_blx_insn)
	return This::STATUS_BAD_RELOC;
    }
  else if (r_type == elfcpp::R_ARM_THM_JUMP24)
    {
      // This cannot be a BLX.
      if (!is_bl_insn)
	return This::STATUS_BAD_RELOC;
    }
  else if (r_type == elfcpp::R_ARM_THM_XPC22)
    {
      // Check for Thumb to Thumb call.
      if (!is_blx_insn)
	return This::STATUS_BAD_RELOC;
      if (thumb_bit != 0)
	{
	  gold_warning(_("%s: Thumb BLX instruction targets "
			 "thumb function '%s'."),
		       object->name().c_str(),
		       (gsym ? gsym->name() : "(local)"));
	  // Convert BLX to BL.
	  lower_insn |= 0x1000U;
	}
    }
  else
    gold_unreachable();

  // A branch to an undefined weak symbol is turned into a jump to
  // the next instruction unless a PLT entry will be created.
  // The jump to the next instruction is optimized as a NOP.W for
  // Thumb-2 enabled architectures.
  const Target_arm<big_endian>* arm_target =
    Target_arm<big_endian>::default_target();
  if (is_weakly_undefined_without_plt)
    {
      if (arm_target->may_use_thumb2_nop())
	{
	  // Emit NOP.W.
	  elfcpp::Swap<16, big_endian>::writeval(wv, 0xf3af);
	  elfcpp::Swap<16, big_endian>::writeval(wv + 1, 0x8000);
	}
      else
	{
	  // Pre-Thumb-2 fallback sequence.
	  elfcpp::Swap<16, big_endian>::writeval(wv, 0xe000);
	  elfcpp::Swap<16, big_endian>::writeval(wv + 1, 0xbf00);
	}
      return This::STATUS_OKAY;
    }

  // Decode the addend A from the instruction and compute the branch
  // target S + A.
  int32_t addend = This::thumb32_branch_offset(upper_insn, lower_insn);
  Arm_address branch_target = psymval->value(object, addend);
  int32_t branch_offset = branch_target - address;

  // We need a stub if the branch offset is too large or if we need
  // to switch mode.  The reachable range differs between Thumb and
  // Thumb-2 encodings.
  bool may_use_blx = arm_target->may_use_blx();
  bool thumb2 = arm_target->using_thumb2();
  if ((!thumb2
       && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
	   || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
      || (thumb2
	  && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
	      || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
      || ((thumb_bit == 0)
	  && (((r_type == elfcpp::R_ARM_THM_CALL) && !may_use_blx)
	      || r_type == elfcpp::R_ARM_THM_JUMP24)))
    {
      Stub_type stub_type =
	Reloc_stub::stub_type_for_reloc(r_type, address, branch_target,
					(thumb_bit != 0));
      if (stub_type != arm_stub_none)
	{
	  // The stub was created in an earlier pass; just look it up
	  // and retarget the branch at it.
	  Stub_table<big_endian>* stub_table =
	    object->stub_table(relinfo->data_shndx);
	  gold_assert(stub_table != NULL);

	  Reloc_stub::Key stub_key(stub_type, gsym, object, r_sym, addend);
	  Reloc_stub* stub = stub_table->find_reloc_stub(stub_key);
	  gold_assert(stub != NULL);
	  thumb_bit = stub->stub_template()->entry_in_thumb_mode() ? 1 : 0;
	  branch_target = stub_table->address() + stub->offset() + addend;
	  branch_offset = branch_target - address;
	}
    }

  // At this point, if we still need to switch mode, the instruction
  // must either be a BLX or a BL that can be converted to a BLX.
  if (thumb_bit == 0)
    {
      gold_assert(may_use_blx
		  && (r_type == elfcpp::R_ARM_THM_CALL
		      || r_type == elfcpp::R_ARM_THM_XPC22));
      // Make sure this is a BLX.
      lower_insn &= ~0x1000U;
    }
  else
    {
      // Make sure this is a BL.
      lower_insn |= 0x1000U;
    }

  if ((lower_insn & 0x5000U) == 0x4000U)
    // For a BLX instruction, make sure that the relocation is rounded up
    // to a word boundary.  This follows the semantics of the instruction
    // which specifies that bit 1 of the target address will come from bit
    // 1 of the base address.
    branch_offset = (branch_offset + 2) & ~3;

  // Put BRANCH_OFFSET back into the insn.  Assumes two's complement.
  // We use the Thumb-2 encoding, which is safe even if dealing with
  // a Thumb-1 instruction by virtue of our overflow check above.
  upper_insn = This::thumb32_branch_upper(upper_insn, branch_offset);
  lower_insn = This::thumb32_branch_lower(lower_insn, branch_offset);

  elfcpp::Swap<16, big_endian>::writeval(wv, upper_insn);
  elfcpp::Swap<16, big_endian>::writeval(wv + 1, lower_insn);

  // Overflow limit depends on which encoding the target supports.
  return ((thumb2
	   ? utils::has_overflow<25>(branch_offset)
	   : utils::has_overflow<23>(branch_offset))
	  ? This::STATUS_OVERFLOW
	  : This::STATUS_OKAY);
}
3176
// Relocate THUMB-2 long conditional branches.
// NOTE(review): unlike the branch helpers above, this routine takes no
// IS_WEAKLY_UNDEFINED_WITHOUT_PLT argument; the earlier sentence about
// weak-undefined-to-NOP conversion was copied from them and does not
// apply here.
3181
// Relocate a Thumb-2 conditional branch (B<cond>.W, 20-bit offset) at
// VIEW against PSYMVAL, PC-relative to ADDRESS.  THUMB_BIT is the
// Thumb bit of the target; an ARM-mode target is rejected.
template<bool big_endian>
typename Arm_relocate_functions<big_endian>::Status
Arm_relocate_functions<big_endian>::thm_jump19(
    unsigned char *view,
    const Arm_relobj<big_endian>* object,
    const Symbol_value<32>* psymval,
    Arm_address address,
    Arm_address thumb_bit)
{
  // The instruction is stored as two 16-bit halfwords.
  typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
  Valtype* wv = reinterpret_cast<Valtype*>(view);
  uint32_t upper_insn = elfcpp::Swap<16, big_endian>::readval(wv);
  uint32_t lower_insn = elfcpp::Swap<16, big_endian>::readval(wv + 1);
  int32_t addend = This::thumb32_cond_branch_offset(upper_insn, lower_insn);

  Arm_address branch_target = psymval->value(object, addend);
  int32_t branch_offset = branch_target - address;

  // ??? Should handle interworking?  GCC might someday try to
  // use this for tail calls.
  // FIXME: We do not support Thumb entry to PLT yet.
  if (thumb_bit == 0)
    {
      gold_error(_("conditional branch to PLT in THUMB-2 not supported yet."));
      return This::STATUS_BAD_RELOC;
    }

  // Put RELOCATION back into the insn.
  upper_insn = This::thumb32_cond_branch_upper(upper_insn, branch_offset);
  lower_insn = This::thumb32_cond_branch_lower(lower_insn, branch_offset);

  // Put the relocated value back in the object file:
  elfcpp::Swap<16, big_endian>::writeval(wv, upper_insn);
  elfcpp::Swap<16, big_endian>::writeval(wv + 1, lower_insn);

  // The encoded offset is a signed 21-bit byte offset.
  return (utils::has_overflow<21>(branch_offset)
	  ? This::STATUS_OVERFLOW
	  : This::STATUS_OKAY);
}
3221
3222 // Get the GOT section, creating it if necessary.
3223
3224 template<bool big_endian>
3225 Output_data_got<32, big_endian>*
3226 Target_arm<big_endian>::got_section(Symbol_table* symtab, Layout* layout)
3227 {
3228 if (this->got_ == NULL)
3229 {
3230 gold_assert(symtab != NULL && layout != NULL);
3231
3232 this->got_ = new Output_data_got<32, big_endian>();
3233
3234 Output_section* os;
3235 os = layout->add_output_section_data(".got", elfcpp::SHT_PROGBITS,
3236 (elfcpp::SHF_ALLOC
3237 | elfcpp::SHF_WRITE),
3238 this->got_, false, true, true,
3239 false);
3240
3241 // The old GNU linker creates a .got.plt section. We just
3242 // create another set of data in the .got section. Note that we
3243 // always create a PLT if we create a GOT, although the PLT
3244 // might be empty.
3245 this->got_plt_ = new Output_data_space(4, "** GOT PLT");
3246 os = layout->add_output_section_data(".got", elfcpp::SHT_PROGBITS,
3247 (elfcpp::SHF_ALLOC
3248 | elfcpp::SHF_WRITE),
3249 this->got_plt_, false, false,
3250 false, true);
3251
3252 // The first three entries are reserved.
3253 this->got_plt_->set_current_data_size(3 * 4);
3254
3255 // Define _GLOBAL_OFFSET_TABLE_ at the start of the PLT.
3256 symtab->define_in_output_data("_GLOBAL_OFFSET_TABLE_", NULL,
3257 Symbol_table::PREDEFINED,
3258 this->got_plt_,
3259 0, 0, elfcpp::STT_OBJECT,
3260 elfcpp::STB_LOCAL,
3261 elfcpp::STV_HIDDEN, 0,
3262 false, false);
3263 }
3264 return this->got_;
3265 }
3266
3267 // Get the dynamic reloc section, creating it if necessary.
3268
3269 template<bool big_endian>
3270 typename Target_arm<big_endian>::Reloc_section*
3271 Target_arm<big_endian>::rel_dyn_section(Layout* layout)
3272 {
3273 if (this->rel_dyn_ == NULL)
3274 {
3275 gold_assert(layout != NULL);
3276 this->rel_dyn_ = new Reloc_section(parameters->options().combreloc());
3277 layout->add_output_section_data(".rel.dyn", elfcpp::SHT_REL,
3278 elfcpp::SHF_ALLOC, this->rel_dyn_, true,
3279 false, false, false);
3280 }
3281 return this->rel_dyn_;
3282 }
3283
3284 // Insn_template methods.
3285
3286 // Return byte size of an instruction template.
3287
3288 size_t
3289 Insn_template::size() const
3290 {
3291 switch (this->type())
3292 {
3293 case THUMB16_TYPE:
3294 case THUMB16_SPECIAL_TYPE:
3295 return 2;
3296 case ARM_TYPE:
3297 case THUMB32_TYPE:
3298 case DATA_TYPE:
3299 return 4;
3300 default:
3301 gold_unreachable();
3302 }
3303 }
3304
3305 // Return alignment of an instruction template.
3306
3307 unsigned
3308 Insn_template::alignment() const
3309 {
3310 switch (this->type())
3311 {
3312 case THUMB16_TYPE:
3313 case THUMB16_SPECIAL_TYPE:
3314 case THUMB32_TYPE:
3315 return 2;
3316 case ARM_TYPE:
3317 case DATA_TYPE:
3318 return 4;
3319 default:
3320 gold_unreachable();
3321 }
3322 }
3323
3324 // Stub_template methods.
3325
// Build a stub template from INSNS/INSN_COUNT: compute total byte size
// and alignment, record whether the stub is entered in Thumb mode, and
// collect the offsets of instructions that carry relocations.
Stub_template::Stub_template(
    Stub_type type, const Insn_template* insns,
    size_t insn_count)
  : type_(type), insns_(insns), insn_count_(insn_count), alignment_(1),
    entry_in_thumb_mode_(false), relocs_()
{
  off_t offset = 0;

  // Compute byte size and alignment of stub template.
  for (size_t i = 0; i < insn_count; i++)
    {
      unsigned insn_alignment = insns[i].alignment();
      size_t insn_size = insns[i].size();
      // Each instruction must start at its natural alignment.
      gold_assert((offset & (insn_alignment - 1)) == 0);
      // The whole stub is aligned to its most demanding instruction.
      this->alignment_ = std::max(this->alignment_, insn_alignment);
      switch (insns[i].type())
	{
	case Insn_template::THUMB16_TYPE:
	case Insn_template::THUMB16_SPECIAL_TYPE:
	  // A Thumb first instruction means the stub entry is Thumb.
	  if (i == 0)
	    this->entry_in_thumb_mode_ = true;
	  break;

	case Insn_template::THUMB32_TYPE:
	  if (insns[i].r_type() != elfcpp::R_ARM_NONE)
	    this->relocs_.push_back(Reloc(i, offset));
	  if (i == 0)
	    this->entry_in_thumb_mode_ = true;
	  break;

	case Insn_template::ARM_TYPE:
	  // Handle cases where the target is encoded within the
	  // instruction.
	  if (insns[i].r_type() == elfcpp::R_ARM_JUMP24)
	    this->relocs_.push_back(Reloc(i, offset));
	  break;

	case Insn_template::DATA_TYPE:
	  // Entry point cannot be data.
	  gold_assert(i != 0);
	  // Data words always carry a relocation.
	  this->relocs_.push_back(Reloc(i, offset));
	  break;

	default:
	  gold_unreachable();
	}
      offset += insn_size;
    }
  this->size_ = offset;
}
3376
3377 // Stub methods.
3378
3379 // Template to implement do_write for a specific target endianity.
3380
// Write this stub's instruction templates into VIEW (VIEW_SIZE bytes)
// for a fixed target endianity.  Thumb instructions are emitted as
// halfwords, ARM instructions and data as words.
template<bool big_endian>
void inline
Stub::do_fixed_endian_write(unsigned char* view, section_size_type view_size)
{
  const Stub_template* stub_template = this->stub_template();
  const Insn_template* insns = stub_template->insns();

  // FIXME: We do not handle BE8 encoding yet.
  unsigned char* pov = view;
  for (size_t i = 0; i < stub_template->insn_count(); i++)
    {
      switch (insns[i].type())
	{
	case Insn_template::THUMB16_TYPE:
	  elfcpp::Swap<16, big_endian>::writeval(pov, insns[i].data() & 0xffff);
	  break;
	case Insn_template::THUMB16_SPECIAL_TYPE:
	  // The subclass computes the actual bits for special templates.
	  elfcpp::Swap<16, big_endian>::writeval(
	      pov,
	      this->thumb16_special(i));
	  break;
	case Insn_template::THUMB32_TYPE:
	  {
	    // 32-bit Thumb: write the two halfwords separately, high
	    // halfword first.
	    uint32_t hi = (insns[i].data() >> 16) & 0xffff;
	    uint32_t lo = insns[i].data() & 0xffff;
	    elfcpp::Swap<16, big_endian>::writeval(pov, hi);
	    elfcpp::Swap<16, big_endian>::writeval(pov + 2, lo);
	  }
	  break;
	case Insn_template::ARM_TYPE:
	case Insn_template::DATA_TYPE:
	  elfcpp::Swap<32, big_endian>::writeval(pov, insns[i].data());
	  break;
	default:
	  gold_unreachable();
	}
      pov += insns[i].size();
    }
  // We must have filled the view exactly.
  gold_assert(static_cast<section_size_type>(pov - view) == view_size);
}
3421
3422 // Reloc_stub::Key methods.
3423
3424 // Dump a Key as a string for debugging.
3425
3426 std::string
3427 Reloc_stub::Key::name() const
3428 {
3429 if (this->r_sym_ == invalid_index)
3430 {
3431 // Global symbol key name
3432 // <stub-type>:<symbol name>:<addend>.
3433 const std::string sym_name = this->u_.symbol->name();
3434 // We need to print two hex number and two colons. So just add 100 bytes
3435 // to the symbol name size.
3436 size_t len = sym_name.size() + 100;
3437 char* buffer = new char[len];
3438 int c = snprintf(buffer, len, "%d:%s:%x", this->stub_type_,
3439 sym_name.c_str(), this->addend_);
3440 gold_assert(c > 0 && c < static_cast<int>(len));
3441 delete[] buffer;
3442 return std::string(buffer);
3443 }
3444 else
3445 {
3446 // local symbol key name
3447 // <stub-type>:<object>:<r_sym>:<addend>.
3448 const size_t len = 200;
3449 char buffer[len];
3450 int c = snprintf(buffer, len, "%d:%p:%u:%x", this->stub_type_,
3451 this->u_.relobj, this->r_sym_, this->addend_);
3452 gold_assert(c > 0 && c < static_cast<int>(len));
3453 return std::string(buffer);
3454 }
3455 }
3456
3457 // Reloc_stub methods.
3458
3459 // Determine the type of stub needed, if any, for a relocation of R_TYPE at
3460 // LOCATION to DESTINATION.
3461 // This code is based on the arm_type_of_stub function in
// bfd/elf32-arm.c.  We have changed the interface a little to keep the Stub
3463 // class simple.
3464
Stub_type
Reloc_stub::stub_type_for_reloc(
    unsigned int r_type,
    Arm_address location,
    Arm_address destination,
    bool target_is_thumb)
{
  Stub_type stub_type = arm_stub_none;

  // This is a bit ugly but we want to avoid using a templated class for
  // big and little endianities.
  bool may_use_blx;
  bool should_force_pic_veneer;
  bool thumb2;
  bool thumb_only;
  if (parameters->target().is_big_endian())
    {
      const Target_arm<true>* big_endian_target =
	Target_arm<true>::default_target();
      may_use_blx = big_endian_target->may_use_blx();
      should_force_pic_veneer = big_endian_target->should_force_pic_veneer();
      thumb2 = big_endian_target->using_thumb2();
      thumb_only = big_endian_target->using_thumb_only();
    }
  else
    {
      const Target_arm<false>* little_endian_target =
	Target_arm<false>::default_target();
      may_use_blx = little_endian_target->may_use_blx();
      should_force_pic_veneer = little_endian_target->should_force_pic_veneer();
      thumb2 = little_endian_target->using_thumb2();
      thumb_only = little_endian_target->using_thumb_only();
    }

  // 64-bit arithmetic avoids overflow when LOCATION and DESTINATION
  // are far apart.
  int64_t branch_offset = (int64_t)destination - location;

  if (r_type == elfcpp::R_ARM_THM_CALL || r_type == elfcpp::R_ARM_THM_JUMP24)
    {
      // Branch from Thumb code.
      // Handle cases where:
      // - this call goes too far (different Thumb/Thumb2 max
      //   distance)
      // - it's a Thumb->Arm call and blx is not available, or it's a
      //   Thumb->Arm branch (not bl).  A stub is needed in this case.
      if ((!thumb2
	   && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
	       || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
	  || (thumb2
	      && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
		  || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
	  || ((!target_is_thumb)
	      && (((r_type == elfcpp::R_ARM_THM_CALL) && !may_use_blx)
		  || (r_type == elfcpp::R_ARM_THM_JUMP24))))
	{
	  if (target_is_thumb)
	    {
	      // Thumb to thumb.
	      if (!thumb_only)
		{
		  stub_type = (parameters->options().shared()
			       || should_force_pic_veneer)
		    // PIC stubs.
		    ? ((may_use_blx
			&& (r_type == elfcpp::R_ARM_THM_CALL))
		       // V5T and above.  Stub starts with ARM code, so
		       // we must be able to switch mode before
		       // reaching it, which is only possible for 'bl'
		       // (ie R_ARM_THM_CALL relocation).
		       ? arm_stub_long_branch_any_thumb_pic
		       // On V4T, use Thumb code only.
		       : arm_stub_long_branch_v4t_thumb_thumb_pic)

		    // non-PIC stubs.
		    : ((may_use_blx
			&& (r_type == elfcpp::R_ARM_THM_CALL))
		       ? arm_stub_long_branch_any_any // V5T and above.
		       : arm_stub_long_branch_v4t_thumb_thumb);	// V4T.
		}
	      else
		{
		  // M-profile (Thumb-only): no ARM code allowed in stubs.
		  stub_type = (parameters->options().shared()
			       || should_force_pic_veneer)
		    ? arm_stub_long_branch_thumb_only_pic	// PIC stub.
		    : arm_stub_long_branch_thumb_only;	// non-PIC stub.
		}
	    }
	  else
	    {
	      // Thumb to arm.

	      // FIXME: We should check that the input section is from an
	      // object that has interwork enabled.

	      stub_type = (parameters->options().shared()
			   || should_force_pic_veneer)
		// PIC stubs.
		? ((may_use_blx
		    && (r_type == elfcpp::R_ARM_THM_CALL))
		   ? arm_stub_long_branch_any_arm_pic	// V5T and above.
		   : arm_stub_long_branch_v4t_thumb_arm_pic)	// V4T.

		// non-PIC stubs.
		: ((may_use_blx
		    && (r_type == elfcpp::R_ARM_THM_CALL))
		   ? arm_stub_long_branch_any_any	// V5T and above.
		   : arm_stub_long_branch_v4t_thumb_arm);	// V4T.

	      // Handle v4t short branches.
	      if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
		  && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
		  && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
		stub_type = arm_stub_short_branch_v4t_thumb_arm;
	    }
	}
    }
  else if (r_type == elfcpp::R_ARM_CALL
	   || r_type == elfcpp::R_ARM_JUMP24
	   || r_type == elfcpp::R_ARM_PLT32)
    {
      // Branch from ARM code.
      if (target_is_thumb)
	{
	  // Arm to thumb.

	  // FIXME: We should check that the input section is from an
	  // object that has interwork enabled.

	  // We have an extra 2-bytes reach because of
	  // the mode change (bit 24 (H) of BLX encoding).
	  if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
	      || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
	      || ((r_type == elfcpp::R_ARM_CALL) && !may_use_blx)
	      || (r_type == elfcpp::R_ARM_JUMP24)
	      || (r_type == elfcpp::R_ARM_PLT32))
	    {
	      stub_type = (parameters->options().shared()
			   || should_force_pic_veneer)
		// PIC stubs.
		? (may_use_blx
		   ? arm_stub_long_branch_any_thumb_pic	// V5T and above.
		   : arm_stub_long_branch_v4t_arm_thumb_pic)	// V4T stub.

		// non-PIC stubs.
		: (may_use_blx
		   ? arm_stub_long_branch_any_any	// V5T and above.
		   : arm_stub_long_branch_v4t_arm_thumb);	// V4T.
	    }
	}
      else
	{
	  // Arm to arm.  A stub is needed only when out of branch range.
	  if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
	      || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
	    {
	      stub_type = (parameters->options().shared()
			   || should_force_pic_veneer)
		? arm_stub_long_branch_any_arm_pic	// PIC stubs.
		: arm_stub_long_branch_any_any;		// non-PIC.
	    }
	}
    }

  return stub_type;
}
3627
3628 // Cortex_a8_stub methods.
3629
3630 // Return the instruction for a THUMB16_SPECIAL_TYPE instruction template.
3631 // I is the position of the instruction template in the stub template.
3632
3633 uint16_t
3634 Cortex_a8_stub::do_thumb16_special(size_t i)
3635 {
3636 // The only use of this is to copy condition code from a conditional
3637 // branch being worked around to the corresponding conditional branch in
3638 // to the stub.
3639 gold_assert(this->stub_template()->type() == arm_stub_a8_veneer_b_cond
3640 && i == 0);
3641 uint16_t data = this->stub_template()->insns()[i].data();
3642 gold_assert((data & 0xff00U) == 0xd000U);
3643 data |= ((this->original_insn_ >> 22) & 0xf) << 8;
3644 return data;
3645 }
3646
3647 // Stub_factory methods.
3648
3649 Stub_factory::Stub_factory()
3650 {
3651 // The instruction template sequences are declared as static
3652 // objects and initialized first time the constructor runs.
3653
3654 // Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
3655 // to reach the stub if necessary.
3656 static const Insn_template elf32_arm_stub_long_branch_any_any[] =
3657 {
3658 Insn_template::arm_insn(0xe51ff004), // ldr pc, [pc, #-4]
3659 Insn_template::data_word(0, elfcpp::R_ARM_ABS32, 0),
3660 // dcd R_ARM_ABS32(X)
3661 };
3662
3663 // V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
3664 // available.
3665 static const Insn_template elf32_arm_stub_long_branch_v4t_arm_thumb[] =
3666 {
3667 Insn_template::arm_insn(0xe59fc000), // ldr ip, [pc, #0]
3668 Insn_template::arm_insn(0xe12fff1c), // bx ip
3669 Insn_template::data_word(0, elfcpp::R_ARM_ABS32, 0),
3670 // dcd R_ARM_ABS32(X)
3671 };
3672
3673 // Thumb -> Thumb long branch stub. Used on M-profile architectures.
3674 static const Insn_template elf32_arm_stub_long_branch_thumb_only[] =
3675 {
3676 Insn_template::thumb16_insn(0xb401), // push {r0}
3677 Insn_template::thumb16_insn(0x4802), // ldr r0, [pc, #8]
3678 Insn_template::thumb16_insn(0x4684), // mov ip, r0
3679 Insn_template::thumb16_insn(0xbc01), // pop {r0}
3680 Insn_template::thumb16_insn(0x4760), // bx ip
3681 Insn_template::thumb16_insn(0xbf00), // nop
3682 Insn_template::data_word(0, elfcpp::R_ARM_ABS32, 0),
3683 // dcd R_ARM_ABS32(X)
3684 };
3685
3686 // V4T Thumb -> Thumb long branch stub. Using the stack is not
3687 // allowed.
3688 static const Insn_template elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
3689 {
3690 Insn_template::thumb16_insn(0x4778), // bx pc
3691 Insn_template::thumb16_insn(0x46c0), // nop
3692 Insn_template::arm_insn(0xe59fc000), // ldr ip, [pc, #0]
3693 Insn_template::arm_insn(0xe12fff1c), // bx ip
3694 Insn_template::data_word(0, elfcpp::R_ARM_ABS32, 0),
3695 // dcd R_ARM_ABS32(X)
3696 };
3697
3698 // V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
3699 // available.
3700 static const Insn_template elf32_arm_stub_long_branch_v4t_thumb_arm[] =
3701 {
3702 Insn_template::thumb16_insn(0x4778), // bx pc
3703 Insn_template::thumb16_insn(0x46c0), // nop
3704 Insn_template::arm_insn(0xe51ff004), // ldr pc, [pc, #-4]
3705 Insn_template::data_word(0, elfcpp::R_ARM_ABS32, 0),
3706 // dcd R_ARM_ABS32(X)
3707 };
3708
3709 // V4T Thumb -> ARM short branch stub. Shorter variant of the above
3710 // one, when the destination is close enough.
3711 static const Insn_template elf32_arm_stub_short_branch_v4t_thumb_arm[] =
3712 {
3713 Insn_template::thumb16_insn(0x4778), // bx pc
3714 Insn_template::thumb16_insn(0x46c0), // nop
3715 Insn_template::arm_rel_insn(0xea000000, -8), // b (X-8)
3716 };
3717
3718 // ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
3719 // blx to reach the stub if necessary.
3720 static const Insn_template elf32_arm_stub_long_branch_any_arm_pic[] =
3721 {
3722 Insn_template::arm_insn(0xe59fc000), // ldr r12, [pc]
3723 Insn_template::arm_insn(0xe08ff00c), // add pc, pc, ip
3724 Insn_template::data_word(0, elfcpp::R_ARM_REL32, -4),
3725 // dcd R_ARM_REL32(X-4)
3726 };
3727
3728 // ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
3729 // blx to reach the stub if necessary. We can not add into pc;
3730 // it is not guaranteed to mode switch (different in ARMv6 and
3731 // ARMv7).
3732 static const Insn_template elf32_arm_stub_long_branch_any_thumb_pic[] =
3733 {
3734 Insn_template::arm_insn(0xe59fc004), // ldr r12, [pc, #4]
3735 Insn_template::arm_insn(0xe08fc00c), // add ip, pc, ip
3736 Insn_template::arm_insn(0xe12fff1c), // bx ip
3737 Insn_template::data_word(0, elfcpp::R_ARM_REL32, 0),
3738 // dcd R_ARM_REL32(X)
3739 };
3740
3741 // V4T ARM -> ARM long branch stub, PIC.
3742 static const Insn_template elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
3743 {
3744 Insn_template::arm_insn(0xe59fc004), // ldr ip, [pc, #4]
3745 Insn_template::arm_insn(0xe08fc00c), // add ip, pc, ip
3746 Insn_template::arm_insn(0xe12fff1c), // bx ip
3747 Insn_template::data_word(0, elfcpp::R_ARM_REL32, 0),
3748 // dcd R_ARM_REL32(X)
3749 };
3750
3751 // V4T Thumb -> ARM long branch stub, PIC.
3752 static const Insn_template elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
3753 {
3754 Insn_template::thumb16_insn(0x4778), // bx pc
3755 Insn_template::thumb16_insn(0x46c0), // nop
3756 Insn_template::arm_insn(0xe59fc000), // ldr ip, [pc, #0]
3757 Insn_template::arm_insn(0xe08cf00f), // add pc, ip, pc
3758 Insn_template::data_word(0, elfcpp::R_ARM_REL32, -4),
3759 // dcd R_ARM_REL32(X)
3760 };
3761
3762 // Thumb -> Thumb long branch stub, PIC. Used on M-profile
3763 // architectures.
3764 static const Insn_template elf32_arm_stub_long_branch_thumb_only_pic[] =
3765 {
3766 Insn_template::thumb16_insn(0xb401), // push {r0}
3767 Insn_template::thumb16_insn(0x4802), // ldr r0, [pc, #8]
3768 Insn_template::thumb16_insn(0x46fc), // mov ip, pc
3769 Insn_template::thumb16_insn(0x4484), // add ip, r0
3770 Insn_template::thumb16_insn(0xbc01), // pop {r0}
3771 Insn_template::thumb16_insn(0x4760), // bx ip
3772 Insn_template::data_word(0, elfcpp::R_ARM_REL32, 4),
3773 // dcd R_ARM_REL32(X)
3774 };
3775
3776 // V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
3777 // allowed.
3778 static const Insn_template elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
3779 {
3780 Insn_template::thumb16_insn(0x4778), // bx pc
3781 Insn_template::thumb16_insn(0x46c0), // nop
3782 Insn_template::arm_insn(0xe59fc004), // ldr ip, [pc, #4]
3783 Insn_template::arm_insn(0xe08fc00c), // add ip, pc, ip
3784 Insn_template::arm_insn(0xe12fff1c), // bx ip
3785 Insn_template::data_word(0, elfcpp::R_ARM_REL32, 0),
3786 // dcd R_ARM_REL32(X)
3787 };
3788
3789 // Cortex-A8 erratum-workaround stubs.
3790
3791 // Stub used for conditional branches (which may be beyond +/-1MB away,
3792 // so we can't use a conditional branch to reach this stub).
3793
3794 // original code:
3795 //
3796 // b<cond> X
3797 // after:
3798 //
3799 static const Insn_template elf32_arm_stub_a8_veneer_b_cond[] =
3800 {
3801 Insn_template::thumb16_bcond_insn(0xd001), // b<cond>.n true
3802 Insn_template::thumb32_b_insn(0xf000b800, -4), // b.w after
3803 Insn_template::thumb32_b_insn(0xf000b800, -4) // true:
3804 // b.w X
3805 };
3806
3807 // Stub used for b.w and bl.w instructions.
3808
3809 static const Insn_template elf32_arm_stub_a8_veneer_b[] =
3810 {
3811 Insn_template::thumb32_b_insn(0xf000b800, -4) // b.w dest
3812 };
3813
3814 static const Insn_template elf32_arm_stub_a8_veneer_bl[] =
3815 {
3816 Insn_template::thumb32_b_insn(0xf000b800, -4) // b.w dest
3817 };
3818
3819 // Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
3820 // instruction (which switches to ARM mode) to point to this stub. Jump to
3821 // the real destination using an ARM-mode branch.
3822 static const Insn_template elf32_arm_stub_a8_veneer_blx[] =
3823 {
3824 Insn_template::arm_rel_insn(0xea000000, -8) // b dest
3825 };
3826
3827 // Stub used to provide an interworking for R_ARM_V4BX relocation
3828 // (bx r[n] instruction).
3829 static const Insn_template elf32_arm_stub_v4_veneer_bx[] =
3830 {
3831 Insn_template::arm_insn(0xe3100001), // tst r<n>, #1
3832 Insn_template::arm_insn(0x01a0f000), // moveq pc, r<n>
3833 Insn_template::arm_insn(0xe12fff10) // bx r<n>
3834 };
3835
3836 // Fill in the stub template look-up table. Stub templates are constructed
3837 // per instance of Stub_factory for fast look-up without locking
3838 // in a thread-enabled environment.
3839
3840 this->stub_templates_[arm_stub_none] =
3841 new Stub_template(arm_stub_none, NULL, 0);
3842
3843 #define DEF_STUB(x) \
3844 do \
3845 { \
3846 size_t array_size \
3847 = sizeof(elf32_arm_stub_##x) / sizeof(elf32_arm_stub_##x[0]); \
3848 Stub_type type = arm_stub_##x; \
3849 this->stub_templates_[type] = \
3850 new Stub_template(type, elf32_arm_stub_##x, array_size); \
3851 } \
3852 while (0);
3853
3854 DEF_STUBS
3855 #undef DEF_STUB
3856 }
3857
3858 // Stub_table methods.
3859
3860 // Removel all Cortex-A8 stub.
3861
3862 template<bool big_endian>
3863 void
3864 Stub_table<big_endian>::remove_all_cortex_a8_stubs()
3865 {
3866 for (Cortex_a8_stub_list::iterator p = this->cortex_a8_stubs_.begin();
3867 p != this->cortex_a8_stubs_.end();
3868 ++p)
3869 delete p->second;
3870 this->cortex_a8_stubs_.clear();
3871 }
3872
// Relocate one stub.  This is a helper for Stub_table::relocate_stubs().
// STUB is the stub to process.  VIEW is a writable view of this stub
// table's output, starting at output address ADDRESS and spanning
// VIEW_SIZE bytes.  A stub whose template carries no relocations is
// position-independent and needs no fixing up, so we skip it.

template<bool big_endian>
void
Stub_table<big_endian>::relocate_stub(
    Stub* stub,
    const Relocate_info<32, big_endian>* relinfo,
    Target_arm<big_endian>* arm_target,
    Output_section* output_section,
    unsigned char* view,
    Arm_address address,
    section_size_type view_size)
{
  const Stub_template* stub_template = stub->stub_template();
  if (stub_template->reloc_count() != 0)
    {
      // Adjust view to cover the stub only.
      section_size_type offset = stub->offset();
      section_size_type stub_size = stub_template->size();
      // The stub must lie entirely within the view we were given.
      gold_assert(offset + stub_size <= view_size);

      // Delegate the actual relocation work to the target, passing a
      // view and address narrowed to this stub's bytes only.
      arm_target->relocate_stub(stub, relinfo, output_section, view + offset,
				address + offset, stub_size);
    }
}
3898
// Relocate all stubs in this stub table.  This applies relocations to
// every relocation stub, Cortex-A8 erratum stub and ARM V4BX stub that
// the table holds.

template<bool big_endian>
void
Stub_table<big_endian>::relocate_stubs(
    const Relocate_info<32, big_endian>* relinfo,
    Target_arm<big_endian>* arm_target,
    Output_section* output_section,
    unsigned char* view,
    Arm_address address,
    section_size_type view_size)
{
  // The caller must pass a view covering exactly this stub table:
  // same start address and same size as our data.
  gold_assert(address == this->address()
	      && (view_size
		  == static_cast<section_size_type>(this->data_size())));

  // Relocate all relocation stubs.
  for (typename Reloc_stub_map::const_iterator p = this->reloc_stubs_.begin();
       p != this->reloc_stubs_.end();
       ++p)
    this->relocate_stub(p->second, relinfo, arm_target, output_section, view,
			address, view_size);

  // Relocate all Cortex-A8 stubs.
  for (Cortex_a8_stub_list::iterator p = this->cortex_a8_stubs_.begin();
       p != this->cortex_a8_stubs_.end();
       ++p)
    this->relocate_stub(p->second, relinfo, arm_target, output_section, view,
			address, view_size);

  // Relocate all ARM V4BX stubs.  Entries in this list may be NULL
  // (one slot per register number), so skip empty slots.
  for (Arm_v4bx_stub_list::iterator p = this->arm_v4bx_stubs_.begin();
       p != this->arm_v4bx_stubs_.end();
       ++p)
    {
      if (*p != NULL)
	this->relocate_stub(*p, relinfo, arm_target, output_section, view,
			    address, view_size);
    }
}
3941
// Write out the stubs to file.  Each stub was assigned an offset within
// this table by finalize_stubs(); here we copy each stub's bytes into an
// output view at that offset, asserting that every stub's output address
// honours its template's alignment.

template<bool big_endian>
void
Stub_table<big_endian>::do_write(Output_file* of)
{
  off_t offset = this->offset();
  const section_size_type oview_size =
    convert_to_section_size_type(this->data_size());
  // Map a writable view of this table's bytes in the output file.
  unsigned char* const oview = of->get_output_view(offset, oview_size);

  // Write relocation stubs.
  for (typename Reloc_stub_map::const_iterator p = this->reloc_stubs_.begin();
       p != this->reloc_stubs_.end();
       ++p)
    {
      Reloc_stub* stub = p->second;
      Arm_address address = this->address() + stub->offset();
      // finalize_stubs() must have aligned this stub properly.
      gold_assert(address
		  == align_address(address,
				   stub->stub_template()->alignment()));
      stub->write(oview + stub->offset(), stub->stub_template()->size(),
		  big_endian);
    }

  // Write Cortex-A8 stubs.
  for (Cortex_a8_stub_list::const_iterator p = this->cortex_a8_stubs_.begin();
       p != this->cortex_a8_stubs_.end();
       ++p)
    {
      Cortex_a8_stub* stub = p->second;
      Arm_address address = this->address() + stub->offset();
      gold_assert(address
		  == align_address(address,
				   stub->stub_template()->alignment()));
      stub->write(oview + stub->offset(), stub->stub_template()->size(),
		  big_endian);
    }

  // Write ARM V4BX relocation stubs.  Slots in this list may be NULL.
  for (Arm_v4bx_stub_list::const_iterator p = this->arm_v4bx_stubs_.begin();
       p != this->arm_v4bx_stubs_.end();
       ++p)
    {
      if (*p == NULL)
	continue;

      Arm_address address = this->address() + (*p)->offset();
      gold_assert(address
		  == align_address(address,
				   (*p)->stub_template()->alignment()));
      (*p)->write(oview + (*p)->offset(), (*p)->stub_template()->size(),
		  big_endian);
    }

  // Commit the view back to the output file.
  of->write_output_view(this->offset(), oview_size, oview);
}
3999
// Update the data size and address alignment of the stub table at the end
// of a relaxation pass.  Return true if either the data size or the
// alignment changed in this relaxation pass.  The computed values are
// cached in prev_data_size_ / prev_addralign_ for use in the next pass.

template<bool big_endian>
bool
Stub_table<big_endian>::update_data_size_and_addralign()
{
  off_t size = 0;
  unsigned addralign = 1;

  // Go over all stubs in table to compute data size and address alignment.
  // For each stub we first align the running size to the stub's required
  // alignment, then add the stub's size; the table's alignment is the
  // maximum over all stubs.

  for (typename Reloc_stub_map::const_iterator p = this->reloc_stubs_.begin();
       p != this->reloc_stubs_.end();
       ++p)
    {
      const Stub_template* stub_template = p->second->stub_template();
      addralign = std::max(addralign, stub_template->alignment());
      size = (align_address(size, stub_template->alignment())
	      + stub_template->size());
    }

  for (Cortex_a8_stub_list::const_iterator p = this->cortex_a8_stubs_.begin();
       p != this->cortex_a8_stubs_.end();
       ++p)
    {
      const Stub_template* stub_template = p->second->stub_template();
      addralign = std::max(addralign, stub_template->alignment());
      size = (align_address(size, stub_template->alignment())
	      + stub_template->size());
    }

  // V4BX stub slots may be NULL; only occupied slots contribute.
  for (Arm_v4bx_stub_list::const_iterator p = this->arm_v4bx_stubs_.begin();
       p != this->arm_v4bx_stubs_.end();
       ++p)
    {
      if (*p == NULL)
	continue;

      const Stub_template* stub_template = (*p)->stub_template();
      addralign = std::max(addralign, stub_template->alignment());
      size = (align_address(size, stub_template->alignment())
	      + stub_template->size());
    }

  // Check if either data size or alignment changed in this pass.
  // Update prev_data_size_ and prev_addralign_.  These will be used
  // as the current data size and address alignment for the next pass.
  bool changed = size != this->prev_data_size_;
  this->prev_data_size_ = size;

  if (addralign != this->prev_addralign_)
    changed = true;
  this->prev_addralign_ = addralign;

  return changed;
}
4058
// Finalize the stubs.  This sets the offsets of the stubs within the stub
// table, laying them out sequentially with per-stub alignment, in the
// same order used by update_data_size_and_addralign() so the final offset
// cannot exceed prev_data_size_.  It also marks all input sections
// needing the Cortex-A8 workaround.

template<bool big_endian>
void
Stub_table<big_endian>::finalize_stubs()
{
  off_t off = 0;
  for (typename Reloc_stub_map::const_iterator p = this->reloc_stubs_.begin();
       p != this->reloc_stubs_.end();
       ++p)
    {
      Reloc_stub* stub = p->second;
      const Stub_template* stub_template = stub->stub_template();
      uint64_t stub_addralign = stub_template->alignment();
      off = align_address(off, stub_addralign);
      stub->set_offset(off);
      off += stub_template->size();
    }

  for (Cortex_a8_stub_list::const_iterator p = this->cortex_a8_stubs_.begin();
       p != this->cortex_a8_stubs_.end();
       ++p)
    {
      Cortex_a8_stub* stub = p->second;
      const Stub_template* stub_template = stub->stub_template();
      uint64_t stub_addralign = stub_template->alignment();
      off = align_address(off, stub_addralign);
      stub->set_offset(off);
      off += stub_template->size();

      // Mark input section so that we can determine later if a code section
      // needs the Cortex-A8 workaround quickly.
      Arm_relobj<big_endian>* arm_relobj =
	Arm_relobj<big_endian>::as_arm_relobj(stub->relobj());
      arm_relobj->mark_section_for_cortex_a8_workaround(stub->shndx());
    }

  // V4BX stub slots may be NULL; only occupied slots are laid out.
  for (Arm_v4bx_stub_list::const_iterator p = this->arm_v4bx_stubs_.begin();
       p != this->arm_v4bx_stubs_.end();
       ++p)
    {
      if (*p == NULL)
	continue;

      const Stub_template* stub_template = (*p)->stub_template();
      uint64_t stub_addralign = stub_template->alignment();
      off = align_address(off, stub_addralign);
      (*p)->set_offset(off);
      off += stub_template->size();
    }

  // The layout must fit within the size computed in the last
  // relaxation pass.
  gold_assert(off <= this->prev_data_size_);
}
4113
// Apply Cortex-A8 workaround to an address range between VIEW_ADDRESS
// and VIEW_ADDRESS + VIEW_SIZE - 1.  VIEW points to the mapped address
// of the address range seen by the linker.  For each recorded erratum
// stub whose branch falls inside the range, the target rewrites the
// offending branch to go through its stub.

template<bool big_endian>
void
Stub_table<big_endian>::apply_cortex_a8_workaround_to_address_range(
    Target_arm<big_endian>* arm_target,
    unsigned char* view,
    Arm_address view_address,
    section_size_type view_size)
{
  // Cortex-A8 stubs are sorted by addresses of branches being fixed up.
  // lower_bound() finds the first branch at or after the start of the
  // view; we stop once branches fall past the end of the view.
  for (Cortex_a8_stub_list::const_iterator p =
	 this->cortex_a8_stubs_.lower_bound(view_address);
       ((p != this->cortex_a8_stubs_.end())
	&& (p->first < (view_address + view_size)));
       ++p)
    {
      // We do not store the THUMB bit in the LSB of either the branch address
      // or the stub offset.  There is no need to strip the LSB.
      Arm_address branch_address = p->first;
      const Cortex_a8_stub* stub = p->second;
      Arm_address stub_address = this->address() + stub->offset();

      // Offset of the branch instruction relative to this view.
      section_size_type offset =
	convert_to_section_size_type(branch_address - view_address);
      // The 4-byte branch being patched must lie within the view.
      gold_assert((offset + 4) <= view_size);

      arm_target->apply_cortex_a8_workaround(stub, stub_address,
					     view + offset, branch_address);
    }
}
4148
4149 // Arm_input_section methods.
4150
// Initialize an Arm_input_section.  Caches the original section's size
// and alignment, and mirrors the address and file offset the section
// already has within its output section.

template<bool big_endian>
void
Arm_input_section<big_endian>::init()
{
  Relobj* relobj = this->relobj();
  unsigned int shndx = this->shndx();

  // Cache these to speed up size and alignment queries.  It is too slow
  // to call section_addralign and section_size every time.
  this->original_addralign_ = relobj->section_addralign(shndx);
  this->original_size_ = relobj->section_size(shndx);

  // We want to make this look like the original input section after
  // output sections are finalized.
  Output_section* os = relobj->output_section(shndx);
  off_t offset = relobj->output_section_offset(shndx);
  // The section must have been assigned a place in an output section.
  gold_assert(os != NULL && !relobj->is_output_section_offset_invalid(shndx));
  this->set_address(os->address() + offset);
  this->set_file_offset(os->offset() + offset);

  // Start with the original size; a stub table appended later can grow it.
  this->set_current_data_size(this->original_size_);
  this->finalize_data_size();
}
4176
4177 template<bool big_endian>
4178 void
4179 Arm_input_section<big_endian>::do_write(Output_file* of)
4180 {
4181 // We have to write out the original section content.
4182 section_size_type section_size;
4183 const unsigned char* section_contents =
4184 this->relobj()->section_contents(this->shndx(), &section_size, false);
4185 of->write(this->offset(), section_contents, section_size);
4186
4187 // If this owns a stub table and it is not empty, write it.
4188 if (this->is_stub_table_owner() && !this->stub_table_->empty())
4189 this->stub_table_->write(of);
4190 }
4191
// Finalize data size.  If this section owns a stub table, the table is
// placed immediately after the original contents (suitably aligned) and
// its address and file offset are fixed here as well.

template<bool big_endian>
void
Arm_input_section<big_endian>::set_final_data_size()
{
  // If this owns a stub table, finalize its data size as well.
  if (this->is_stub_table_owner())
    {
      uint64_t address = this->address();

      // The stub table comes after the original section contents.
      address += this->original_size_;
      address = align_address(address, this->stub_table_->addralign());
      // Keep the file offset in step with the address adjustment.
      off_t offset = this->offset() + (address - this->address());
      this->stub_table_->set_address_and_file_offset(address, offset);
      address += this->stub_table_->data_size();
      // Contents plus stub table must add up exactly to the current size
      // computed by do_reset_address_and_file_offset().
      gold_assert(address == this->address() + this->current_data_size());
    }

  this->set_data_size(this->current_data_size());
}
4214
4215 // Reset address and file offset.
4216
4217 template<bool big_endian>
4218 void
4219 Arm_input_section<big_endian>::do_reset_address_and_file_offset()
4220 {
4221 // Size of the original input section contents.
4222 off_t off = convert_types<off_t, uint64_t>(this->original_size_);
4223
4224 // If this is a stub table owner, account for the stub table size.
4225 if (this->is_stub_table_owner())
4226 {
4227 Stub_table<big_endian>* stub_table = this->stub_table_;
4228
4229 // Reset the stub table's address and file offset. The
4230 // current data size for child will be updated after that.
4231 stub_table_->reset_address_and_file_offset();
4232 off = align_address(off, stub_table_->addralign());
4233 off += stub_table->current_data_size();
4234 }
4235
4236 this->set_current_data_size(off);
4237 }
4238
4239 // Arm_output_section methods.
4240
// Create a stub group for input sections from BEGIN to END.  OWNER
// points to the input section to be the owner of a new stub table.
// OWNER is converted into a relaxed input section (if it is not one
// already) so that the stub table can be appended to its contents, and
// every input section in [BEGIN, END] is told which stub table serves it.
// Newly created relaxed sections are appended to NEW_RELAXED_SECTIONS
// for batch conversion by the caller.

template<bool big_endian>
void
Arm_output_section<big_endian>::create_stub_group(
    Input_section_list::const_iterator begin,
    Input_section_list::const_iterator end,
    Input_section_list::const_iterator owner,
    Target_arm<big_endian>* target,
    std::vector<Output_relaxed_input_section*>* new_relaxed_sections)
{
  // Currently we convert ordinary input sections into relaxed sections only
  // at this point but we may want to support creating relaxed input section
  // very early.  So we check here to see if owner is already a relaxed
  // section.

  Arm_input_section<big_endian>* arm_input_section;
  if (owner->is_relaxed_input_section())
    {
      arm_input_section =
	Arm_input_section<big_endian>::as_arm_input_section(
	  owner->relaxed_input_section());
    }
  else
    {
      gold_assert(owner->is_input_section());
      // Create a new relaxed input section.
      arm_input_section =
	target->new_arm_input_section(owner->relobj(), owner->shndx());
      new_relaxed_sections->push_back(arm_input_section);
    }

  // Create a stub table.
  Stub_table<big_endian>* stub_table =
    target->new_stub_table(arm_input_section);

  arm_input_section->set_stub_table(stub_table);

  Input_section_list::const_iterator p = begin;
  Input_section_list::const_iterator prev_p;

  // Look for input sections or relaxed input sections in [begin ... end].
  // Note: END is inclusive, hence the do/while loop that stops only
  // after END itself has been processed.
  do
    {
      if (p->is_input_section() || p->is_relaxed_input_section())
	{
	  // The stub table information for input sections live
	  // in their objects.
	  Arm_relobj<big_endian>* arm_relobj =
	    Arm_relobj<big_endian>::as_arm_relobj(p->relobj());
	  arm_relobj->set_stub_table(p->shndx(), stub_table);
	}
      prev_p = p++;
    }
  while (prev_p != end);
}
4298
// Group input sections for stub generation.  GROUP_SIZE is roughly the limit
// of stub groups.  We grow a stub group by adding input sections until the
// size is just below GROUP_SIZE.  The last input section will be converted
// into a stub table.  If STUBS_ALWAYS_AFTER_BRANCH is false, we also add
// input sections after the stub table, effectively doubling the group size.
//
// This is similar to the group_sections() function in elf32-arm.c but is
// implemented differently.

template<bool big_endian>
void
Arm_output_section<big_endian>::group_sections(
    section_size_type group_size,
    bool stubs_always_after_branch,
    Target_arm<big_endian>* target)
{
  // We only care about sections containing code.
  if ((this->flags() & elfcpp::SHF_EXECINSTR) == 0)
    return;

  // States for grouping.
  typedef enum
  {
    // No group is being built.
    NO_GROUP,
    // A group is being built but the stub table is not found yet.
    // We keep growing a stub group until the size is just under GROUP_SIZE.
    // The last input section in the group will be used as the stub table.
    FINDING_STUB_SECTION,
    // A group is being built and we have already found a stub table.
    // We enter this state to grow a stub group by adding input sections
    // after the stub table.  This effectively doubles the group size.
    HAS_STUB_SECTION
  } State;

  // Any newly created relaxed sections are stored here.
  std::vector<Output_relaxed_input_section*> new_relaxed_sections;

  State state = NO_GROUP;
  section_size_type off = 0;
  section_size_type group_begin_offset = 0;
  section_size_type group_end_offset = 0;
  section_size_type stub_table_end_offset = 0;
  Input_section_list::const_iterator group_begin =
    this->input_sections().end();
  Input_section_list::const_iterator stub_table =
    this->input_sections().end();
  Input_section_list::const_iterator group_end = this->input_sections().end();
  for (Input_section_list::const_iterator p = this->input_sections().begin();
       p != this->input_sections().end();
       ++p)
    {
      // Compute where this section would start and end within the
      // output section, honouring its alignment.
      section_size_type section_begin_offset =
	align_address(off, p->addralign());
      section_size_type section_end_offset =
	section_begin_offset + p->data_size();

      // Check to see if we should group the previously seen sections.
      switch (state)
	{
	case NO_GROUP:
	  break;

	case FINDING_STUB_SECTION:
	  // Adding this section makes the group larger than GROUP_SIZE.
	  if (section_end_offset - group_begin_offset >= group_size)
	    {
	      if (stubs_always_after_branch)
		{
		  // Close the group here; the last section seen
		  // (group_end) becomes the stub table owner.
		  gold_assert(group_end != this->input_sections().end());
		  this->create_stub_group(group_begin, group_end, group_end,
					  target, &new_relaxed_sections);
		  state = NO_GROUP;
		}
	      else
		{
		  // But wait, there's more!  Input sections up to
		  // stub_group_size bytes after the stub table can be
		  // handled by it too.
		  state = HAS_STUB_SECTION;
		  stub_table = group_end;
		  stub_table_end_offset = group_end_offset;
		}
	    }
	  break;

	case HAS_STUB_SECTION:
	  // Adding this section makes the post stub-section group larger
	  // than GROUP_SIZE.
	  if (section_end_offset - stub_table_end_offset >= group_size)
	    {
	      gold_assert(group_end != this->input_sections().end());
	      this->create_stub_group(group_begin, group_end, stub_table,
				      target, &new_relaxed_sections);
	      state = NO_GROUP;
	    }
	  break;

	default:
	  gold_unreachable();
	}

      // If we see an input section and currently there is no group, start
      // a new one.  Skip any empty sections.
      if ((p->is_input_section() || p->is_relaxed_input_section())
	  && (p->relobj()->section_size(p->shndx()) != 0))
	{
	  if (state == NO_GROUP)
	    {
	      state = FINDING_STUB_SECTION;
	      group_begin = p;
	      group_begin_offset = section_begin_offset;
	    }

	  // Keep track of the last input section seen.
	  group_end = p;
	  group_end_offset = section_end_offset;
	}

      off = section_end_offset;
    }

  // Create a stub group for any ungrouped sections.
  if (state == FINDING_STUB_SECTION || state == HAS_STUB_SECTION)
    {
      gold_assert(group_end != this->input_sections().end());
      this->create_stub_group(group_begin, group_end,
			      (state == FINDING_STUB_SECTION
			       ? group_end
			       : stub_table),
			      target, &new_relaxed_sections);
    }

  // Convert input sections into relaxed input sections in a batch.
  if (!new_relaxed_sections.empty())
    this->convert_input_sections_to_relaxed_sections(new_relaxed_sections);

  // Update the section offsets
  for (size_t i = 0; i < new_relaxed_sections.size(); ++i)
    {
      Arm_relobj<big_endian>* arm_relobj =
	Arm_relobj<big_endian>::as_arm_relobj(
	  new_relaxed_sections[i]->relobj());
      unsigned int shndx = new_relaxed_sections[i]->shndx();
      // Tell Arm_relobj that this input section is converted.
      arm_relobj->convert_input_section_to_relaxed_section(shndx);
    }
}
4447
4448 // Arm_relobj methods.
4449
4450 // Determine if we want to scan the SHNDX-th section for relocation stubs.
4451 // This is a helper for Arm_relobj::scan_sections_for_stubs() below.
4452
4453 template<bool big_endian>
4454 bool
4455 Arm_relobj<big_endian>::section_needs_reloc_stub_scanning(
4456 const elfcpp::Shdr<32, big_endian>& shdr,
4457 const Relobj::Output_sections& out_sections,
4458 const Symbol_table *symtab)
4459 {
4460 unsigned int sh_type = shdr.get_sh_type();
4461 if (sh_type != elfcpp::SHT_REL && sh_type != elfcpp::SHT_RELA)
4462 return false;
4463
4464 // Ignore empty section.
4465 off_t sh_size = shdr.get_sh_size();
4466 if (sh_size == 0)
4467 return false;
4468
4469 // Ignore reloc section with bad info. This error will be
4470 // reported in the final link.
4471 unsigned int index = this->adjust_shndx(shdr.get_sh_info());
4472 if (index >= this->shnum())
4473 return false;
4474
4475 // This relocation section is against a section which we
4476 // discarded or if the section is folded into another
4477 // section due to ICF.
4478 if (out_sections[index] == NULL || symtab->is_section_folded(this, index))
4479 return false;
4480
4481 // Ignore reloc section with unexpected symbol table. The
4482 // error will be reported in the final link.
4483 if (this->adjust_shndx(shdr.get_sh_link()) != this->symtab_shndx())
4484 return false;
4485
4486 unsigned int reloc_size;
4487 if (sh_type == elfcpp::SHT_REL)
4488 reloc_size = elfcpp::Elf_sizes<32>::rel_size;
4489 else
4490 reloc_size = elfcpp::Elf_sizes<32>::rela_size;
4491
4492 // Ignore reloc section with unexpected entsize or uneven size.
4493 // The error will be reported in the final link.
4494 if (reloc_size != shdr.get_sh_entsize() || sh_size % reloc_size != 0)
4495 return false;
4496
4497 return true;
4498 }
4499
4500 // Determine if we want to scan the SHNDX-th section for non-relocation stubs.
4501 // This is a helper for Arm_relobj::scan_sections_for_stubs() below.
4502
4503 template<bool big_endian>
4504 bool
4505 Arm_relobj<big_endian>::section_needs_cortex_a8_stub_scanning(
4506 const elfcpp::Shdr<32, big_endian>& shdr,
4507 unsigned int shndx,
4508 Output_section* os,
4509 const Symbol_table* symtab)
4510 {
4511 // We only scan non-empty code sections.
4512 if ((shdr.get_sh_flags() & elfcpp::SHF_EXECINSTR) == 0
4513 || shdr.get_sh_size() == 0)
4514 return false;
4515
4516 // Ignore discarded or ICF'ed sections.
4517 if (os == NULL || symtab->is_section_folded(this, shndx))
4518 return false;
4519
4520 // Find output address of section.
4521 Arm_address address = os->output_address(this, shndx, 0);
4522
4523 // If the section does not cross any 4K-boundaries, it does not need to
4524 // be scanned.
4525 if ((address & ~0xfffU) == ((address + shdr.get_sh_size() - 1) & ~0xfffU))
4526 return false;
4527
4528 return true;
4529 }
4530
// Scan a section for the Cortex-A8 workaround.  Walk the section's
// mapping symbols and hand each Thumb span that crosses a 4K page
// boundary to the target for erratum scanning.

template<bool big_endian>
void
Arm_relobj<big_endian>::scan_section_for_cortex_a8_erratum(
    const elfcpp::Shdr<32, big_endian>& shdr,
    unsigned int shndx,
    Output_section* os,
    Target_arm<big_endian>* arm_target)
{
  Arm_address output_address = os->output_address(this, shndx, 0);

  // Get the section contents.
  section_size_type input_view_size = 0;
  const unsigned char* input_view =
    this->section_contents(shndx, &input_view_size, false);

  // We need to go through the mapping symbols to determine what to
  // scan.  There are two reasons.  First, we should look at THUMB code and
  // THUMB code only.  Second, we only want to look at the 4K-page boundary
  // to speed up the scanning.

  // Look for the first mapping symbol in this section.  It should be
  // at (shndx, 0).
  Mapping_symbol_position section_start(shndx, 0);
  typename Mapping_symbols_info::const_iterator p =
    this->mapping_symbols_info_.lower_bound(section_start);

  // Without a mapping symbol at the start of the section we cannot tell
  // where Thumb code is, so warn and give up on this section.
  if (p == this->mapping_symbols_info_.end()
      || p->first != section_start)
    {
      gold_warning(_("Cortex-A8 erratum scanning failed because there "
		     "is no mapping symbols for section %u of %s"),
		   shndx, this->name().c_str());
      return;
    }

  // Walk all mapping symbols belonging to this section.  Each pair of
  // consecutive symbols delimits a span of uniformly-typed contents.
  while (p != this->mapping_symbols_info_.end()
	 && p->first.first == shndx)
    {
      typename Mapping_symbols_info::const_iterator next =
	this->mapping_symbols_info_.upper_bound(p->first);

      // Only scan part of a section with THUMB code.
      if (p->second == 't')
	{
	  // Determine the end of this range: the next mapping symbol in
	  // the same section, or the end of the section.
	  section_size_type span_start =
	    convert_to_section_size_type(p->first.second);
	  section_size_type span_end;
	  if (next != this->mapping_symbols_info_.end()
	      && next->first.first == shndx)
	    span_end = convert_to_section_size_type(next->first.second);
	  else
	    span_end = convert_to_section_size_type(shdr.get_sh_size());

	  // Only spans crossing a 4K page boundary in the output can
	  // trigger the erratum; skip the rest.
	  if (((span_start + output_address) & ~0xfffUL)
	      != ((span_end + output_address - 1) & ~0xfffUL))
	    {
	      arm_target->scan_span_for_cortex_a8_erratum(this, shndx,
							  span_start, span_end,
							  input_view,
							  output_address);
	    }
	}

      p = next;
    }
}
4600
// Scan relocations for stub generation.  This makes two passes over the
// section headers: one over relocation sections to find branches needing
// relocation stubs, and, when the Cortex-A8 fix is enabled, one over code
// sections to find branches needing erratum stubs.

template<bool big_endian>
void
Arm_relobj<big_endian>::scan_sections_for_stubs(
    Target_arm<big_endian>* arm_target,
    const Symbol_table* symtab,
    const Layout* layout)
{
  unsigned int shnum = this->shnum();
  const unsigned int shdr_size = elfcpp::Elf_sizes<32>::shdr_size;

  // Read the section headers.
  const unsigned char* pshdrs = this->get_view(this->elf_file()->shoff(),
					       shnum * shdr_size,
					       true, true);

  // To speed up processing, we set up hash tables for fast lookup of
  // input offsets to output addresses.
  this->initialize_input_to_output_maps();

  const Relobj::Output_sections& out_sections(this->output_sections());

  Relocate_info<32, big_endian> relinfo;
  relinfo.symtab = symtab;
  relinfo.layout = layout;
  relinfo.object = this;

  // Do relocation stubs scanning.  Section 0 is the null section header,
  // so start at 1.
  const unsigned char* p = pshdrs + shdr_size;
  for (unsigned int i = 1; i < shnum; ++i, p += shdr_size)
    {
      const elfcpp::Shdr<32, big_endian> shdr(p);
      if (this->section_needs_reloc_stub_scanning(shdr, out_sections, symtab))
	{
	  // INDEX is the section the relocations apply to.
	  unsigned int index = this->adjust_shndx(shdr.get_sh_info());
	  Arm_address output_offset = this->get_output_section_offset(index);
	  Arm_address output_address;
	  if(output_offset != invalid_address)
	    output_address = out_sections[index]->address() + output_offset;
	  else
	    {
	      // Currently this only happens for a relaxed section.
	      const Output_relaxed_input_section* poris =
		out_sections[index]->find_relaxed_input_section(this, index);
	      gold_assert(poris != NULL);
	      output_address = poris->address();
	    }

	  // Get the relocations.
	  const unsigned char* prelocs = this->get_view(shdr.get_sh_offset(),
							shdr.get_sh_size(),
							true, false);

	  // Get the section contents.  This does work for the case in which
	  // we modify the contents of an input section.  We need to pass the
	  // output view under such circumstances.
	  section_size_type input_view_size = 0;
	  const unsigned char* input_view =
	    this->section_contents(index, &input_view_size, false);

	  relinfo.reloc_shndx = i;
	  relinfo.data_shndx = index;
	  unsigned int sh_type = shdr.get_sh_type();
	  unsigned int reloc_size;
	  if (sh_type == elfcpp::SHT_REL)
	    reloc_size = elfcpp::Elf_sizes<32>::rel_size;
	  else
	    reloc_size = elfcpp::Elf_sizes<32>::rela_size;

	  Output_section* os = out_sections[index];
	  arm_target->scan_section_for_stubs(&relinfo, sh_type, prelocs,
					     shdr.get_sh_size() / reloc_size,
					     os,
					     output_offset == invalid_address,
					     input_view, output_address,
					     input_view_size);
	}
    }

  // Do Cortex-A8 erratum stubs scanning.  This has to be done for a section
  // after its relocation section, if there is one, is processed for
  // relocation stubs.  Merging this loop with the one above would have been
  // complicated since we would have had to make sure that relocation stub
  // scanning is done first.
  if (arm_target->fix_cortex_a8())
    {
      const unsigned char* p = pshdrs + shdr_size;
      for (unsigned int i = 1; i < shnum; ++i, p += shdr_size)
	{
	  const elfcpp::Shdr<32, big_endian> shdr(p);
	  if (this->section_needs_cortex_a8_stub_scanning(shdr, i,
							  out_sections[i],
							  symtab))
	    this->scan_section_for_cortex_a8_erratum(shdr, i, out_sections[i],
						     arm_target);
	}
    }

  // After we've done the relocations, we release the hash tables,
  // since we no longer need them.
  this->free_input_to_output_maps();
}
4704
// Count the local symbols.  The ARM backend needs to know if a symbol
// is a THUMB function or not.  For global symbols, it is easy because
// the Symbol object keeps the ELF symbol type.  For local symbols it is
// harder because we cannot access this information.  So we override
// do_count_local_symbols in the parent and scan local symbols to mark
// THUMB functions.  This is not the most efficient way but I do not want
// to slow down other ports by calling a per-symbol target hook inside
// Sized_relobj<size, big_endian>::do_count_local_symbols.
4713
template<bool big_endian>
void
Arm_relobj<big_endian>::do_count_local_symbols(
    Stringpool_template<char>* pool,
    Stringpool_template<char>* dynpool)
{
  // We need to fix-up the values of any local symbols whose type are
  // STT_ARM_TFUNC.

  // Ask parent to count the local symbols.
  Sized_relobj<32, big_endian>::do_count_local_symbols(pool, dynpool);
  const unsigned int loccount = this->local_symbol_count();
  if (loccount == 0)
    return;

  // Initialize the thumb function bit-vector.  Swapping with a fresh
  // local vector both sizes it to LOCCOUNT and clears any previous
  // contents.
  std::vector<bool> empty_vector(loccount, false);
  this->local_symbol_is_thumb_function_.swap(empty_vector);

  // Read the symbol table section header.
  const unsigned int symtab_shndx = this->symtab_shndx();
  elfcpp::Shdr<32, big_endian>
    symtabshdr(this, this->elf_file()->section_header(symtab_shndx));
  gold_assert(symtabshdr.get_sh_type() == elfcpp::SHT_SYMTAB);

  // Read the local symbols.  sh_info of a SHT_SYMTAB section holds the
  // number of local symbols, which must match the parent's count.
  const int sym_size =elfcpp::Elf_sizes<32>::sym_size;
  gold_assert(loccount == symtabshdr.get_sh_info());
  off_t locsize = loccount * sym_size;
  const unsigned char* psyms = this->get_view(symtabshdr.get_sh_offset(),
					      locsize, true, true);

  // For mapping symbol processing, we need to read the symbol names,
  // so fetch the string table linked from the symbol table.
  unsigned int strtab_shndx = this->adjust_shndx(symtabshdr.get_sh_link());
  if (strtab_shndx >= this->shnum())
    {
      this->error(_("invalid symbol table name index: %u"), strtab_shndx);
      return;
    }

  elfcpp::Shdr<32, big_endian>
    strtabshdr(this, this->elf_file()->section_header(strtab_shndx));
  if (strtabshdr.get_sh_type() != elfcpp::SHT_STRTAB)
    {
      this->error(_("symbol table name section has wrong type: %u"),
                  static_cast<unsigned int>(strtabshdr.get_sh_type()));
      return;
    }
  const char* pnames =
    reinterpret_cast<const char*>(this->get_view(strtabshdr.get_sh_offset(),
						 strtabshdr.get_sh_size(),
						 false, false));

  // Loop over the local symbols and mark any local symbols pointing
  // to THUMB functions.

  // Skip the first dummy symbol (index 0 is always the null symbol in
  // an ELF symbol table).
  psyms += sym_size;
  typename Sized_relobj<32, big_endian>::Local_values* plocal_values =
    this->local_values();
  for (unsigned int i = 1; i < loccount; ++i, psyms += sym_size)
    {
      elfcpp::Sym<32, big_endian> sym(psyms);
      elfcpp::STT st_type = sym.get_st_type();
      Symbol_value<32>& lv((*plocal_values)[i]);
      Arm_address input_value = lv.input_value();

      // Check to see if this is a mapping symbol ($a, $t, $d, ...).
      const char* sym_name = pnames + sym.get_st_name();
      if (Target_arm<big_endian>::is_mapping_symbol_name(sym_name))
	{
	  // NOTE(review): raw st_shndx is used here without
	  // adjust_shndx(); fine unless the object uses SHN_XINDEX --
	  // confirm against extended-section-index objects.
	  unsigned int input_shndx = sym.get_st_shndx();

	  // Strip off LSB in case this is a THUMB symbol.
	  // sym_name[1] is the classification character after '$';
	  // presumably is_mapping_symbol_name guarantees it exists.
	  Mapping_symbol_position msp(input_shndx, input_value & ~1U);
	  this->mapping_symbols_info_[msp] = sym_name[1];
	}

      if (st_type == elfcpp::STT_ARM_TFUNC
	  || (st_type == elfcpp::STT_FUNC && ((input_value & 1) != 0)))
	{
	  // This is a THUMB function.  Mark this and canonicalize the
	  // symbol value by setting LSB.
	  this->local_symbol_is_thumb_function_[i] = true;
	  if ((input_value & 1) == 0)
	    lv.set_input_value(input_value | 1);
	}
    }
}
4803
4804 // Relocate sections.
template<bool big_endian>
void
Arm_relobj<big_endian>::do_relocate_sections(
    const Symbol_table* symtab,
    const Layout* layout,
    const unsigned char* pshdrs,
    typename Sized_relobj<32, big_endian>::Views* pviews)
{
  // Call parent to relocate sections.
  Sized_relobj<32, big_endian>::do_relocate_sections(symtab, layout, pshdrs,
						     pviews);

  // We do not generate stubs if doing a relocatable link.
  if (parameters->options().relocatable())
    return;

  // Relocate stub tables.
  unsigned int shnum = this->shnum();

  Target_arm<big_endian>* arm_target =
    Target_arm<big_endian>::default_target();

  Relocate_info<32, big_endian> relinfo;
  relinfo.symtab = symtab;
  relinfo.layout = layout;
  relinfo.object = this;

  // Walk all input sections; handle (1) stub tables owned by a
  // section and (2) Cortex-A8 erratum fix-ups for a section.
  for (unsigned int i = 1; i < shnum; ++i)
    {
      Arm_input_section<big_endian>* arm_input_section =
	arm_target->find_arm_input_section(this, i);

      if (arm_input_section != NULL
	  && arm_input_section->is_stub_table_owner()
	  && !arm_input_section->stub_table()->empty())
	{
	  // We cannot discard a section if it owns a stub table.
	  Output_section* os = this->output_section(i);
	  gold_assert(os != NULL);

	  // Stubs are synthesized, so there is no relocation section.
	  relinfo.reloc_shndx = elfcpp::SHN_UNDEF;
	  relinfo.reloc_shdr = NULL;
	  relinfo.data_shndx = i;
	  relinfo.data_shdr = pshdrs + i * elfcpp::Elf_sizes<32>::shdr_size;

	  gold_assert((*pviews)[i].view != NULL);

	  // We are passed the output section view.  Adjust it to cover the
	  // stub table only.
	  Stub_table<big_endian>* stub_table = arm_input_section->stub_table();
	  gold_assert((stub_table->address() >= (*pviews)[i].address)
		      && ((stub_table->address() + stub_table->data_size())
			  <= (*pviews)[i].address + (*pviews)[i].view_size));

	  off_t offset = stub_table->address() - (*pviews)[i].address;
	  unsigned char* view = (*pviews)[i].view + offset;
	  Arm_address address = stub_table->address();
	  section_size_type view_size = stub_table->data_size();

	  stub_table->relocate_stubs(&relinfo, arm_target, os, view, address,
				     view_size);
	}

      // Apply Cortex A8 workaround if applicable.
      if (this->section_has_cortex_a8_workaround(i))
	{
	  unsigned char* view = (*pviews)[i].view;
	  Arm_address view_address = (*pviews)[i].address;
	  section_size_type view_size = (*pviews)[i].view_size;
	  Stub_table<big_endian>* stub_table = this->stub_tables_[i];

	  // Adjust view to cover section.  The view may span more than
	  // this one input section, so narrow it.
	  Output_section* os = this->output_section(i);
	  gold_assert(os != NULL);
	  Arm_address section_address = os->output_address(this, i, 0);
	  uint64_t section_size = this->section_size(i);

	  gold_assert(section_address >= view_address
		      && ((section_address + section_size)
			  <= (view_address + view_size)));

	  unsigned char* section_view = view + (section_address - view_address);

	  // Apply the Cortex-A8 workaround to the output address range
	  // corresponding to this input section.
	  stub_table->apply_cortex_a8_workaround_to_address_range(
	      arm_target,
	      section_view,
	      section_address,
	      section_size);
	}
    }
}
4898
4899 // Helper functions for both Arm_relobj and Arm_dynobj to read ARM
4900 // ABI information.
4901
4902 template<bool big_endian>
4903 Attributes_section_data*
4904 read_arm_attributes_section(
4905 Object* object,
4906 Read_symbols_data *sd)
4907 {
4908 // Read the attributes section if there is one.
4909 // We read from the end because gas seems to put it near the end of
4910 // the section headers.
4911 const size_t shdr_size = elfcpp::Elf_sizes<32>::shdr_size;
4912 const unsigned char *ps =
4913 sd->section_headers->data() + shdr_size * (object->shnum() - 1);
4914 for (unsigned int i = object->shnum(); i > 0; --i, ps -= shdr_size)
4915 {
4916 elfcpp::Shdr<32, big_endian> shdr(ps);
4917 if (shdr.get_sh_type() == elfcpp::SHT_ARM_ATTRIBUTES)
4918 {
4919 section_offset_type section_offset = shdr.get_sh_offset();
4920 section_size_type section_size =
4921 convert_to_section_size_type(shdr.get_sh_size());
4922 File_view* view = object->get_lasting_view(section_offset,
4923 section_size, true, false);
4924 return new Attributes_section_data(view->data(), section_size);
4925 }
4926 }
4927 return NULL;
4928 }
4929
4930 // Read the symbol information.
4931
template<bool big_endian>
void
Arm_relobj<big_endian>::do_read_symbols(Read_symbols_data* sd)
{
  // Call parent class to read symbol information.
  Sized_relobj<32, big_endian>::do_read_symbols(sd);

  // Read processor-specific flags in ELF file header (EABI version,
  // BE8 flag, etc.) so that they can be merged and checked later.
  const unsigned char* pehdr = this->get_view(elfcpp::file_header_offset,
					      elfcpp::Elf_sizes<32>::ehdr_size,
					      true, false);
  elfcpp::Ehdr<32, big_endian> ehdr(pehdr);
  this->processor_specific_flags_ = ehdr.get_e_flags();
  // Cache the parsed .ARM.attributes section (may be NULL if absent).
  this->attributes_section_data_ =
    read_arm_attributes_section<big_endian>(this, sd);
}
4948
// Process relocations for garbage collection.  The ARM target uses
// .ARM.exidx sections for unwinding.  These sections are referenced
// implicitly by text sections linked in the section headers.  If we
// ignore these implicit references, the .ARM.exidx sections and any
// .ARM.extab sections they use will be garbage-collected incorrectly.
// Hence we override the same function in the base class to handle
// these implicit references.
4955
4956 template<bool big_endian>
4957 void
4958 Arm_relobj<big_endian>::do_gc_process_relocs(Symbol_table* symtab,
4959 Layout* layout,
4960 Read_relocs_data* rd)
4961 {
4962 // First, call base class method to process relocations in this object.
4963 Sized_relobj<32, big_endian>::do_gc_process_relocs(symtab, layout, rd);
4964
4965 unsigned int shnum = this->shnum();
4966 const unsigned int shdr_size = elfcpp::Elf_sizes<32>::shdr_size;
4967 const unsigned char* pshdrs = this->get_view(this->elf_file()->shoff(),
4968 shnum * shdr_size,
4969 true, true);
4970
4971 // Scan section headers for sections of type SHT_ARM_EXIDX. Add references
4972 // to these from the linked text sections.
4973 const unsigned char* ps = pshdrs + shdr_size;
4974 for (unsigned int i = 1; i < shnum; ++i, ps += shdr_size)
4975 {
4976 elfcpp::Shdr<32, big_endian> shdr(ps);
4977 if (shdr.get_sh_type() == elfcpp::SHT_ARM_EXIDX)
4978 {
4979 // Found an .ARM.exidx section, add it to the set of reachable
4980 // sections from its linked text section.
4981 unsigned int text_shndx = this->adjust_shndx(shdr.get_sh_link());
4982 symtab->gc()->add_reference(this, text_shndx, this, i);
4983 }
4984 }
4985 }
4986
4987 // Arm_dynobj methods.
4988
4989 // Read the symbol information.
4990
template<bool big_endian>
void
Arm_dynobj<big_endian>::do_read_symbols(Read_symbols_data* sd)
{
  // Call parent class to read symbol information.
  Sized_dynobj<32, big_endian>::do_read_symbols(sd);

  // Read processor-specific flags in ELF file header.  This mirrors
  // Arm_relobj::do_read_symbols so that relocatable and dynamic
  // objects expose the same ARM-specific information.
  const unsigned char* pehdr = this->get_view(elfcpp::file_header_offset,
					      elfcpp::Elf_sizes<32>::ehdr_size,
					      true, false);
  elfcpp::Ehdr<32, big_endian> ehdr(pehdr);
  this->processor_specific_flags_ = ehdr.get_e_flags();
  // Cache the parsed .ARM.attributes section (may be NULL if absent).
  this->attributes_section_data_ =
    read_arm_attributes_section<big_endian>(this, sd);
}
5007
5008 // Stub_addend_reader methods.
5009
5010 // Read the addend of a REL relocation of type R_TYPE at VIEW.
5011
template<bool big_endian>
elfcpp::Elf_types<32>::Elf_Swxword
Stub_addend_reader<elfcpp::SHT_REL, big_endian>::operator()(
    unsigned int r_type,
    const unsigned char* view,
    const typename Reloc_types<elfcpp::SHT_REL, 32, big_endian>::Reloc&) const
{
  typedef struct Arm_relocate_functions<big_endian> RelocFuncs;

  // For REL relocations the addend is stored in the instruction bytes
  // themselves; extract it according to the branch encoding.
  switch (r_type)
    {
    case elfcpp::R_ARM_CALL:
    case elfcpp::R_ARM_JUMP24:
    case elfcpp::R_ARM_PLT32:
      {
	// ARM branch: take the low 24 bits of the instruction, shift
	// left 2 (instructions are word-aligned) and sign-extend the
	// resulting 26-bit byte offset.
	typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
	const Valtype* wv = reinterpret_cast<const Valtype*>(view);
	Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
	return utils::sign_extend<26>(val << 2);
      }

    case elfcpp::R_ARM_THM_CALL:
    case elfcpp::R_ARM_THM_JUMP24:
    case elfcpp::R_ARM_THM_XPC22:
      {
	// Thumb-2 branch: the offset is split across two 16-bit
	// halfwords; let the reloc helper reassemble it.
	typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
	const Valtype* wv = reinterpret_cast<const Valtype*>(view);
	Valtype upper_insn = elfcpp::Swap<16, big_endian>::readval(wv);
	Valtype lower_insn = elfcpp::Swap<16, big_endian>::readval(wv + 1);
	return RelocFuncs::thumb32_branch_offset(upper_insn, lower_insn);
      }

    case elfcpp::R_ARM_THM_JUMP19:
      {
	// Thumb-2 conditional branch: narrower offset field, different
	// bit layout from the unconditional form.
	typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
	const Valtype* wv = reinterpret_cast<const Valtype*>(view);
	Valtype upper_insn = elfcpp::Swap<16, big_endian>::readval(wv);
	Valtype lower_insn = elfcpp::Swap<16, big_endian>::readval(wv + 1);
	return RelocFuncs::thumb32_cond_branch_offset(upper_insn, lower_insn);
      }

    default:
      // Only branch-type relocations need stub addends.
      gold_unreachable();
    }
}
5057
5058 // A class to handle the PLT data.
5059
// Output_data_plt_arm: the ARM procedure linkage table.  It lays out
// one 20-byte header entry followed by one 12-byte entry per imported
// function, keeps the companion .got.plt section in sync, and owns the
// .rel.plt relocation section.

template<bool big_endian>
class Output_data_plt_arm : public Output_section_data
{
 public:
  typedef Output_data_reloc<elfcpp::SHT_REL, true, 32, big_endian>
    Reloc_section;

  Output_data_plt_arm(Layout*, Output_data_space*);

  // Add an entry to the PLT.
  void
  add_entry(Symbol* gsym);

  // Return the .rel.plt section data.
  const Reloc_section*
  rel_plt() const
  { return this->rel_; }

 protected:
  void
  do_adjust_output_section(Output_section* os);

  // Write to a map file.
  void
  do_print_to_mapfile(Mapfile* mapfile) const
  { mapfile->print_output_data(this, _("** PLT")); }

 private:
  // Template for the first PLT entry (5 words).
  static const uint32_t first_plt_entry[5];

  // Template for subsequent PLT entries (3 words each).
  static const uint32_t plt_entry[3];

  // Set the final size: header entry plus one entry per symbol.
  void
  set_final_data_size()
  {
    this->set_data_size(sizeof(first_plt_entry)
			+ this->count_ * sizeof(plt_entry));
  }

  // Write out the PLT data.
  void
  do_write(Output_file*);

  // The reloc section.
  Reloc_section* rel_;
  // The .got.plt section.
  Output_data_space* got_plt_;
  // The number of PLT entries.
  unsigned int count_;
};
5113
5114 // Create the PLT section. The ordinary .got section is an argument,
5115 // since we need to refer to the start. We also create our own .got
5116 // section just for PLT entries.
5117
template<bool big_endian>
Output_data_plt_arm<big_endian>::Output_data_plt_arm(Layout* layout,
						     Output_data_space* got_plt)
  : Output_section_data(4), got_plt_(got_plt), count_(0)
{
  // Create the .rel.plt section holding the JUMP_SLOT relocations for
  // the PLT's GOT entries, and register it with the layout.
  this->rel_ = new Reloc_section(false);
  layout->add_output_section_data(".rel.plt", elfcpp::SHT_REL,
				  elfcpp::SHF_ALLOC, this->rel_, true, false,
				  false, false);
}
5128
template<bool big_endian>
void
Output_data_plt_arm<big_endian>::do_adjust_output_section(Output_section* os)
{
  // PLT entries are not uniform in size (the header entry is 5 words,
  // subsequent entries 3 words), so the section has no fixed entsize.
  os->set_entsize(0);
}
5135
5136 // Add an entry to the PLT.
5137
5138 template<bool big_endian>
5139 void
5140 Output_data_plt_arm<big_endian>::add_entry(Symbol* gsym)
5141 {
5142 gold_assert(!gsym->has_plt_offset());
5143
5144 // Note that when setting the PLT offset we skip the initial
5145 // reserved PLT entry.
5146 gsym->set_plt_offset((this->count_) * sizeof(plt_entry)
5147 + sizeof(first_plt_entry));
5148
5149 ++this->count_;
5150
5151 section_offset_type got_offset = this->got_plt_->current_data_size();
5152
5153 // Every PLT entry needs a GOT entry which points back to the PLT
5154 // entry (this will be changed by the dynamic linker, normally
5155 // lazily when the function is called).
5156 this->got_plt_->set_current_data_size(got_offset + 4);
5157
5158 // Every PLT entry needs a reloc.
5159 gsym->set_needs_dynsym_entry();
5160 this->rel_->add_global(gsym, elfcpp::R_ARM_JUMP_SLOT, this->got_plt_,
5161 got_offset);
5162
5163 // Note that we don't need to save the symbol. The contents of the
5164 // PLT are independent of which symbols are used. The symbols only
5165 // appear in the relocations.
5166 }
5167
5168 // ARM PLTs.
5169 // FIXME: This is not very flexible. Right now this has only been tested
5170 // on armv5te. If we are to support additional architecture features like
5171 // Thumb-2 or BE8, we need to make this more flexible like GNU ld.
5172
5173 // The first entry in the PLT.
// The header entry: pushes lr, then loads pc indirectly through
// GOT[2].  The final word is patched at write time with the
// PC-relative displacement to the start of the GOT.
template<bool big_endian>
const uint32_t Output_data_plt_arm<big_endian>::first_plt_entry[5] =
{
  0xe52de004,	// str lr, [sp, #-4]!
  0xe59fe004,	// ldr lr, [pc, #4]
  0xe08fe00e,	// add lr, pc, lr
  0xe5bef008,	// ldr pc, [lr, #8]!
  0x00000000,	// &GOT[0] - .
};
5183
5184 // Subsequent entries in the PLT.
5185
// Each subsequent entry materializes the address of its GOT slot as a
// PC-relative offset split into 8 + 8 + 12 bits across the three
// instructions (the 0xNN / 0xNNN fields are patched in do_write), and
// jumps through the slot.
template<bool big_endian>
const uint32_t Output_data_plt_arm<big_endian>::plt_entry[3] =
{
  0xe28fc600,	// add ip, pc, #0xNN00000
  0xe28cca00,	// add ip, ip, #0xNN000
  0xe5bcf000,	// ldr pc, [ip, #0xNNN]!
};
5193
5194 // Write out the PLT. This uses the hand-coded instructions above,
5195 // and adjusts them as needed. This is all specified by the arm ELF
5196 // Processor Supplement.
5197
template<bool big_endian>
void
Output_data_plt_arm<big_endian>::do_write(Output_file* of)
{
  // Map views over the PLT and the .got.plt output sections.
  const off_t offset = this->offset();
  const section_size_type oview_size =
    convert_to_section_size_type(this->data_size());
  unsigned char* const oview = of->get_output_view(offset, oview_size);

  const off_t got_file_offset = this->got_plt_->offset();
  const section_size_type got_size =
    convert_to_section_size_type(this->got_plt_->data_size());
  unsigned char* const got_view = of->get_output_view(got_file_offset,
						      got_size);
  unsigned char* pov = oview;

  Arm_address plt_address = this->address();
  Arm_address got_address = this->got_plt_->address();

  // Write first PLT entry.  All but the last word are constants.
  const size_t num_first_plt_words = (sizeof(first_plt_entry)
				      / sizeof(plt_entry[0]));
  for (size_t i = 0; i < num_first_plt_words - 1; i++)
    elfcpp::Swap<32, big_endian>::writeval(pov + i * 4, first_plt_entry[i]);
  // Last word in first PLT entry is &GOT[0] - .  (The "-8" PC bias is
  // folded in by the "add lr, pc, lr" at +8 in the entry itself.)
  elfcpp::Swap<32, big_endian>::writeval(pov + 16,
					 got_address - (plt_address + 16));
  pov += sizeof(first_plt_entry);

  unsigned char* got_pov = got_view;

  // The first three GOT words are reserved (GOT[0..2]); the dynamic
  // linker fills them in at load time.
  memset(got_pov, 0, 12);
  got_pov += 12;

  const int rel_size = elfcpp::Elf_sizes<32>::rel_size;
  unsigned int plt_offset = sizeof(first_plt_entry);
  unsigned int plt_rel_offset = 0;
  unsigned int got_offset = 12;
  const unsigned int count = this->count_;
  // Walk the PLT entries and their GOT slots in lockstep.
  for (unsigned int i = 0;
       i < count;
       ++i,
	 pov += sizeof(plt_entry),
	 got_pov += 4,
	 plt_offset += sizeof(plt_entry),
	 plt_rel_offset += rel_size,
	 got_offset += 4)
    {
      // Set and adjust the PLT entry itself.  OFFSET is the distance
      // from the entry's first instruction (plus the 8-byte ARM PC
      // read-ahead) to its GOT slot; it is split into 8 + 8 + 12 bits
      // across the three template instructions.
      int32_t offset = ((got_address + got_offset)
			- (plt_address + plt_offset + 8));

      // NOTE(review): the upper bound looks like it should be
      // <= 0x0fffffff (the full 28-bit range) -- confirm; as written
      // the exact maximum value would trip the assert.
      gold_assert(offset >= 0 && offset < 0x0fffffff);
      uint32_t plt_insn0 = plt_entry[0] | ((offset >> 20) & 0xff);
      elfcpp::Swap<32, big_endian>::writeval(pov, plt_insn0);
      uint32_t plt_insn1 = plt_entry[1] | ((offset >> 12) & 0xff);
      elfcpp::Swap<32, big_endian>::writeval(pov + 4, plt_insn1);
      uint32_t plt_insn2 = plt_entry[2] | (offset & 0xfff);
      elfcpp::Swap<32, big_endian>::writeval(pov + 8, plt_insn2);

      // Set the entry in the GOT: initially points at the PLT header
      // so the first call goes through the resolver.
      elfcpp::Swap<32, big_endian>::writeval(got_pov, plt_address);
    }

  // Both views must have been filled exactly.
  gold_assert(static_cast<section_size_type>(pov - oview) == oview_size);
  gold_assert(static_cast<section_size_type>(got_pov - got_view) == got_size);

  of->write_output_view(offset, oview_size, oview);
  of->write_output_view(got_file_offset, got_size, got_view);
}
5268
5269 // Create a PLT entry for a global symbol.
5270
template<bool big_endian>
void
Target_arm<big_endian>::make_plt_entry(Symbol_table* symtab, Layout* layout,
				       Symbol* gsym)
{
  // Nothing to do if the symbol already has a PLT slot.
  if (gsym->has_plt_offset())
    return;

  if (this->plt_ == NULL)
    {
      // Create the GOT sections first; the PLT constructor needs
      // this->got_plt_ to exist.
      this->got_section(symtab, layout);

      this->plt_ = new Output_data_plt_arm<big_endian>(layout, this->got_plt_);
      layout->add_output_section_data(".plt", elfcpp::SHT_PROGBITS,
				      (elfcpp::SHF_ALLOC
				       | elfcpp::SHF_EXECINSTR),
				      this->plt_, false, false, false, false);
    }
  this->plt_->add_entry(gsym);
}
5292
5293 // Report an unsupported relocation against a local symbol.
5294
// Report an unsupported relocation R_TYPE against a local symbol in
// OBJECT.  Emits a (translated) linker error; does not abort.

template<bool big_endian>
void
Target_arm<big_endian>::Scan::unsupported_reloc_local(
    Sized_relobj<32, big_endian>* object,
    unsigned int r_type)
{
  gold_error(_("%s: unsupported reloc %u against local symbol"),
	     object->name().c_str(), r_type);
}
5304
5305 // We are about to emit a dynamic relocation of type R_TYPE. If the
5306 // dynamic linker does not support it, issue an error. The GNU linker
5307 // only issues a non-PIC error for an allocated read-only section.
5308 // Here we know the section is allocated, but we don't know that it is
5309 // read-only. But we check for all the relocation types which the
5310 // glibc dynamic linker supports, so it seems appropriate to issue an
5311 // error even if the section is not read-only.
5312
5313 template<bool big_endian>
5314 void
5315 Target_arm<big_endian>::Scan::check_non_pic(Relobj* object,
5316 unsigned int r_type)
5317 {
5318 switch (r_type)
5319 {
5320 // These are the relocation types supported by glibc for ARM.
5321 case elfcpp::R_ARM_RELATIVE:
5322 case elfcpp::R_ARM_COPY:
5323 case elfcpp::R_ARM_GLOB_DAT:
5324 case elfcpp::R_ARM_JUMP_SLOT:
5325 case elfcpp::R_ARM_ABS32:
5326 case elfcpp::R_ARM_ABS32_NOI:
5327 case elfcpp::R_ARM_PC24:
5328 // FIXME: The following 3 types are not supported by Android's dynamic
5329 // linker.
5330 case elfcpp::R_ARM_TLS_DTPMOD32:
5331 case elfcpp::R_ARM_TLS_DTPOFF32:
5332 case elfcpp::R_ARM_TLS_TPOFF32:
5333 return;
5334
5335 default:
5336 // This prevents us from issuing more than one error per reloc
5337 // section. But we can still wind up issuing more than one
5338 // error per object file.
5339 if (this->issued_non_pic_error_)
5340 return;
5341 object->error(_("requires unsupported dynamic reloc; "
5342 "recompile with -fPIC"));
5343 this->issued_non_pic_error_ = true;
5344 return;
5345
5346 case elfcpp::R_ARM_NONE:
5347 gold_unreachable();
5348 }
5349 }
5350
5351 // Scan a relocation for a local symbol.
5352 // FIXME: This only handles a subset of relocation types used by Android
5353 // on ARM v5te devices.
5354
template<bool big_endian>
inline void
Target_arm<big_endian>::Scan::local(Symbol_table* symtab,
				    Layout* layout,
				    Target_arm* target,
				    Sized_relobj<32, big_endian>* object,
				    unsigned int data_shndx,
				    Output_section* output_section,
				    const elfcpp::Rel<32, big_endian>& reloc,
				    unsigned int r_type,
				    const elfcpp::Sym<32, big_endian>&)
{
  // Map pseudo reloc types (e.g. R_ARM_TARGET1) to real ones first.
  r_type = get_real_reloc_type(r_type);
  switch (r_type)
    {
    case elfcpp::R_ARM_NONE:
      break;

    case elfcpp::R_ARM_ABS32:
    case elfcpp::R_ARM_ABS32_NOI:
      // If building a shared library (or a position-independent
      // executable), we need to create a dynamic relocation for
      // this location. The relocation applied at link time will
      // apply the link-time value, so we flag the location with
      // an R_ARM_RELATIVE relocation so the dynamic loader can
      // relocate it easily.
      if (parameters->options().output_is_position_independent())
	{
	  Reloc_section* rel_dyn = target->rel_dyn_section(layout);
	  unsigned int r_sym = elfcpp::elf_r_sym<32>(reloc.get_r_info());
	  // If we are to add more other reloc types than R_ARM_ABS32,
	  // we need to add check_non_pic(object, r_type) here.
	  rel_dyn->add_local_relative(object, r_sym, elfcpp::R_ARM_RELATIVE,
				      output_section, data_shndx,
				      reloc.get_r_offset());
	}
      break;

    // Relocations against local symbols that are resolved entirely at
    // static link time: PC-relative references, branches, and
    // absolute/immediate fields that need no dynamic relocation.
    case elfcpp::R_ARM_REL32:
    case elfcpp::R_ARM_THM_CALL:
    case elfcpp::R_ARM_CALL:
    case elfcpp::R_ARM_PREL31:
    case elfcpp::R_ARM_JUMP24:
    case elfcpp::R_ARM_THM_JUMP24:
    case elfcpp::R_ARM_THM_JUMP19:
    case elfcpp::R_ARM_PLT32:
    case elfcpp::R_ARM_THM_ABS5:
    case elfcpp::R_ARM_ABS8:
    case elfcpp::R_ARM_ABS12:
    case elfcpp::R_ARM_ABS16:
    case elfcpp::R_ARM_BASE_ABS:
    case elfcpp::R_ARM_MOVW_ABS_NC:
    case elfcpp::R_ARM_MOVT_ABS:
    case elfcpp::R_ARM_THM_MOVW_ABS_NC:
    case elfcpp::R_ARM_THM_MOVT_ABS:
    case elfcpp::R_ARM_MOVW_PREL_NC:
    case elfcpp::R_ARM_MOVT_PREL:
    case elfcpp::R_ARM_THM_MOVW_PREL_NC:
    case elfcpp::R_ARM_THM_MOVT_PREL:
    case elfcpp::R_ARM_THM_JUMP6:
    case elfcpp::R_ARM_THM_JUMP8:
    case elfcpp::R_ARM_THM_JUMP11:
    case elfcpp::R_ARM_V4BX:
      break;

    case elfcpp::R_ARM_GOTOFF32:
      // We need a GOT section:
      target->got_section(symtab, layout);
      break;

    case elfcpp::R_ARM_BASE_PREL:
      // FIXME: What about this?
      break;

    case elfcpp::R_ARM_GOT_BREL:
    case elfcpp::R_ARM_GOT_PREL:
      {
	// The symbol requires a GOT entry.
	Output_data_got<32, big_endian>* got =
	  target->got_section(symtab, layout);
	unsigned int r_sym = elfcpp::elf_r_sym<32>(reloc.get_r_info());
	// add_local returns true only the first time this symbol gets
	// a GOT entry, so the dynamic reloc is emitted at most once.
	if (got->add_local(object, r_sym, GOT_TYPE_STANDARD))
	  {
	    // If we are generating a shared object, we need to add a
	    // dynamic RELATIVE relocation for this symbol's GOT entry.
	    if (parameters->options().output_is_position_independent())
	      {
		Reloc_section* rel_dyn = target->rel_dyn_section(layout);
		unsigned int r_sym = elfcpp::elf_r_sym<32>(reloc.get_r_info());
		rel_dyn->add_local_relative(
		    object, r_sym, elfcpp::R_ARM_RELATIVE, got,
		    object->local_got_offset(r_sym, GOT_TYPE_STANDARD));
	      }
	  }
      }
      break;

    case elfcpp::R_ARM_TARGET1:
      // This should have been mapped to another type already.
      // Fall through.
    case elfcpp::R_ARM_COPY:
    case elfcpp::R_ARM_GLOB_DAT:
    case elfcpp::R_ARM_JUMP_SLOT:
    case elfcpp::R_ARM_RELATIVE:
      // These are relocations which should only be seen by the
      // dynamic linker, and should never be seen here.
      gold_error(_("%s: unexpected reloc %u in object file"),
		 object->name().c_str(), r_type);
      break;

    default:
      unsupported_reloc_local(object, r_type);
      break;
    }
}
5470
5471 // Report an unsupported relocation against a global symbol.
5472
// Report an unsupported relocation R_TYPE against global symbol GSYM
// in OBJECT.  Emits a (translated) linker error; does not abort.

template<bool big_endian>
void
Target_arm<big_endian>::Scan::unsupported_reloc_global(
    Sized_relobj<32, big_endian>* object,
    unsigned int r_type,
    Symbol* gsym)
{
  gold_error(_("%s: unsupported reloc %u against global symbol %s"),
	     object->name().c_str(), r_type, gsym->demangled_name().c_str());
}
5483
5484 // Scan a relocation for a global symbol.
5485 // FIXME: This only handles a subset of relocation types used by Android
5486 // on ARM v5te devices.
5487
template<bool big_endian>
inline void
Target_arm<big_endian>::Scan::global(Symbol_table* symtab,
				     Layout* layout,
				     Target_arm* target,
				     Sized_relobj<32, big_endian>* object,
				     unsigned int data_shndx,
				     Output_section* output_section,
				     const elfcpp::Rel<32, big_endian>& reloc,
				     unsigned int r_type,
				     Symbol* gsym)
{
  // Map pseudo reloc types (e.g. R_ARM_TARGET1) to real ones first.
  r_type = get_real_reloc_type(r_type);
  switch (r_type)
    {
    case elfcpp::R_ARM_NONE:
      break;

    case elfcpp::R_ARM_ABS32:
    case elfcpp::R_ARM_ABS32_NOI:
      {
	// Make a dynamic relocation if necessary.  Prefer, in order:
	// a COPY reloc, a RELATIVE reloc (when the symbol cannot be
	// preempted), or a symbolic dynamic reloc.
	if (gsym->needs_dynamic_reloc(Symbol::ABSOLUTE_REF))
	  {
	    if (target->may_need_copy_reloc(gsym))
	      {
		target->copy_reloc(symtab, layout, object,
				   data_shndx, output_section, gsym, reloc);
	      }
	    else if (gsym->can_use_relative_reloc(false))
	      {
		// If we are to add more other reloc types than R_ARM_ABS32,
		// we need to add check_non_pic(object, r_type) here.
		Reloc_section* rel_dyn = target->rel_dyn_section(layout);
		rel_dyn->add_global_relative(gsym, elfcpp::R_ARM_RELATIVE,
					     output_section, object,
					     data_shndx, reloc.get_r_offset());
	      }
	    else
	      {
		// If we are to add more other reloc types than R_ARM_ABS32,
		// we need to add check_non_pic(object, r_type) here.
		Reloc_section* rel_dyn = target->rel_dyn_section(layout);
		rel_dyn->add_global(gsym, r_type, output_section, object,
				    data_shndx, reloc.get_r_offset());
	      }
	  }
      }
      break;

    // Immediate-field and branch relocations resolved at static link
    // time; no dynamic reloc is ever generated for these.
    case elfcpp::R_ARM_MOVW_ABS_NC:
    case elfcpp::R_ARM_MOVT_ABS:
    case elfcpp::R_ARM_THM_MOVW_ABS_NC:
    case elfcpp::R_ARM_THM_MOVT_ABS:
    case elfcpp::R_ARM_MOVW_PREL_NC:
    case elfcpp::R_ARM_MOVT_PREL:
    case elfcpp::R_ARM_THM_MOVW_PREL_NC:
    case elfcpp::R_ARM_THM_MOVT_PREL:
    case elfcpp::R_ARM_THM_JUMP6:
    case elfcpp::R_ARM_THM_JUMP8:
    case elfcpp::R_ARM_THM_JUMP11:
    case elfcpp::R_ARM_V4BX:
      break;

    case elfcpp::R_ARM_THM_ABS5:
    case elfcpp::R_ARM_ABS8:
    case elfcpp::R_ARM_ABS12:
    case elfcpp::R_ARM_ABS16:
    case elfcpp::R_ARM_BASE_ABS:
      {
	// No dynamic relocs of this kinds.
	// Report the error in case of PIC.
	int flags = Symbol::NON_PIC_REF;
	if (gsym->type() == elfcpp::STT_FUNC
	    || gsym->type() == elfcpp::STT_ARM_TFUNC)
	  flags |= Symbol::FUNCTION_CALL;
	if (gsym->needs_dynamic_reloc(flags))
	  check_non_pic(object, r_type);
      }
      break;

    case elfcpp::R_ARM_REL32:
    case elfcpp::R_ARM_PREL31:
      {
	// Make a dynamic relocation if necessary.  PC-relative relocs
	// cannot use RELATIVE, so fall back to COPY or a symbolic
	// dynamic reloc (after checking the loader supports it).
	int flags = Symbol::NON_PIC_REF;
	if (gsym->needs_dynamic_reloc(flags))
	  {
	    if (target->may_need_copy_reloc(gsym))
	      {
		target->copy_reloc(symtab, layout, object,
				   data_shndx, output_section, gsym, reloc);
	      }
	    else
	      {
		check_non_pic(object, r_type);
		Reloc_section* rel_dyn = target->rel_dyn_section(layout);
		rel_dyn->add_global(gsym, r_type, output_section, object,
				    data_shndx, reloc.get_r_offset());
	      }
	  }
      }
      break;

    case elfcpp::R_ARM_JUMP24:
    case elfcpp::R_ARM_THM_JUMP24:
    case elfcpp::R_ARM_THM_JUMP19:
    case elfcpp::R_ARM_CALL:
    case elfcpp::R_ARM_THM_CALL:

      if (Target_arm<big_endian>::Scan::symbol_needs_plt_entry(gsym))
	target->make_plt_entry(symtab, layout, gsym);
      else
	{
	  // Check to see if this is a function that would need a PLT
	  // but does not get one because the function symbol is untyped.
	  // This happens in assembly code missing a proper .type directive.
	  if ((!gsym->is_undefined() || parameters->options().shared())
	      && !parameters->doing_static_link()
	      && gsym->type() == elfcpp::STT_NOTYPE
	      && (gsym->is_from_dynobj()
		  || gsym->is_undefined()
		  || gsym->is_preemptible()))
	    gold_error(_("%s is not a function."),
		       gsym->demangled_name().c_str());
	}
      break;

    case elfcpp::R_ARM_PLT32:
      // If the symbol is fully resolved, this is just a relative
      // local reloc. Otherwise we need a PLT entry.
      if (gsym->final_value_is_known())
	break;
      // If building a shared library, we can also skip the PLT entry
      // if the symbol is defined in the output file and is protected
      // or hidden.
      if (gsym->is_defined()
	  && !gsym->is_from_dynobj()
	  && !gsym->is_preemptible())
	break;
      target->make_plt_entry(symtab, layout, gsym);
      break;

    case elfcpp::R_ARM_GOTOFF32:
      // We need a GOT section.
      target->got_section(symtab, layout);
      break;

    case elfcpp::R_ARM_BASE_PREL:
      // FIXME: What about this?
      break;

    case elfcpp::R_ARM_GOT_BREL:
    case elfcpp::R_ARM_GOT_PREL:
      {
	// The symbol requires a GOT entry.
	Output_data_got<32, big_endian>* got =
	  target->got_section(symtab, layout);
	if (gsym->final_value_is_known())
	  got->add_global(gsym, GOT_TYPE_STANDARD);
	else
	  {
	    // If this symbol is not fully resolved, we need to add a
	    // GOT entry with a dynamic relocation: GLOB_DAT if the
	    // symbol can be preempted, RELATIVE otherwise.
	    Reloc_section* rel_dyn = target->rel_dyn_section(layout);
	    if (gsym->is_from_dynobj()
		|| gsym->is_undefined()
		|| gsym->is_preemptible())
	      got->add_global_with_rel(gsym, GOT_TYPE_STANDARD,
				       rel_dyn, elfcpp::R_ARM_GLOB_DAT);
	    else
	      {
		// add_global returns true only for a fresh GOT entry.
		if (got->add_global(gsym, GOT_TYPE_STANDARD))
		  rel_dyn->add_global_relative(
		      gsym, elfcpp::R_ARM_RELATIVE, got,
		      gsym->got_offset(GOT_TYPE_STANDARD));
	      }
	  }
      }
      break;

    case elfcpp::R_ARM_TARGET1:
      // This should have been mapped to another type already.
      // Fall through.
    case elfcpp::R_ARM_COPY:
    case elfcpp::R_ARM_GLOB_DAT:
    case elfcpp::R_ARM_JUMP_SLOT:
    case elfcpp::R_ARM_RELATIVE:
      // These are relocations which should only be seen by the
      // dynamic linker, and should never be seen here.
      gold_error(_("%s: unexpected reloc %u in object file"),
		 object->name().c_str(), r_type);
      break;

    default:
      unsupported_reloc_global(object, r_type, gsym);
      break;
    }
}
5687
5688 // Process relocations for gc.
5689
5690 template<bool big_endian>
5691 void
5692 Target_arm<big_endian>::gc_process_relocs(Symbol_table* symtab,
5693 Layout* layout,
5694 Sized_relobj<32, big_endian>* object,
5695 unsigned int data_shndx,
5696 unsigned int,
5697 const unsigned char* prelocs,
5698 size_t reloc_count,
5699 Output_section* output_section,
5700 bool needs_special_offset_handling,
5701 size_t local_symbol_count,
5702 const unsigned char* plocal_symbols)
5703 {
5704 typedef Target_arm<big_endian> Arm;
5705 typedef typename Target_arm<big_endian>::Scan Scan;
5706
5707 gold::gc_process_relocs<32, big_endian, Arm, elfcpp::SHT_REL, Scan>(
5708 symtab,
5709 layout,
5710 this,
5711 object,
5712 data_shndx,
5713 prelocs,
5714 reloc_count,
5715 output_section,
5716 needs_special_offset_handling,
5717 local_symbol_count,
5718 plocal_symbols);
5719 }
5720
5721 // Scan relocations for a section.
5722
5723 template<bool big_endian>
5724 void
5725 Target_arm<big_endian>::scan_relocs(Symbol_table* symtab,
5726 Layout* layout,
5727 Sized_relobj<32, big_endian>* object,
5728 unsigned int data_shndx,
5729 unsigned int sh_type,
5730 const unsigned char* prelocs,
5731 size_t reloc_count,
5732 Output_section* output_section,
5733 bool needs_special_offset_handling,
5734 size_t local_symbol_count,
5735 const unsigned char* plocal_symbols)
5736 {
5737 typedef typename Target_arm<big_endian>::Scan Scan;
5738 if (sh_type == elfcpp::SHT_RELA)
5739 {
5740 gold_error(_("%s: unsupported RELA reloc section"),
5741 object->name().c_str());
5742 return;
5743 }
5744
5745 gold::scan_relocs<32, big_endian, Target_arm, elfcpp::SHT_REL, Scan>(
5746 symtab,
5747 layout,
5748 this,
5749 object,
5750 data_shndx,
5751 prelocs,
5752 reloc_count,
5753 output_section,
5754 needs_special_offset_handling,
5755 local_symbol_count,
5756 plocal_symbols);
5757 }
5758
5759 // Finalize the sections.
5760
template<bool big_endian>
void
Target_arm<big_endian>::do_finalize_sections(
    Layout* layout,
    const Input_objects* input_objects,
    Symbol_table* symtab)
{
  // Merge processor-specific flags and build attributes from every
  // relocatable input object into the output.
  for (Input_objects::Relobj_iterator p = input_objects->relobj_begin();
       p != input_objects->relobj_end();
       ++p)
    {
      Arm_relobj<big_endian>* arm_relobj =
	Arm_relobj<big_endian>::as_arm_relobj(*p);
      this->merge_processor_specific_flags(
	  arm_relobj->name(),
	  arm_relobj->processor_specific_flags());
      this->merge_object_attributes(arm_relobj->name().c_str(),
				    arm_relobj->attributes_section_data());

    }

  // Likewise for dynamic objects (shared libraries) we link against.
  for (Input_objects::Dynobj_iterator p = input_objects->dynobj_begin();
       p != input_objects->dynobj_end();
       ++p)
    {
      Arm_dynobj<big_endian>* arm_dynobj =
	Arm_dynobj<big_endian>::as_arm_dynobj(*p);
      this->merge_processor_specific_flags(
	  arm_dynobj->name(),
	  arm_dynobj->processor_specific_flags());
      this->merge_object_attributes(arm_dynobj->name().c_str(),
				    arm_dynobj->attributes_section_data());
    }

  // Check BLX use.  BLX is available from the architecture after ARMv4
  // onward, per the merged Tag_CPU_arch build attribute.
  const Object_attribute* cpu_arch_attr =
    this->get_aeabi_object_attribute(elfcpp::Tag_CPU_arch);
  if (cpu_arch_attr->int_value() > elfcpp::TAG_CPU_ARCH_V4)
    this->set_may_use_blx(true);

  // Check if we need to use Cortex-A8 workaround.  An explicit
  // --fix-cortex-a8 / --no-fix-cortex-a8 always wins.
  if (parameters->options().user_set_fix_cortex_a8())
    this->fix_cortex_a8_ = parameters->options().fix_cortex_a8();
  else
    {
      // If neither --fix-cortex-a8 nor --no-fix-cortex-a8 is used, turn on
      // Cortex-A8 erratum workaround for ARMv7-A or ARMv7 with unknown
      // profile.  (An int_value of 0 means no Tag_CPU_arch_profile was
      // recorded, i.e. the profile is unknown.)
      const Object_attribute* cpu_arch_profile_attr =
	this->get_aeabi_object_attribute(elfcpp::Tag_CPU_arch_profile);
      this->fix_cortex_a8_ =
	(cpu_arch_attr->int_value() == elfcpp::TAG_CPU_ARCH_V7
	 && (cpu_arch_profile_attr->int_value() == 'A'
	     || cpu_arch_profile_attr->int_value() == 0));
    }

  // Check if we can use V4BX interworking.
  // The V4BX interworking stub contains BX instruction,
  // which is not specified for some profiles.
  if (this->fix_v4bx() == 2 && !this->may_use_blx())
    gold_error(_("unable to provide V4BX reloc interworking fix up; "
	         "the target profile does not support BX instruction"));

  // Fill in some more dynamic tags (DT_PLTGOT, DT_JMPREL, DT_REL, etc.).
  const Reloc_section* rel_plt = (this->plt_ == NULL
				  ? NULL
				  : this->plt_->rel_plt());
  layout->add_target_dynamic_tags(true, this->got_plt_, rel_plt,
				  this->rel_dyn_, true);

  // Emit any relocs we saved in an attempt to avoid generating COPY
  // relocs.
  if (this->copy_relocs_.any_saved_relocs())
    this->copy_relocs_.emit(this->rel_dyn_section(layout));

  // Handle the .ARM.exidx section (exception-handling index table).
  Output_section* exidx_section = layout->find_output_section(".ARM.exidx");
  if (exidx_section != NULL
      && exidx_section->type() == elfcpp::SHT_ARM_EXIDX
      && !parameters->options().relocatable())
    {
      // Create __exidx_start and __exidx_end symbols delimiting the
      // table, as required by the EH runtime.
      symtab->define_in_output_data("__exidx_start", NULL,
				    Symbol_table::PREDEFINED,
				    exidx_section, 0, 0, elfcpp::STT_OBJECT,
				    elfcpp::STB_GLOBAL, elfcpp::STV_HIDDEN, 0,
				    false, true);
      symtab->define_in_output_data("__exidx_end", NULL,
				    Symbol_table::PREDEFINED,
				    exidx_section, 0, 0, elfcpp::STT_OBJECT,
				    elfcpp::STB_GLOBAL, elfcpp::STV_HIDDEN, 0,
				    true, true);

      // For the ARM target, we need to add a PT_ARM_EXIDX segment for
      // the .ARM.exidx section.  Skip this if a linker script supplied
      // its own PHDRS clause.
      if (!layout->script_options()->saw_phdrs_clause())
	{
	  gold_assert(layout->find_output_segment(elfcpp::PT_ARM_EXIDX, 0, 0)
		      == NULL);
	  Output_segment* exidx_segment =
	    layout->make_output_segment(elfcpp::PT_ARM_EXIDX, elfcpp::PF_R);
	  exidx_segment->add_output_section(exidx_section, elfcpp::PF_R,
					    false);
	}
    }

  // Create an .ARM.attributes section if there is not one already,
  // populated from the attributes merged above.
  Output_attributes_section_data* attributes_section =
    new Output_attributes_section_data(*this->attributes_section_data_);
  layout->add_output_section_data(".ARM.attributes",
				  elfcpp::SHT_ARM_ATTRIBUTES, 0,
				  attributes_section, false, false, false,
				  false);
}
5876
5877 // Return whether a direct absolute static relocation needs to be applied.
5878 // In cases where Scan::local() or Scan::global() has created
5879 // a dynamic relocation other than R_ARM_RELATIVE, the addend
5880 // of the relocation is carried in the data, and we must not
5881 // apply the static relocation.
5882
5883 template<bool big_endian>
5884 inline bool
5885 Target_arm<big_endian>::Relocate::should_apply_static_reloc(
5886 const Sized_symbol<32>* gsym,
5887 int ref_flags,
5888 bool is_32bit,
5889 Output_section* output_section)
5890 {
5891 // If the output section is not allocated, then we didn't call
5892 // scan_relocs, we didn't create a dynamic reloc, and we must apply
5893 // the reloc here.
5894 if ((output_section->flags() & elfcpp::SHF_ALLOC) == 0)
5895 return true;
5896
5897 // For local symbols, we will have created a non-RELATIVE dynamic
5898 // relocation only if (a) the output is position independent,
5899 // (b) the relocation is absolute (not pc- or segment-relative), and
5900 // (c) the relocation is not 32 bits wide.
5901 if (gsym == NULL)
5902 return !(parameters->options().output_is_position_independent()
5903 && (ref_flags & Symbol::ABSOLUTE_REF)
5904 && !is_32bit);
5905
5906 // For global symbols, we use the same helper routines used in the
5907 // scan pass. If we did not create a dynamic relocation, or if we
5908 // created a RELATIVE dynamic relocation, we should apply the static
5909 // relocation.
5910 bool has_dyn = gsym->needs_dynamic_reloc(ref_flags);
5911 bool is_rel = (ref_flags & Symbol::ABSOLUTE_REF)
5912 && gsym->can_use_relative_reloc(ref_flags
5913 & Symbol::FUNCTION_CALL);
5914 return !has_dyn || is_rel;
5915 }
5916
5917 // Perform a relocation.
5918
5919 template<bool big_endian>
5920 inline bool
5921 Target_arm<big_endian>::Relocate::relocate(
5922 const Relocate_info<32, big_endian>* relinfo,
5923 Target_arm* target,
5924 Output_section *output_section,
5925 size_t relnum,
5926 const elfcpp::Rel<32, big_endian>& rel,
5927 unsigned int r_type,
5928 const Sized_symbol<32>* gsym,
5929 const Symbol_value<32>* psymval,
5930 unsigned char* view,
5931 Arm_address address,
5932 section_size_type /* view_size */ )
5933 {
5934 typedef Arm_relocate_functions<big_endian> Arm_relocate_functions;
5935
5936 r_type = get_real_reloc_type(r_type);
5937
5938 const Arm_relobj<big_endian>* object =
5939 Arm_relobj<big_endian>::as_arm_relobj(relinfo->object);
5940
5941 // If the final branch target of a relocation is THUMB instruction, this
5942 // is 1. Otherwise it is 0.
5943 Arm_address thumb_bit = 0;
5944 Symbol_value<32> symval;
5945 bool is_weakly_undefined_without_plt = false;
5946 if (relnum != Target_arm<big_endian>::fake_relnum_for_stubs)
5947 {
5948 if (gsym != NULL)
5949 {
5950 // This is a global symbol. Determine if we use PLT and if the
5951 // final target is THUMB.
5952 if (gsym->use_plt_offset(reloc_is_non_pic(r_type)))
5953 {
5954 // This uses a PLT, change the symbol value.
5955 symval.set_output_value(target->plt_section()->address()
5956 + gsym->plt_offset());
5957 psymval = &symval;
5958 }
5959 else if (gsym->is_weak_undefined())
5960 {
5961 // This is a weakly undefined symbol and we do not use PLT
5962 // for this relocation. A branch targeting this symbol will
5963 // be converted into an NOP.
5964 is_weakly_undefined_without_plt = true;
5965 }
5966 else
5967 {
5968 // Set thumb bit if symbol:
5969 // -Has type STT_ARM_TFUNC or
5970 // -Has type STT_FUNC, is defined and with LSB in value set.
5971 thumb_bit =
5972 (((gsym->type() == elfcpp::STT_ARM_TFUNC)
5973 || (gsym->type() == elfcpp::STT_FUNC
5974 && !gsym->is_undefined()
5975 && ((psymval->value(object, 0) & 1) != 0)))
5976 ? 1
5977 : 0);
5978 }
5979 }
5980 else
5981 {
5982 // This is a local symbol. Determine if the final target is THUMB.
5983 // We saved this information when all the local symbols were read.
5984 elfcpp::Elf_types<32>::Elf_WXword r_info = rel.get_r_info();
5985 unsigned int r_sym = elfcpp::elf_r_sym<32>(r_info);
5986 thumb_bit = object->local_symbol_is_thumb_function(r_sym) ? 1 : 0;
5987 }
5988 }
5989 else
5990 {
5991 // This is a fake relocation synthesized for a stub. It does not have
5992 // a real symbol. We just look at the LSB of the symbol value to
5993 // determine if the target is THUMB or not.
5994 thumb_bit = ((psymval->value(object, 0) & 1) != 0);
5995 }
5996
5997 // Strip LSB if this points to a THUMB target.
5998 if (thumb_bit != 0
5999 && Target_arm<big_endian>::reloc_uses_thumb_bit(r_type)
6000 && ((psymval->value(object, 0) & 1) != 0))
6001 {
6002 Arm_address stripped_value =
6003 psymval->value(object, 0) & ~static_cast<Arm_address>(1);
6004 symval.set_output_value(stripped_value);
6005 psymval = &symval;
6006 }
6007
6008 // Get the GOT offset if needed.
6009 // The GOT pointer points to the end of the GOT section.
6010 // We need to subtract the size of the GOT section to get
6011 // the actual offset to use in the relocation.
6012 bool have_got_offset = false;
6013 unsigned int got_offset = 0;
6014 switch (r_type)
6015 {
6016 case elfcpp::R_ARM_GOT_BREL:
6017 case elfcpp::R_ARM_GOT_PREL:
6018 if (gsym != NULL)
6019 {
6020 gold_assert(gsym->has_got_offset(GOT_TYPE_STANDARD));
6021 got_offset = (gsym->got_offset(GOT_TYPE_STANDARD)
6022 - target->got_size());
6023 }
6024 else
6025 {
6026 unsigned int r_sym = elfcpp::elf_r_sym<32>(rel.get_r_info());
6027 gold_assert(object->local_has_got_offset(r_sym, GOT_TYPE_STANDARD));
6028 got_offset = (object->local_got_offset(r_sym, GOT_TYPE_STANDARD)
6029 - target->got_size());
6030 }
6031 have_got_offset = true;
6032 break;
6033
6034 default:
6035 break;
6036 }
6037
6038 // To look up relocation stubs, we need to pass the symbol table index of
6039 // a local symbol.
6040 unsigned int r_sym = elfcpp::elf_r_sym<32>(rel.get_r_info());
6041
6042 typename Arm_relocate_functions::Status reloc_status =
6043 Arm_relocate_functions::STATUS_OKAY;
6044 switch (r_type)
6045 {
6046 case elfcpp::R_ARM_NONE:
6047 break;
6048
6049 case elfcpp::R_ARM_ABS8:
6050 if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, false,
6051 output_section))
6052 reloc_status = Arm_relocate_functions::abs8(view, object, psymval);
6053 break;
6054
6055 case elfcpp::R_ARM_ABS12:
6056 if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, false,
6057 output_section))
6058 reloc_status = Arm_relocate_functions::abs12(view, object, psymval);
6059 break;
6060
6061 case elfcpp::R_ARM_ABS16:
6062 if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, false,
6063 output_section))
6064 reloc_status = Arm_relocate_functions::abs16(view, object, psymval);
6065 break;
6066
6067 case elfcpp::R_ARM_ABS32:
6068 if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, true,
6069 output_section))
6070 reloc_status = Arm_relocate_functions::abs32(view, object, psymval,
6071 thumb_bit);
6072 break;
6073
6074 case elfcpp::R_ARM_ABS32_NOI:
6075 if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, true,
6076 output_section))
6077 // No thumb bit for this relocation: (S + A)
6078 reloc_status = Arm_relocate_functions::abs32(view, object, psymval,
6079 0);
6080 break;
6081
6082 case elfcpp::R_ARM_MOVW_ABS_NC:
6083 if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, true,
6084 output_section))
6085 reloc_status = Arm_relocate_functions::movw_abs_nc(view, object,
6086 psymval,
6087 thumb_bit);
6088 else
6089 gold_error(_("relocation R_ARM_MOVW_ABS_NC cannot be used when making"
6090 "a shared object; recompile with -fPIC"));
6091 break;
6092
6093 case elfcpp::R_ARM_MOVT_ABS:
6094 if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, true,
6095 output_section))
6096 reloc_status = Arm_relocate_functions::movt_abs(view, object, psymval);
6097 else
6098 gold_error(_("relocation R_ARM_MOVT_ABS cannot be used when making"
6099 "a shared object; recompile with -fPIC"));
6100 break;
6101
6102 case elfcpp::R_ARM_THM_MOVW_ABS_NC:
6103 if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, true,
6104 output_section))
6105 reloc_status = Arm_relocate_functions::thm_movw_abs_nc(view, object,
6106 psymval,
6107 thumb_bit);
6108 else
6109 gold_error(_("relocation R_ARM_THM_MOVW_ABS_NC cannot be used when"
6110 "making a shared object; recompile with -fPIC"));
6111 break;
6112
6113 case elfcpp::R_ARM_THM_MOVT_ABS:
6114 if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, true,
6115 output_section))
6116 reloc_status = Arm_relocate_functions::thm_movt_abs(view, object,
6117 psymval);
6118 else
6119 gold_error(_("relocation R_ARM_THM_MOVT_ABS cannot be used when"
6120 "making a shared object; recompile with -fPIC"));
6121 break;
6122
6123 case elfcpp::R_ARM_MOVW_PREL_NC:
6124 reloc_status = Arm_relocate_functions::movw_prel_nc(view, object,
6125 psymval, address,
6126 thumb_bit);
6127 break;
6128
6129 case elfcpp::R_ARM_MOVT_PREL:
6130 reloc_status = Arm_relocate_functions::movt_prel(view, object,
6131 psymval, address);
6132 break;
6133
6134 case elfcpp::R_ARM_THM_MOVW_PREL_NC:
6135 reloc_status = Arm_relocate_functions::thm_movw_prel_nc(view, object,
6136 psymval, address,
6137 thumb_bit);
6138 break;
6139
6140 case elfcpp::R_ARM_THM_MOVT_PREL:
6141 reloc_status = Arm_relocate_functions::thm_movt_prel(view, object,
6142 psymval, address);
6143 break;
6144
6145 case elfcpp::R_ARM_REL32:
6146 reloc_status = Arm_relocate_functions::rel32(view, object, psymval,
6147 address, thumb_bit);
6148 break;
6149
6150 case elfcpp::R_ARM_THM_ABS5:
6151 if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, false,
6152 output_section))
6153 reloc_status = Arm_relocate_functions::thm_abs5(view, object, psymval);
6154 break;
6155
6156 case elfcpp::R_ARM_THM_CALL:
6157 reloc_status =
6158 Arm_relocate_functions::thm_call(relinfo, view, gsym, object, r_sym,
6159 psymval, address, thumb_bit,
6160 is_weakly_undefined_without_plt);
6161 break;
6162
6163 case elfcpp::R_ARM_XPC25:
6164 reloc_status =
6165 Arm_relocate_functions::xpc25(relinfo, view, gsym, object, r_sym,
6166 psymval, address, thumb_bit,
6167 is_weakly_undefined_without_plt);
6168 break;
6169
6170 case elfcpp::R_ARM_THM_XPC22:
6171 reloc_status =
6172 Arm_relocate_functions::thm_xpc22(relinfo, view, gsym, object, r_sym,
6173 psymval, address, thumb_bit,
6174 is_weakly_undefined_without_plt);
6175 break;
6176
6177 case elfcpp::R_ARM_GOTOFF32:
6178 {
6179 Arm_address got_origin;
6180 got_origin = target->got_plt_section()->address();
6181 reloc_status = Arm_relocate_functions::rel32(view, object, psymval,
6182 got_origin, thumb_bit);
6183 }
6184 break;
6185
6186 case elfcpp::R_ARM_BASE_PREL:
6187 {
6188 uint32_t origin;
6189 // Get the addressing origin of the output segment defining the
6190 // symbol gsym (AAELF 4.6.1.2 Relocation types)
6191 gold_assert(gsym != NULL);
6192 if (gsym->source() == Symbol::IN_OUTPUT_SEGMENT)
6193 origin = gsym->output_segment()->vaddr();
6194 else if (gsym->source () == Symbol::IN_OUTPUT_DATA)
6195 origin = gsym->output_data()->address();
6196 else
6197 {
6198 gold_error_at_location(relinfo, relnum, rel.get_r_offset(),
6199 _("cannot find origin of R_ARM_BASE_PREL"));
6200 return true;
6201 }
6202 reloc_status = Arm_relocate_functions::base_prel(view, origin, address);
6203 }
6204 break;
6205
6206 case elfcpp::R_ARM_BASE_ABS:
6207 {
6208 if (!should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, true,
6209 output_section))
6210 break;
6211
6212 uint32_t origin;
6213 // Get the addressing origin of the output segment defining
6214 // the symbol gsym (AAELF 4.6.1.2 Relocation types).
6215 if (gsym == NULL)
6216 // R_ARM_BASE_ABS with the NULL symbol will give the
6217 // absolute address of the GOT origin (GOT_ORG) (see ARM IHI
6218 // 0044C (AAELF): 4.6.1.8 Proxy generating relocations).
6219 origin = target->got_plt_section()->address();
6220 else if (gsym->source() == Symbol::IN_OUTPUT_SEGMENT)
6221 origin = gsym->output_segment()->vaddr();
6222 else if (gsym->source () == Symbol::IN_OUTPUT_DATA)
6223 origin = gsym->output_data()->address();
6224 else
6225 {
6226 gold_error_at_location(relinfo, relnum, rel.get_r_offset(),
6227 _("cannot find origin of R_ARM_BASE_ABS"));
6228 return true;
6229 }
6230
6231 reloc_status = Arm_relocate_functions::base_abs(view, origin);
6232 }
6233 break;
6234
6235 case elfcpp::R_ARM_GOT_BREL:
6236 gold_assert(have_got_offset);
6237 reloc_status = Arm_relocate_functions::got_brel(view, got_offset);
6238 break;
6239
6240 case elfcpp::R_ARM_GOT_PREL:
6241 gold_assert(have_got_offset);
6242 // Get the address origin for GOT PLT, which is allocated right
6243 // after the GOT section, to calculate an absolute address of
6244 // the symbol GOT entry (got_origin + got_offset).
6245 Arm_address got_origin;
6246 got_origin = target->got_plt_section()->address();
6247 reloc_status = Arm_relocate_functions::got_prel(view,
6248 got_origin + got_offset,
6249 address);
6250 break;
6251
6252 case elfcpp::R_ARM_PLT32:
6253 gold_assert(gsym == NULL
6254 || gsym->has_plt_offset()
6255 || gsym->final_value_is_known()
6256 || (gsym->is_defined()
6257 && !gsym->is_from_dynobj()
6258 && !gsym->is_preemptible()));
6259 reloc_status =
6260 Arm_relocate_functions::plt32(relinfo, view, gsym, object, r_sym,
6261 psymval, address, thumb_bit,
6262 is_weakly_undefined_without_plt);
6263 break;
6264
6265 case elfcpp::R_ARM_CALL:
6266 reloc_status =
6267 Arm_relocate_functions::call(relinfo, view, gsym, object, r_sym,
6268 psymval, address, thumb_bit,
6269 is_weakly_undefined_without_plt);
6270 break;
6271
6272 case elfcpp::R_ARM_JUMP24:
6273 reloc_status =
6274 Arm_relocate_functions::jump24(relinfo, view, gsym, object, r_sym,
6275 psymval, address, thumb_bit,
6276 is_weakly_undefined_without_plt);
6277 break;
6278
6279 case elfcpp::R_ARM_THM_JUMP24:
6280 reloc_status =
6281 Arm_relocate_functions::thm_jump24(relinfo, view, gsym, object, r_sym,
6282 psymval, address, thumb_bit,
6283 is_weakly_undefined_without_plt);
6284 break;
6285
6286 case elfcpp::R_ARM_THM_JUMP19:
6287 reloc_status =
6288 Arm_relocate_functions::thm_jump19(view, object, psymval, address,
6289 thumb_bit);
6290 break;
6291
6292 case elfcpp::R_ARM_THM_JUMP6:
6293 reloc_status =
6294 Arm_relocate_functions::thm_jump6(view, object, psymval, address);
6295 break;
6296
6297 case elfcpp::R_ARM_THM_JUMP8:
6298 reloc_status =
6299 Arm_relocate_functions::thm_jump8(view, object, psymval, address);
6300 break;
6301
6302 case elfcpp::R_ARM_THM_JUMP11:
6303 reloc_status =
6304 Arm_relocate_functions::thm_jump11(view, object, psymval, address);
6305 break;
6306
6307 case elfcpp::R_ARM_PREL31:
6308 reloc_status = Arm_relocate_functions::prel31(view, object, psymval,
6309 address, thumb_bit);
6310 break;
6311
6312 case elfcpp::R_ARM_V4BX:
6313 if (target->fix_v4bx() > 0)
6314 reloc_status =
6315 Arm_relocate_functions::v4bx(relinfo, view, object, address,
6316 (target->fix_v4bx() == 2));
6317 break;
6318
6319 case elfcpp::R_ARM_TARGET1:
6320 // This should have been mapped to another type already.
6321 // Fall through.
6322 case elfcpp::R_ARM_COPY:
6323 case elfcpp::R_ARM_GLOB_DAT:
6324 case elfcpp::R_ARM_JUMP_SLOT:
6325 case elfcpp::R_ARM_RELATIVE:
6326 // These are relocations which should only be seen by the
6327 // dynamic linker, and should never be seen here.
6328 gold_error_at_location(relinfo, relnum, rel.get_r_offset(),
6329 _("unexpected reloc %u in object file"),
6330 r_type);
6331 break;
6332
6333 default:
6334 gold_error_at_location(relinfo, relnum, rel.get_r_offset(),
6335 _("unsupported reloc %u"),
6336 r_type);
6337 break;
6338 }
6339
6340 // Report any errors.
6341 switch (reloc_status)
6342 {
6343 case Arm_relocate_functions::STATUS_OKAY:
6344 break;
6345 case Arm_relocate_functions::STATUS_OVERFLOW:
6346 gold_error_at_location(relinfo, relnum, rel.get_r_offset(),
6347 _("relocation overflow in relocation %u"),
6348 r_type);
6349 break;
6350 case Arm_relocate_functions::STATUS_BAD_RELOC:
6351 gold_error_at_location(
6352 relinfo,
6353 relnum,
6354 rel.get_r_offset(),
6355 _("unexpected opcode while processing relocation %u"),
6356 r_type);
6357 break;
6358 default:
6359 gold_unreachable();
6360 }
6361
6362 return true;
6363 }
6364
6365 // Relocate section data.
6366
6367 template<bool big_endian>
6368 void
6369 Target_arm<big_endian>::relocate_section(
6370 const Relocate_info<32, big_endian>* relinfo,
6371 unsigned int sh_type,
6372 const unsigned char* prelocs,
6373 size_t reloc_count,
6374 Output_section* output_section,
6375 bool needs_special_offset_handling,
6376 unsigned char* view,
6377 Arm_address address,
6378 section_size_type view_size,
6379 const Reloc_symbol_changes* reloc_symbol_changes)
6380 {
6381 typedef typename Target_arm<big_endian>::Relocate Arm_relocate;
6382 gold_assert(sh_type == elfcpp::SHT_REL);
6383
6384 Arm_input_section<big_endian>* arm_input_section =
6385 this->find_arm_input_section(relinfo->object, relinfo->data_shndx);
6386
6387 // This is an ARM input section and the view covers the whole output
6388 // section.
6389 if (arm_input_section != NULL)
6390 {
6391 gold_assert(needs_special_offset_handling);
6392 Arm_address section_address = arm_input_section->address();
6393 section_size_type section_size = arm_input_section->data_size();
6394
6395 gold_assert((arm_input_section->address() >= address)
6396 && ((arm_input_section->address()
6397 + arm_input_section->data_size())
6398 <= (address + view_size)));
6399
6400 off_t offset = section_address - address;
6401 view += offset;
6402 address += offset;
6403 view_size = section_size;
6404 }
6405
6406 gold::relocate_section<32, big_endian, Target_arm, elfcpp::SHT_REL,
6407 Arm_relocate>(
6408 relinfo,
6409 this,
6410 prelocs,
6411 reloc_count,
6412 output_section,
6413 needs_special_offset_handling,
6414 view,
6415 address,
6416 view_size,
6417 reloc_symbol_changes);
6418 }
6419
6420 // Return the size of a relocation while scanning during a relocatable
6421 // link.
6422
6423 template<bool big_endian>
6424 unsigned int
6425 Target_arm<big_endian>::Relocatable_size_for_reloc::get_size_for_reloc(
6426 unsigned int r_type,
6427 Relobj* object)
6428 {
6429 r_type = get_real_reloc_type(r_type);
6430 switch (r_type)
6431 {
6432 case elfcpp::R_ARM_NONE:
6433 return 0;
6434
6435 case elfcpp::R_ARM_ABS8:
6436 return 1;
6437
6438 case elfcpp::R_ARM_ABS16:
6439 case elfcpp::R_ARM_THM_ABS5:
6440 case elfcpp::R_ARM_THM_JUMP6:
6441 case elfcpp::R_ARM_THM_JUMP8:
6442 case elfcpp::R_ARM_THM_JUMP11:
6443 return 2;
6444
6445 case elfcpp::R_ARM_ABS32:
6446 case elfcpp::R_ARM_ABS32_NOI:
6447 case elfcpp::R_ARM_ABS12:
6448 case elfcpp::R_ARM_BASE_ABS:
6449 case elfcpp::R_ARM_REL32:
6450 case elfcpp::R_ARM_THM_CALL:
6451 case elfcpp::R_ARM_GOTOFF32:
6452 case elfcpp::R_ARM_BASE_PREL:
6453 case elfcpp::R_ARM_GOT_BREL:
6454 case elfcpp::R_ARM_GOT_PREL:
6455 case elfcpp::R_ARM_PLT32:
6456 case elfcpp::R_ARM_CALL:
6457 case elfcpp::R_ARM_JUMP24:
6458 case elfcpp::R_ARM_PREL31:
6459 case elfcpp::R_ARM_MOVW_ABS_NC:
6460 case elfcpp::R_ARM_MOVT_ABS:
6461 case elfcpp::R_ARM_THM_MOVW_ABS_NC:
6462 case elfcpp::R_ARM_THM_MOVT_ABS:
6463 case elfcpp::R_ARM_MOVW_PREL_NC:
6464 case elfcpp::R_ARM_MOVT_PREL:
6465 case elfcpp::R_ARM_THM_MOVW_PREL_NC:
6466 case elfcpp::R_ARM_THM_MOVT_PREL:
6467 case elfcpp::R_ARM_V4BX:
6468 return 4;
6469
6470 case elfcpp::R_ARM_TARGET1:
6471 // This should have been mapped to another type already.
6472 // Fall through.
6473 case elfcpp::R_ARM_COPY:
6474 case elfcpp::R_ARM_GLOB_DAT:
6475 case elfcpp::R_ARM_JUMP_SLOT:
6476 case elfcpp::R_ARM_RELATIVE:
6477 // These are relocations which should only be seen by the
6478 // dynamic linker, and should never be seen here.
6479 gold_error(_("%s: unexpected reloc %u in object file"),
6480 object->name().c_str(), r_type);
6481 return 0;
6482
6483 default:
6484 object->error(_("unsupported reloc %u in object file"), r_type);
6485 return 0;
6486 }
6487 }
6488
6489 // Scan the relocs during a relocatable link.
6490
6491 template<bool big_endian>
6492 void
6493 Target_arm<big_endian>::scan_relocatable_relocs(
6494 Symbol_table* symtab,
6495 Layout* layout,
6496 Sized_relobj<32, big_endian>* object,
6497 unsigned int data_shndx,
6498 unsigned int sh_type,
6499 const unsigned char* prelocs,
6500 size_t reloc_count,
6501 Output_section* output_section,
6502 bool needs_special_offset_handling,
6503 size_t local_symbol_count,
6504 const unsigned char* plocal_symbols,
6505 Relocatable_relocs* rr)
6506 {
6507 gold_assert(sh_type == elfcpp::SHT_REL);
6508
6509 typedef gold::Default_scan_relocatable_relocs<elfcpp::SHT_REL,
6510 Relocatable_size_for_reloc> Scan_relocatable_relocs;
6511
6512 gold::scan_relocatable_relocs<32, big_endian, elfcpp::SHT_REL,
6513 Scan_relocatable_relocs>(
6514 symtab,
6515 layout,
6516 object,
6517 data_shndx,
6518 prelocs,
6519 reloc_count,
6520 output_section,
6521 needs_special_offset_handling,
6522 local_symbol_count,
6523 plocal_symbols,
6524 rr);
6525 }
6526
6527 // Relocate a section during a relocatable link.
6528
6529 template<bool big_endian>
6530 void
6531 Target_arm<big_endian>::relocate_for_relocatable(
6532 const Relocate_info<32, big_endian>* relinfo,
6533 unsigned int sh_type,
6534 const unsigned char* prelocs,
6535 size_t reloc_count,
6536 Output_section* output_section,
6537 off_t offset_in_output_section,
6538 const Relocatable_relocs* rr,
6539 unsigned char* view,
6540 Arm_address view_address,
6541 section_size_type view_size,
6542 unsigned char* reloc_view,
6543 section_size_type reloc_view_size)
6544 {
6545 gold_assert(sh_type == elfcpp::SHT_REL);
6546
6547 gold::relocate_for_relocatable<32, big_endian, elfcpp::SHT_REL>(
6548 relinfo,
6549 prelocs,
6550 reloc_count,
6551 output_section,
6552 offset_in_output_section,
6553 rr,
6554 view,
6555 view_address,
6556 view_size,
6557 reloc_view,
6558 reloc_view_size);
6559 }
6560
6561 // Return the value to use for a dynamic symbol which requires special
6562 // treatment. This is how we support equality comparisons of function
6563 // pointers across shared library boundaries, as described in the
6564 // processor specific ABI supplement.
6565
6566 template<bool big_endian>
6567 uint64_t
6568 Target_arm<big_endian>::do_dynsym_value(const Symbol* gsym) const
6569 {
6570 gold_assert(gsym->is_from_dynobj() && gsym->has_plt_offset());
6571 return this->plt_section()->address() + gsym->plt_offset();
6572 }
6573
6574 // Map platform-specific relocs to real relocs
6575 //
6576 template<bool big_endian>
6577 unsigned int
6578 Target_arm<big_endian>::get_real_reloc_type (unsigned int r_type)
6579 {
6580 switch (r_type)
6581 {
6582 case elfcpp::R_ARM_TARGET1:
6583 // This is either R_ARM_ABS32 or R_ARM_REL32;
6584 return elfcpp::R_ARM_ABS32;
6585
6586 case elfcpp::R_ARM_TARGET2:
6587 // This can be any reloc type but ususally is R_ARM_GOT_PREL
6588 return elfcpp::R_ARM_GOT_PREL;
6589
6590 default:
6591 return r_type;
6592 }
6593 }
6594
6595 // Whether if two EABI versions V1 and V2 are compatible.
6596
6597 template<bool big_endian>
6598 bool
6599 Target_arm<big_endian>::are_eabi_versions_compatible(
6600 elfcpp::Elf_Word v1,
6601 elfcpp::Elf_Word v2)
6602 {
6603 // v4 and v5 are the same spec before and after it was released,
6604 // so allow mixing them.
6605 if ((v1 == elfcpp::EF_ARM_EABI_VER4 && v2 == elfcpp::EF_ARM_EABI_VER5)
6606 || (v1 == elfcpp::EF_ARM_EABI_VER5 && v2 == elfcpp::EF_ARM_EABI_VER4))
6607 return true;
6608
6609 return v1 == v2;
6610 }
6611
6612 // Combine FLAGS from an input object called NAME and the processor-specific
6613 // flags in the ELF header of the output. Much of this is adapted from the
6614 // processor-specific flags merging code in elf32_arm_merge_private_bfd_data
6615 // in bfd/elf32-arm.c.
6616
6617 template<bool big_endian>
6618 void
6619 Target_arm<big_endian>::merge_processor_specific_flags(
6620 const std::string& name,
6621 elfcpp::Elf_Word flags)
6622 {
6623 if (this->are_processor_specific_flags_set())
6624 {
6625 elfcpp::Elf_Word out_flags = this->processor_specific_flags();
6626
6627 // Nothing to merge if flags equal to those in output.
6628 if (flags == out_flags)
6629 return;
6630
6631 // Complain about various flag mismatches.
6632 elfcpp::Elf_Word version1 = elfcpp::arm_eabi_version(flags);
6633 elfcpp::Elf_Word version2 = elfcpp::arm_eabi_version(out_flags);
6634 if (!this->are_eabi_versions_compatible(version1, version2))
6635 gold_error(_("Source object %s has EABI version %d but output has "
6636 "EABI version %d."),
6637 name.c_str(),
6638 (flags & elfcpp::EF_ARM_EABIMASK) >> 24,
6639 (out_flags & elfcpp::EF_ARM_EABIMASK) >> 24);
6640 }
6641 else
6642 {
6643 // If the input is the default architecture and had the default
6644 // flags then do not bother setting the flags for the output
6645 // architecture, instead allow future merges to do this. If no
6646 // future merges ever set these flags then they will retain their
6647 // uninitialised values, which surprise surprise, correspond
6648 // to the default values.
6649 if (flags == 0)
6650 return;
6651
6652 // This is the first time, just copy the flags.
6653 // We only copy the EABI version for now.
6654 this->set_processor_specific_flags(flags & elfcpp::EF_ARM_EABIMASK);
6655 }
6656 }
6657
6658 // Adjust ELF file header.
6659 template<bool big_endian>
6660 void
6661 Target_arm<big_endian>::do_adjust_elf_header(
6662 unsigned char* view,
6663 int len) const
6664 {
6665 gold_assert(len == elfcpp::Elf_sizes<32>::ehdr_size);
6666
6667 elfcpp::Ehdr<32, big_endian> ehdr(view);
6668 unsigned char e_ident[elfcpp::EI_NIDENT];
6669 memcpy(e_ident, ehdr.get_e_ident(), elfcpp::EI_NIDENT);
6670
6671 if (elfcpp::arm_eabi_version(this->processor_specific_flags())
6672 == elfcpp::EF_ARM_EABI_UNKNOWN)
6673 e_ident[elfcpp::EI_OSABI] = elfcpp::ELFOSABI_ARM;
6674 else
6675 e_ident[elfcpp::EI_OSABI] = 0;
6676 e_ident[elfcpp::EI_ABIVERSION] = 0;
6677
6678 // FIXME: Do EF_ARM_BE8 adjustment.
6679
6680 elfcpp::Ehdr_write<32, big_endian> oehdr(view);
6681 oehdr.put_e_ident(e_ident);
6682 }
6683
6684 // do_make_elf_object to override the same function in the base class.
6685 // We need to use a target-specific sub-class of Sized_relobj<32, big_endian>
6686 // to store ARM specific information. Hence we need to have our own
6687 // ELF object creation.
6688
6689 template<bool big_endian>
6690 Object*
6691 Target_arm<big_endian>::do_make_elf_object(
6692 const std::string& name,
6693 Input_file* input_file,
6694 off_t offset, const elfcpp::Ehdr<32, big_endian>& ehdr)
6695 {
6696 int et = ehdr.get_e_type();
6697 if (et == elfcpp::ET_REL)
6698 {
6699 Arm_relobj<big_endian>* obj =
6700 new Arm_relobj<big_endian>(name, input_file, offset, ehdr);
6701 obj->setup();
6702 return obj;
6703 }
6704 else if (et == elfcpp::ET_DYN)
6705 {
6706 Sized_dynobj<32, big_endian>* obj =
6707 new Arm_dynobj<big_endian>(name, input_file, offset, ehdr);
6708 obj->setup();
6709 return obj;
6710 }
6711 else
6712 {
6713 gold_error(_("%s: unsupported ELF file type %d"),
6714 name.c_str(), et);
6715 return NULL;
6716 }
6717 }
6718
6719 // Read the architecture from the Tag_also_compatible_with attribute, if any.
6720 // Returns -1 if no architecture could be read.
6721 // This is adapted from get_secondary_compatible_arch() in bfd/elf32-arm.c.
6722
6723 template<bool big_endian>
6724 int
6725 Target_arm<big_endian>::get_secondary_compatible_arch(
6726 const Attributes_section_data* pasd)
6727 {
6728 const Object_attribute *known_attributes =
6729 pasd->known_attributes(Object_attribute::OBJ_ATTR_PROC);
6730
6731 // Note: the tag and its argument below are uleb128 values, though
6732 // currently-defined values fit in one byte for each.
6733 const std::string& sv =
6734 known_attributes[elfcpp::Tag_also_compatible_with].string_value();
6735 if (sv.size() == 2
6736 && sv.data()[0] == elfcpp::Tag_CPU_arch
6737 && (sv.data()[1] & 128) != 128)
6738 return sv.data()[1];
6739
6740 // This tag is "safely ignorable", so don't complain if it looks funny.
6741 return -1;
6742 }
6743
6744 // Set, or unset, the architecture of the Tag_also_compatible_with attribute.
6745 // The tag is removed if ARCH is -1.
6746 // This is adapted from set_secondary_compatible_arch() in bfd/elf32-arm.c.
6747
6748 template<bool big_endian>
6749 void
6750 Target_arm<big_endian>::set_secondary_compatible_arch(
6751 Attributes_section_data* pasd,
6752 int arch)
6753 {
6754 Object_attribute *known_attributes =
6755 pasd->known_attributes(Object_attribute::OBJ_ATTR_PROC);
6756
6757 if (arch == -1)
6758 {
6759 known_attributes[elfcpp::Tag_also_compatible_with].set_string_value("");
6760 return;
6761 }
6762
6763 // Note: the tag and its argument below are uleb128 values, though
6764 // currently-defined values fit in one byte for each.
6765 char sv[3];
6766 sv[0] = elfcpp::Tag_CPU_arch;
6767 gold_assert(arch != 0);
6768 sv[1] = arch;
6769 sv[2] = '\0';
6770
6771 known_attributes[elfcpp::Tag_also_compatible_with].set_string_value(sv);
6772 }
6773
6774 // Combine two values for Tag_CPU_arch, taking secondary compatibility tags
6775 // into account.
6776 // This is adapted from tag_cpu_arch_combine() in bfd/elf32-arm.c.
6777
template<bool big_endian>
int
Target_arm<big_endian>::tag_cpu_arch_combine(
    const char* name,
    int oldtag,
    int* secondary_compat_out,
    int newtag,
    int secondary_compat)
{
#define T(X) elfcpp::TAG_CPU_ARCH_##X
  // Each table below belongs to one architecture tag (the table's
  // name) and gives the combined result of merging it with each
  // lower-or-equal tag (the table index).  An entry of -1 means the
  // two architectures cannot be merged.
  static const int v6t2[] =
    {
      T(V6T2), // PRE_V4.
      T(V6T2), // V4.
      T(V6T2), // V4T.
      T(V6T2), // V5T.
      T(V6T2), // V5TE.
      T(V6T2), // V5TEJ.
      T(V6T2), // V6.
      T(V7), // V6KZ.
      T(V6T2) // V6T2.
    };
  static const int v6k[] =
    {
      T(V6K), // PRE_V4.
      T(V6K), // V4.
      T(V6K), // V4T.
      T(V6K), // V5T.
      T(V6K), // V5TE.
      T(V6K), // V5TEJ.
      T(V6K), // V6.
      T(V6KZ), // V6KZ.
      T(V7), // V6T2.
      T(V6K) // V6K.
    };
  static const int v7[] =
    {
      T(V7), // PRE_V4.
      T(V7), // V4.
      T(V7), // V4T.
      T(V7), // V5T.
      T(V7), // V5TE.
      T(V7), // V5TEJ.
      T(V7), // V6.
      T(V7), // V6KZ.
      T(V7), // V6T2.
      T(V7), // V6K.
      T(V7) // V7.
    };
  static const int v6_m[] =
    {
      -1, // PRE_V4.
      -1, // V4.
      T(V6K), // V4T.
      T(V6K), // V5T.
      T(V6K), // V5TE.
      T(V6K), // V5TEJ.
      T(V6K), // V6.
      T(V6KZ), // V6KZ.
      T(V7), // V6T2.
      T(V6K), // V6K.
      T(V7), // V7.
      T(V6_M) // V6_M.
    };
  static const int v6s_m[] =
    {
      -1, // PRE_V4.
      -1, // V4.
      T(V6K), // V4T.
      T(V6K), // V5T.
      T(V6K), // V5TE.
      T(V6K), // V5TEJ.
      T(V6K), // V6.
      T(V6KZ), // V6KZ.
      T(V7), // V6T2.
      T(V6K), // V6K.
      T(V7), // V7.
      T(V6S_M), // V6_M.
      T(V6S_M) // V6S_M.
    };
  static const int v7e_m[] =
    {
      -1, // PRE_V4.
      -1, // V4.
      T(V7E_M), // V4T.
      T(V7E_M), // V5T.
      T(V7E_M), // V5TE.
      T(V7E_M), // V5TEJ.
      T(V7E_M), // V6.
      T(V7E_M), // V6KZ.
      T(V7E_M), // V6T2.
      T(V7E_M), // V6K.
      T(V7E_M), // V7.
      T(V7E_M), // V6_M.
      T(V7E_M), // V6S_M.
      T(V7E_M) // V7E_M.
    };
  static const int v4t_plus_v6_m[] =
    {
      -1, // PRE_V4.
      -1, // V4.
      T(V4T), // V4T.
      T(V5T), // V5T.
      T(V5TE), // V5TE.
      T(V5TEJ), // V5TEJ.
      T(V6), // V6.
      T(V6KZ), // V6KZ.
      T(V6T2), // V6T2.
      T(V6K), // V6K.
      T(V7), // V7.
      T(V6_M), // V6_M.
      T(V6S_M), // V6S_M.
      T(V7E_M), // V7E_M.
      T(V4T_PLUS_V6_M) // V4T plus V6_M.
    };
  // Tables indexed by the higher of the two tags, offset by V6T2
  // (tags below V6T2 never need a table; see the monotonic case
  // below).
  static const int *comb[] =
    {
      v6t2,
      v6k,
      v7,
      v6_m,
      v6s_m,
      v7e_m,
      // Pseudo-architecture.
      v4t_plus_v6_m
    };

  // Check we've not got a higher architecture than we know about.

  if (oldtag >= elfcpp::MAX_TAG_CPU_ARCH || newtag >= elfcpp::MAX_TAG_CPU_ARCH)
    {
      gold_error(_("%s: unknown CPU architecture"), name);
      return -1;
    }

  // Override old tag if we have a Tag_also_compatible_with on the output.

  if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
      || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
    oldtag = T(V4T_PLUS_V6_M);

  // And override the new tag if we have a Tag_also_compatible_with on the
  // input.

  if ((newtag == T(V6_M) && secondary_compat == T(V4T))
      || (newtag == T(V4T) && secondary_compat == T(V6_M)))
    newtag = T(V4T_PLUS_V6_M);

  // Architectures before V6KZ add features monotonically.
  int tagh = std::max(oldtag, newtag);
  if (tagh <= elfcpp::TAG_CPU_ARCH_V6KZ)
    return tagh;

  // Otherwise look the pair up in the table for the higher tag.
  int tagl = std::min(oldtag, newtag);
  int result = comb[tagh - T(V6T2)][tagl];

  // Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
  // as the canonical version.
  if (result == T(V4T_PLUS_V6_M))
    {
      result = T(V4T);
      *secondary_compat_out = T(V6_M);
    }
  else
    *secondary_compat_out = -1;

  if (result == -1)
    {
      gold_error(_("%s: conflicting CPU architectures %d/%d"),
		 name, oldtag, newtag);
      return -1;
    }

  return result;
#undef T
}
6954
6955 // Helper to print AEABI enum tag value.
6956
6957 template<bool big_endian>
6958 std::string
6959 Target_arm<big_endian>::aeabi_enum_name(unsigned int value)
6960 {
6961 static const char *aeabi_enum_names[] =
6962 { "", "variable-size", "32-bit", "" };
6963 const size_t aeabi_enum_names_size =
6964 sizeof(aeabi_enum_names) / sizeof(aeabi_enum_names[0]);
6965
6966 if (value < aeabi_enum_names_size)
6967 return std::string(aeabi_enum_names[value]);
6968 else
6969 {
6970 char buffer[100];
6971 sprintf(buffer, "<unknown value %u>", value);
6972 return std::string(buffer);
6973 }
6974 }
6975
6976 // Return the string value to store in TAG_CPU_name.
6977
6978 template<bool big_endian>
6979 std::string
6980 Target_arm<big_endian>::tag_cpu_name_value(unsigned int value)
6981 {
6982 static const char *name_table[] = {
6983 // These aren't real CPU names, but we can't guess
6984 // that from the architecture version alone.
6985 "Pre v4",
6986 "ARM v4",
6987 "ARM v4T",
6988 "ARM v5T",
6989 "ARM v5TE",
6990 "ARM v5TEJ",
6991 "ARM v6",
6992 "ARM v6KZ",
6993 "ARM v6T2",
6994 "ARM v6K",
6995 "ARM v7",
6996 "ARM v6-M",
6997 "ARM v6S-M",
6998 "ARM v7E-M"
6999 };
7000 const size_t name_table_size = sizeof(name_table) / sizeof(name_table[0]);
7001
7002 if (value < name_table_size)
7003 return std::string(name_table[value]);
7004 else
7005 {
7006 char buffer[100];
7007 sprintf(buffer, "<unknown CPU value %u>", value);
7008 return std::string(buffer);
7009 }
7010 }
7011
7012 // Merge object attributes from input file called NAME with those of the
7013 // output. The input object attributes are in the object pointed by PASD.
7014
7015 template<bool big_endian>
7016 void
7017 Target_arm<big_endian>::merge_object_attributes(
7018 const char* name,
7019 const Attributes_section_data* pasd)
7020 {
7021 // Return if there is no attributes section data.
7022 if (pasd == NULL)
7023 return;
7024
7025 // If output has no object attributes, just copy.
7026 if (this->attributes_section_data_ == NULL)
7027 {
7028 this->attributes_section_data_ = new Attributes_section_data(*pasd);
7029 return;
7030 }
7031
7032 const int vendor = Object_attribute::OBJ_ATTR_PROC;
7033 const Object_attribute* in_attr = pasd->known_attributes(vendor);
7034 Object_attribute* out_attr =
7035 this->attributes_section_data_->known_attributes(vendor);
7036
7037 // This needs to happen before Tag_ABI_FP_number_model is merged. */
7038 if (in_attr[elfcpp::Tag_ABI_VFP_args].int_value()
7039 != out_attr[elfcpp::Tag_ABI_VFP_args].int_value())
7040 {
7041 // Ignore mismatches if the object doesn't use floating point. */
7042 if (out_attr[elfcpp::Tag_ABI_FP_number_model].int_value() == 0)
7043 out_attr[elfcpp::Tag_ABI_VFP_args].set_int_value(
7044 in_attr[elfcpp::Tag_ABI_VFP_args].int_value());
7045 else if (in_attr[elfcpp::Tag_ABI_FP_number_model].int_value() != 0)
7046 gold_error(_("%s uses VFP register arguments, output does not"),
7047 name);
7048 }
7049
7050 for (int i = 4; i < Vendor_object_attributes::NUM_KNOWN_ATTRIBUTES; ++i)
7051 {
7052 // Merge this attribute with existing attributes.
7053 switch (i)
7054 {
7055 case elfcpp::Tag_CPU_raw_name:
7056 case elfcpp::Tag_CPU_name:
7057 // These are merged after Tag_CPU_arch.
7058 break;
7059
7060 case elfcpp::Tag_ABI_optimization_goals:
7061 case elfcpp::Tag_ABI_FP_optimization_goals:
7062 // Use the first value seen.
7063 break;
7064
7065 case elfcpp::Tag_CPU_arch:
7066 {
7067 unsigned int saved_out_attr = out_attr->int_value();
7068 // Merge Tag_CPU_arch and Tag_also_compatible_with.
7069 int secondary_compat =
7070 this->get_secondary_compatible_arch(pasd);
7071 int secondary_compat_out =
7072 this->get_secondary_compatible_arch(
7073 this->attributes_section_data_);
7074 out_attr[i].set_int_value(
7075 tag_cpu_arch_combine(name, out_attr[i].int_value(),
7076 &secondary_compat_out,
7077 in_attr[i].int_value(),
7078 secondary_compat));
7079 this->set_secondary_compatible_arch(this->attributes_section_data_,
7080 secondary_compat_out);
7081
7082 // Merge Tag_CPU_name and Tag_CPU_raw_name.
7083 if (out_attr[i].int_value() == saved_out_attr)
7084 ; // Leave the names alone.
7085 else if (out_attr[i].int_value() == in_attr[i].int_value())
7086 {
7087 // The output architecture has been changed to match the
7088 // input architecture. Use the input names.
7089 out_attr[elfcpp::Tag_CPU_name].set_string_value(
7090 in_attr[elfcpp::Tag_CPU_name].string_value());
7091 out_attr[elfcpp::Tag_CPU_raw_name].set_string_value(
7092 in_attr[elfcpp::Tag_CPU_raw_name].string_value());
7093 }
7094 else
7095 {
7096 out_attr[elfcpp::Tag_CPU_name].set_string_value("");
7097 out_attr[elfcpp::Tag_CPU_raw_name].set_string_value("");
7098 }
7099
7100 // If we still don't have a value for Tag_CPU_name,
7101 // make one up now. Tag_CPU_raw_name remains blank.
7102 if (out_attr[elfcpp::Tag_CPU_name].string_value() == "")
7103 {
7104 const std::string cpu_name =
7105 this->tag_cpu_name_value(out_attr[i].int_value());
7106 // FIXME: If we see an unknown CPU, this will be set
7107 // to "<unknown CPU n>", where n is the attribute value.
7108 // This is different from BFD, which leaves the name alone.
7109 out_attr[elfcpp::Tag_CPU_name].set_string_value(cpu_name);
7110 }
7111 }
7112 break;
7113
7114 case elfcpp::Tag_ARM_ISA_use:
7115 case elfcpp::Tag_THUMB_ISA_use:
7116 case elfcpp::Tag_WMMX_arch:
7117 case elfcpp::Tag_Advanced_SIMD_arch:
7118 // ??? Do Advanced_SIMD (NEON) and WMMX conflict?
7119 case elfcpp::Tag_ABI_FP_rounding:
7120 case elfcpp::Tag_ABI_FP_exceptions:
7121 case elfcpp::Tag_ABI_FP_user_exceptions:
7122 case elfcpp::Tag_ABI_FP_number_model:
7123 case elfcpp::Tag_VFP_HP_extension:
7124 case elfcpp::Tag_CPU_unaligned_access:
7125 case elfcpp::Tag_T2EE_use:
7126 case elfcpp::Tag_Virtualization_use:
7127 case elfcpp::Tag_MPextension_use:
7128 // Use the largest value specified.
7129 if (in_attr[i].int_value() > out_attr[i].int_value())
7130 out_attr[i].set_int_value(in_attr[i].int_value());
7131 break;
7132
7133 case elfcpp::Tag_ABI_align8_preserved:
7134 case elfcpp::Tag_ABI_PCS_RO_data:
7135 // Use the smallest value specified.
7136 if (in_attr[i].int_value() < out_attr[i].int_value())
7137 out_attr[i].set_int_value(in_attr[i].int_value());
7138 break;
7139
7140 case elfcpp::Tag_ABI_align8_needed:
7141 if ((in_attr[i].int_value() > 0 || out_attr[i].int_value() > 0)
7142 && (in_attr[elfcpp::Tag_ABI_align8_preserved].int_value() == 0
7143 || (out_attr[elfcpp::Tag_ABI_align8_preserved].int_value()
7144 == 0)))
7145 {
7146 // This error message should be enabled once all non-conformant
7147 // binaries in the toolchain have had the attributes set
7148 // properly.
7149 // gold_error(_("output 8-byte data alignment conflicts with %s"),
7150 // name);
7151 }
7152 // Fall through.
7153 case elfcpp::Tag_ABI_FP_denormal:
7154 case elfcpp::Tag_ABI_PCS_GOT_use:
7155 {
7156 // These tags have 0 = don't care, 1 = strong requirement,
7157 // 2 = weak requirement.
7158 static const int order_021[3] = {0, 2, 1};
7159
7160 // Use the "greatest" from the sequence 0, 2, 1, or the largest
7161 // value if greater than 2 (for future-proofing).
7162 if ((in_attr[i].int_value() > 2
7163 && in_attr[i].int_value() > out_attr[i].int_value())
7164 || (in_attr[i].int_value() <= 2
7165 && out_attr[i].int_value() <= 2
7166 && (order_021[in_attr[i].int_value()]
7167 > order_021[out_attr[i].int_value()])))
7168 out_attr[i].set_int_value(in_attr[i].int_value());
7169 }
7170 break;
7171
7172 case elfcpp::Tag_CPU_arch_profile:
7173 if (out_attr[i].int_value() != in_attr[i].int_value())
7174 {
7175 // 0 will merge with anything.
7176 // 'A' and 'S' merge to 'A'.
7177 // 'R' and 'S' merge to 'R'.
7178 // 'M' and 'A|R|S' is an error.
7179 if (out_attr[i].int_value() == 0
7180 || (out_attr[i].int_value() == 'S'
7181 && (in_attr[i].int_value() == 'A'
7182 || in_attr[i].int_value() == 'R')))
7183 out_attr[i].set_int_value(in_attr[i].int_value());
7184 else if (in_attr[i].int_value() == 0
7185 || (in_attr[i].int_value() == 'S'
7186 && (out_attr[i].int_value() == 'A'
7187 || out_attr[i].int_value() == 'R')))
7188 ; // Do nothing.
7189 else
7190 {
7191 gold_error
7192 (_("conflicting architecture profiles %c/%c"),
7193 in_attr[i].int_value() ? in_attr[i].int_value() : '0',
7194 out_attr[i].int_value() ? out_attr[i].int_value() : '0');
7195 }
7196 }
7197 break;
7198 case elfcpp::Tag_VFP_arch:
7199 {
7200 static const struct
7201 {
7202 int ver;
7203 int regs;
7204 } vfp_versions[7] =
7205 {
7206 {0, 0},
7207 {1, 16},
7208 {2, 16},
7209 {3, 32},
7210 {3, 16},
7211 {4, 32},
7212 {4, 16}
7213 };
7214
7215 // Values greater than 6 aren't defined, so just pick the
7216 // biggest.
7217 if (in_attr[i].int_value() > 6
7218 && in_attr[i].int_value() > out_attr[i].int_value())
7219 {
7220 *out_attr = *in_attr;
7221 break;
7222 }
7223 // The output uses the superset of input features
7224 // (ISA version) and registers.
7225 int ver = std::max(vfp_versions[in_attr[i].int_value()].ver,
7226 vfp_versions[out_attr[i].int_value()].ver);
7227 int regs = std::max(vfp_versions[in_attr[i].int_value()].regs,
7228 vfp_versions[out_attr[i].int_value()].regs);
7229 // This assumes all possible supersets are also a valid
7230 // options.
7231 int newval;
7232 for (newval = 6; newval > 0; newval--)
7233 {
7234 if (regs == vfp_versions[newval].regs
7235 && ver == vfp_versions[newval].ver)
7236 break;
7237 }
7238 out_attr[i].set_int_value(newval);
7239 }
7240 break;
7241 case elfcpp::Tag_PCS_config:
7242 if (out_attr[i].int_value() == 0)
7243 out_attr[i].set_int_value(in_attr[i].int_value());
7244 else if (in_attr[i].int_value() != 0 && out_attr[i].int_value() != 0)
7245 {
7246 // It's sometimes ok to mix different configs, so this is only
7247 // a warning.
7248 gold_warning(_("%s: conflicting platform configuration"), name);
7249 }
7250 break;
7251 case elfcpp::Tag_ABI_PCS_R9_use:
7252 if (in_attr[i].int_value() != out_attr[i].int_value()
7253 && out_attr[i].int_value() != elfcpp::AEABI_R9_unused
7254 && in_attr[i].int_value() != elfcpp::AEABI_R9_unused)
7255 {
7256 gold_error(_("%s: conflicting use of R9"), name);
7257 }
7258 if (out_attr[i].int_value() == elfcpp::AEABI_R9_unused)
7259 out_attr[i].set_int_value(in_attr[i].int_value());
7260 break;
7261 case elfcpp::Tag_ABI_PCS_RW_data:
7262 if (in_attr[i].int_value() == elfcpp::AEABI_PCS_RW_data_SBrel
7263 && (in_attr[elfcpp::Tag_ABI_PCS_R9_use].int_value()
7264 != elfcpp::AEABI_R9_SB)
7265 && (out_attr[elfcpp::Tag_ABI_PCS_R9_use].int_value()
7266 != elfcpp::AEABI_R9_unused))
7267 {
7268 gold_error(_("%s: SB relative addressing conflicts with use "
7269 "of R9"),
7270 name);
7271 }
7272 // Use the smallest value specified.
7273 if (in_attr[i].int_value() < out_attr[i].int_value())
7274 out_attr[i].set_int_value(in_attr[i].int_value());
7275 break;
7276 case elfcpp::Tag_ABI_PCS_wchar_t:
7277 // FIXME: Make it possible to turn off this warning.
7278 if (out_attr[i].int_value()
7279 && in_attr[i].int_value()
7280 && out_attr[i].int_value() != in_attr[i].int_value())
7281 {
7282 gold_warning(_("%s uses %u-byte wchar_t yet the output is to "
7283 "use %u-byte wchar_t; use of wchar_t values "
7284 "across objects may fail"),
7285 name, in_attr[i].int_value(),
7286 out_attr[i].int_value());
7287 }
7288 else if (in_attr[i].int_value() && !out_attr[i].int_value())
7289 out_attr[i].set_int_value(in_attr[i].int_value());
7290 break;
7291 case elfcpp::Tag_ABI_enum_size:
7292 if (in_attr[i].int_value() != elfcpp::AEABI_enum_unused)
7293 {
7294 if (out_attr[i].int_value() == elfcpp::AEABI_enum_unused
7295 || out_attr[i].int_value() == elfcpp::AEABI_enum_forced_wide)
7296 {
7297 // The existing object is compatible with anything.
7298 // Use whatever requirements the new object has.
7299 out_attr[i].set_int_value(in_attr[i].int_value());
7300 }
7301 // FIXME: Make it possible to turn off this warning.
7302 else if (in_attr[i].int_value() != elfcpp::AEABI_enum_forced_wide
7303 && out_attr[i].int_value() != in_attr[i].int_value())
7304 {
7305 unsigned int in_value = in_attr[i].int_value();
7306 unsigned int out_value = out_attr[i].int_value();
7307 gold_warning(_("%s uses %s enums yet the output is to use "
7308 "%s enums; use of enum values across objects "
7309 "may fail"),
7310 name,
7311 this->aeabi_enum_name(in_value).c_str(),
7312 this->aeabi_enum_name(out_value).c_str());
7313 }
7314 }
7315 break;
7316 case elfcpp::Tag_ABI_VFP_args:
7317 // Aready done.
7318 break;
7319 case elfcpp::Tag_ABI_WMMX_args:
7320 if (in_attr[i].int_value() != out_attr[i].int_value())
7321 {
7322 gold_error(_("%s uses iWMMXt register arguments, output does "
7323 "not"),
7324 name);
7325 }
7326 break;
7327 case Object_attribute::Tag_compatibility:
7328 // Merged in target-independent code.
7329 break;
7330 case elfcpp::Tag_ABI_HardFP_use:
7331 // 1 (SP) and 2 (DP) conflict, so combine to 3 (SP & DP).
7332 if ((in_attr[i].int_value() == 1 && out_attr[i].int_value() == 2)
7333 || (in_attr[i].int_value() == 2 && out_attr[i].int_value() == 1))
7334 out_attr[i].set_int_value(3);
7335 else if (in_attr[i].int_value() > out_attr[i].int_value())
7336 out_attr[i].set_int_value(in_attr[i].int_value());
7337 break;
7338 case elfcpp::Tag_ABI_FP_16bit_format:
7339 if (in_attr[i].int_value() != 0 && out_attr[i].int_value() != 0)
7340 {
7341 if (in_attr[i].int_value() != out_attr[i].int_value())
7342 gold_error(_("fp16 format mismatch between %s and output"),
7343 name);
7344 }
7345 if (in_attr[i].int_value() != 0)
7346 out_attr[i].set_int_value(in_attr[i].int_value());
7347 break;
7348
7349 case elfcpp::Tag_nodefaults:
7350 // This tag is set if it exists, but the value is unused (and is
7351 // typically zero). We don't actually need to do anything here -
7352 // the merge happens automatically when the type flags are merged
7353 // below.
7354 break;
7355 case elfcpp::Tag_also_compatible_with:
7356 // Already done in Tag_CPU_arch.
7357 break;
7358 case elfcpp::Tag_conformance:
7359 // Keep the attribute if it matches. Throw it away otherwise.
7360 // No attribute means no claim to conform.
7361 if (in_attr[i].string_value() != out_attr[i].string_value())
7362 out_attr[i].set_string_value("");
7363 break;
7364
7365 default:
7366 {
7367 const char* err_object = NULL;
7368
7369 // The "known_obj_attributes" table does contain some undefined
7370 // attributes. Ensure that there are unused.
7371 if (out_attr[i].int_value() != 0
7372 || out_attr[i].string_value() != "")
7373 err_object = "output";
7374 else if (in_attr[i].int_value() != 0
7375 || in_attr[i].string_value() != "")
7376 err_object = name;
7377
7378 if (err_object != NULL)
7379 {
7380 // Attribute numbers >=64 (mod 128) can be safely ignored.
7381 if ((i & 127) < 64)
7382 gold_error(_("%s: unknown mandatory EABI object attribute "
7383 "%d"),
7384 err_object, i);
7385 else
7386 gold_warning(_("%s: unknown EABI object attribute %d"),
7387 err_object, i);
7388 }
7389
7390 // Only pass on attributes that match in both inputs.
7391 if (!in_attr[i].matches(out_attr[i]))
7392 {
7393 out_attr[i].set_int_value(0);
7394 out_attr[i].set_string_value("");
7395 }
7396 }
7397 }
7398
7399 // If out_attr was copied from in_attr then it won't have a type yet.
7400 if (in_attr[i].type() && !out_attr[i].type())
7401 out_attr[i].set_type(in_attr[i].type());
7402 }
7403
7404 // Merge Tag_compatibility attributes and any common GNU ones.
7405 this->attributes_section_data_->merge(name, pasd);
7406
7407 // Check for any attributes not known on ARM.
7408 typedef Vendor_object_attributes::Other_attributes Other_attributes;
7409 const Other_attributes* in_other_attributes = pasd->other_attributes(vendor);
7410 Other_attributes::const_iterator in_iter = in_other_attributes->begin();
7411 Other_attributes* out_other_attributes =
7412 this->attributes_section_data_->other_attributes(vendor);
7413 Other_attributes::iterator out_iter = out_other_attributes->begin();
7414
7415 while (in_iter != in_other_attributes->end()
7416 || out_iter != out_other_attributes->end())
7417 {
7418 const char* err_object = NULL;
7419 int err_tag = 0;
7420
7421 // The tags for each list are in numerical order.
7422 // If the tags are equal, then merge.
7423 if (out_iter != out_other_attributes->end()
7424 && (in_iter == in_other_attributes->end()
7425 || in_iter->first > out_iter->first))
7426 {
7427 // This attribute only exists in output. We can't merge, and we
7428 // don't know what the tag means, so delete it.
7429 err_object = "output";
7430 err_tag = out_iter->first;
7431 int saved_tag = out_iter->first;
7432 delete out_iter->second;
7433 out_other_attributes->erase(out_iter);
7434 out_iter = out_other_attributes->upper_bound(saved_tag);
7435 }
7436 else if (in_iter != in_other_attributes->end()
7437 && (out_iter != out_other_attributes->end()
7438 || in_iter->first < out_iter->first))
7439 {
7440 // This attribute only exists in input. We can't merge, and we
7441 // don't know what the tag means, so ignore it.
7442 err_object = name;
7443 err_tag = in_iter->first;
7444 ++in_iter;
7445 }
7446 else // The tags are equal.
7447 {
7448 // As present, all attributes in the list are unknown, and
7449 // therefore can't be merged meaningfully.
7450 err_object = "output";
7451 err_tag = out_iter->first;
7452
7453 // Only pass on attributes that match in both inputs.
7454 if (!in_iter->second->matches(*(out_iter->second)))
7455 {
7456 // No match. Delete the attribute.
7457 int saved_tag = out_iter->first;
7458 delete out_iter->second;
7459 out_other_attributes->erase(out_iter);
7460 out_iter = out_other_attributes->upper_bound(saved_tag);
7461 }
7462 else
7463 {
7464 // Matched. Keep the attribute and move to the next.
7465 ++out_iter;
7466 ++in_iter;
7467 }
7468 }
7469
7470 if (err_object)
7471 {
7472 // Attribute numbers >=64 (mod 128) can be safely ignored. */
7473 if ((err_tag & 127) < 64)
7474 {
7475 gold_error(_("%s: unknown mandatory EABI object attribute %d"),
7476 err_object, err_tag);
7477 }
7478 else
7479 {
7480 gold_warning(_("%s: unknown EABI object attribute %d"),
7481 err_object, err_tag);
7482 }
7483 }
7484 }
7485 }
7486
// Return whether a relocation type uses the LSB to distinguish THUMB
// addresses.
// Return true if the computed value of relocation type R_TYPE carries
// the Thumb bit (bit 0 set) when the target symbol is a Thumb
// function, so that callers know whether the LSB may need stripping.
template<bool big_endian>
bool
Target_arm<big_endian>::reloc_uses_thumb_bit(unsigned int r_type)
{
  switch (r_type)
    {
    // Data and branch relocations whose value encodes the Thumb bit.
    case elfcpp::R_ARM_PC24:
    case elfcpp::R_ARM_ABS32:
    case elfcpp::R_ARM_REL32:
    case elfcpp::R_ARM_SBREL32:
    case elfcpp::R_ARM_THM_CALL:
    case elfcpp::R_ARM_GLOB_DAT:
    case elfcpp::R_ARM_JUMP_SLOT:
    case elfcpp::R_ARM_GOTOFF32:
    case elfcpp::R_ARM_PLT32:
    case elfcpp::R_ARM_CALL:
    case elfcpp::R_ARM_JUMP24:
    case elfcpp::R_ARM_THM_JUMP24:
    case elfcpp::R_ARM_SBREL31:
    case elfcpp::R_ARM_PREL31:
    // MOVW-style immediate relocations.
    case elfcpp::R_ARM_MOVW_ABS_NC:
    case elfcpp::R_ARM_MOVW_PREL_NC:
    case elfcpp::R_ARM_THM_MOVW_ABS_NC:
    case elfcpp::R_ARM_THM_MOVW_PREL_NC:
    case elfcpp::R_ARM_THM_JUMP19:
    case elfcpp::R_ARM_THM_ALU_PREL_11_0:
    // Group relocations.
    case elfcpp::R_ARM_ALU_PC_G0_NC:
    case elfcpp::R_ARM_ALU_PC_G0:
    case elfcpp::R_ARM_ALU_PC_G1_NC:
    case elfcpp::R_ARM_ALU_PC_G1:
    case elfcpp::R_ARM_ALU_PC_G2:
    case elfcpp::R_ARM_ALU_SB_G0_NC:
    case elfcpp::R_ARM_ALU_SB_G0:
    case elfcpp::R_ARM_ALU_SB_G1_NC:
    case elfcpp::R_ARM_ALU_SB_G1:
    case elfcpp::R_ARM_ALU_SB_G2:
    case elfcpp::R_ARM_MOVW_BREL_NC:
    case elfcpp::R_ARM_MOVW_BREL:
    case elfcpp::R_ARM_THM_MOVW_BREL_NC:
    case elfcpp::R_ARM_THM_MOVW_BREL:
      return true;
    default:
      return false;
    }
}
7534
7535 // Stub-generation methods for Target_arm.
7536
7537 // Make a new Arm_input_section object.
7538
7539 template<bool big_endian>
7540 Arm_input_section<big_endian>*
7541 Target_arm<big_endian>::new_arm_input_section(
7542 Relobj* relobj,
7543 unsigned int shndx)
7544 {
7545 Section_id sid(relobj, shndx);
7546
7547 Arm_input_section<big_endian>* arm_input_section =
7548 new Arm_input_section<big_endian>(relobj, shndx);
7549 arm_input_section->init();
7550
7551 // Register new Arm_input_section in map for look-up.
7552 std::pair<typename Arm_input_section_map::iterator, bool> ins =
7553 this->arm_input_section_map_.insert(std::make_pair(sid, arm_input_section));
7554
7555 // Make sure that it we have not created another Arm_input_section
7556 // for this input section already.
7557 gold_assert(ins.second);
7558
7559 return arm_input_section;
7560 }
7561
7562 // Find the Arm_input_section object corresponding to the SHNDX-th input
7563 // section of RELOBJ.
7564
7565 template<bool big_endian>
7566 Arm_input_section<big_endian>*
7567 Target_arm<big_endian>::find_arm_input_section(
7568 Relobj* relobj,
7569 unsigned int shndx) const
7570 {
7571 Section_id sid(relobj, shndx);
7572 typename Arm_input_section_map::const_iterator p =
7573 this->arm_input_section_map_.find(sid);
7574 return (p != this->arm_input_section_map_.end()) ? p->second : NULL;
7575 }
7576
7577 // Make a new stub table.
7578
7579 template<bool big_endian>
7580 Stub_table<big_endian>*
7581 Target_arm<big_endian>::new_stub_table(Arm_input_section<big_endian>* owner)
7582 {
7583 Stub_table<big_endian>* stub_table =
7584 new Stub_table<big_endian>(owner);
7585 this->stub_tables_.push_back(stub_table);
7586
7587 stub_table->set_address(owner->address() + owner->data_size());
7588 stub_table->set_file_offset(owner->offset() + owner->data_size());
7589 stub_table->finalize_data_size();
7590
7591 return stub_table;
7592 }
7593
// Scan a relocation for stub generation.
//
// RELINFO describes the relocation context, R_TYPE is the (real) ARM
// relocation type, GSYM is the global symbol or NULL for a local symbol,
// R_SYM is the symbol index, PSYMVAL is the symbol value, ADDEND is the
// relocation addend and ADDRESS is the address of the relocated location.

template<bool big_endian>
void
Target_arm<big_endian>::scan_reloc_for_stub(
    const Relocate_info<32, big_endian>* relinfo,
    unsigned int r_type,
    const Sized_symbol<32>* gsym,
    unsigned int r_sym,
    const Symbol_value<32>* psymval,
    elfcpp::Elf_types<32>::Elf_Swxword addend,
    Arm_address address)
{
  typedef typename Target_arm<big_endian>::Relocate Relocate;

  const Arm_relobj<big_endian>* arm_relobj =
    Arm_relobj<big_endian>::as_arm_relobj(relinfo->object);

  // R_ARM_V4BX is special: the addend carries the register number of
  // the BX instruction.  In fix-v4bx mode 2 we make sure an
  // interworking stub exists for this register (stubs are keyed by
  // register number); register 15 (PC) is excluded.
  if (r_type == elfcpp::R_ARM_V4BX)
    {
      const uint32_t reg = (addend & 0xf);
      if (this->fix_v4bx() == 2 && reg < 0xf)
        {
          // Try looking up an existing stub from a stub table.
          Stub_table<big_endian>* stub_table =
            arm_relobj->stub_table(relinfo->data_shndx);
          gold_assert(stub_table != NULL);

          if (stub_table->find_arm_v4bx_stub(reg) == NULL)
            {
              // Create a new stub and add it to stub table.
              Arm_v4bx_stub* stub =
                this->stub_factory().make_arm_v4bx_stub(reg);
              gold_assert(stub != NULL);
              stub_table->add_arm_v4bx_stub(stub);
            }
        }

      return;
    }

  bool target_is_thumb;
  Symbol_value<32> symval;
  if (gsym != NULL)
    {
      // This is a global symbol.  Determine if we use PLT and if the
      // final target is THUMB.
      if (gsym->use_plt_offset(Relocate::reloc_is_non_pic(r_type)))
        {
          // This uses a PLT, change the symbol value to the PLT entry
          // address.  A PLT entry is treated as ARM code.
          symval.set_output_value(this->plt_section()->address()
                                  + gsym->plt_offset());
          psymval = &symval;
          target_is_thumb = false;
        }
      else if (gsym->is_undefined())
        // There is no need to generate a stub if the symbol is undefined.
        return;
      else
        {
          // The target is Thumb if the symbol is an explicit Thumb
          // function (STT_ARM_TFUNC), or an STT_FUNC whose value has
          // the low bit set.
          target_is_thumb =
            ((gsym->type() == elfcpp::STT_ARM_TFUNC)
             || (gsym->type() == elfcpp::STT_FUNC
                 && !gsym->is_undefined()
                 && ((psymval->value(arm_relobj, 0) & 1) != 0)));
        }
    }
  else
    {
      // This is a local symbol.  Determine if the final target is THUMB.
      target_is_thumb = arm_relobj->local_symbol_is_thumb_function(r_sym);
    }

  // Strip LSB if this points to a THUMB target.
  if (target_is_thumb
      && Target_arm<big_endian>::reloc_uses_thumb_bit(r_type)
      && ((psymval->value(arm_relobj, 0) & 1) != 0))
    {
      Arm_address stripped_value =
        psymval->value(arm_relobj, 0) & ~static_cast<Arm_address>(1);
      symval.set_output_value(stripped_value);
      psymval = &symval;
    }

  // Get the symbol value.
  Symbol_value<32>::Value value = psymval->value(arm_relobj, 0);

  // Owing to pipelining, the PC relative branches below actually skip
  // two instructions when the branch offset is 0.
  Arm_address destination;
  switch (r_type)
    {
    case elfcpp::R_ARM_CALL:
    case elfcpp::R_ARM_JUMP24:
    case elfcpp::R_ARM_PLT32:
      // ARM branches: PC reads as instruction address + 8.
      destination = value + addend + 8;
      break;
    case elfcpp::R_ARM_THM_CALL:
    case elfcpp::R_ARM_THM_XPC22:
    case elfcpp::R_ARM_THM_JUMP24:
    case elfcpp::R_ARM_THM_JUMP19:
      // THUMB branches: PC reads as instruction address + 4.
      destination = value + addend + 4;
      break;
    default:
      gold_unreachable();
    }

  // Decide whether a stub is needed for this branch, and of which kind.
  Reloc_stub* stub = NULL;
  Stub_type stub_type =
    Reloc_stub::stub_type_for_reloc(r_type, address, destination,
                                    target_is_thumb);
  if (stub_type != arm_stub_none)
    {
      // Try looking up an existing stub from a stub table.
      Stub_table<big_endian>* stub_table =
        arm_relobj->stub_table(relinfo->data_shndx);
      gold_assert(stub_table != NULL);

      // Locate stub by destination.
      Reloc_stub::Key stub_key(stub_type, gsym, arm_relobj, r_sym, addend);

      // Create a stub if there is not one already.
      stub = stub_table->find_reloc_stub(stub_key);
      if (stub == NULL)
        {
          // Create a new stub and add it to stub table.
          stub = this->stub_factory().make_reloc_stub(stub_type);
          stub_table->add_reloc_stub(stub, stub_key);
        }

      // Record the destination address.  Bit 0 marks a Thumb-mode
      // destination.
      stub->set_destination_address(destination
                                    | (target_is_thumb ? 1 : 0));
    }

  // For Cortex-A8, we need to record a relocation at 4K page boundary.
  if (this->fix_cortex_a8_
      && (r_type == elfcpp::R_ARM_THM_JUMP24
          || r_type == elfcpp::R_ARM_THM_JUMP19
          || r_type == elfcpp::R_ARM_THM_CALL
          || r_type == elfcpp::R_ARM_THM_XPC22)
      && (address & 0xfffU) == 0xffeU)
    {
      // Found a candidate.  Note we haven't checked the destination is
      // within 4K here: if we do so (and don't create a record) we can't
      // tell that a branch should have been relocated when scanning later.
      this->cortex_a8_relocs_info_[address] =
        new Cortex_a8_reloc(stub, r_type,
                            destination | (target_is_thumb ? 1 : 0));
    }
}
7747
7748 // This function scans a relocation sections for stub generation.
7749 // The template parameter Relocate must be a class type which provides
7750 // a single function, relocate(), which implements the machine
7751 // specific part of a relocation.
7752
7753 // BIG_ENDIAN is the endianness of the data. SH_TYPE is the section type:
7754 // SHT_REL or SHT_RELA.
7755
7756 // PRELOCS points to the relocation data. RELOC_COUNT is the number
7757 // of relocs. OUTPUT_SECTION is the output section.
7758 // NEEDS_SPECIAL_OFFSET_HANDLING is true if input offsets need to be
7759 // mapped to output offsets.
7760
7761 // VIEW is the section data, VIEW_ADDRESS is its memory address, and
7762 // VIEW_SIZE is the size. These refer to the input section, unless
7763 // NEEDS_SPECIAL_OFFSET_HANDLING is true, in which case they refer to
7764 // the output section.
7765
7766 template<bool big_endian>
7767 template<int sh_type>
7768 void inline
7769 Target_arm<big_endian>::scan_reloc_section_for_stubs(
7770 const Relocate_info<32, big_endian>* relinfo,
7771 const unsigned char* prelocs,
7772 size_t reloc_count,
7773 Output_section* output_section,
7774 bool needs_special_offset_handling,
7775 const unsigned char* view,
7776 elfcpp::Elf_types<32>::Elf_Addr view_address,
7777 section_size_type)
7778 {
7779 typedef typename Reloc_types<sh_type, 32, big_endian>::Reloc Reltype;
7780 const int reloc_size =
7781 Reloc_types<sh_type, 32, big_endian>::reloc_size;
7782
7783 Arm_relobj<big_endian>* arm_object =
7784 Arm_relobj<big_endian>::as_arm_relobj(relinfo->object);
7785 unsigned int local_count = arm_object->local_symbol_count();
7786
7787 Comdat_behavior comdat_behavior = CB_UNDETERMINED;
7788
7789 for (size_t i = 0; i < reloc_count; ++i, prelocs += reloc_size)
7790 {
7791 Reltype reloc(prelocs);
7792
7793 typename elfcpp::Elf_types<32>::Elf_WXword r_info = reloc.get_r_info();
7794 unsigned int r_sym = elfcpp::elf_r_sym<32>(r_info);
7795 unsigned int r_type = elfcpp::elf_r_type<32>(r_info);
7796
7797 r_type = this->get_real_reloc_type(r_type);
7798
7799 // Only a few relocation types need stubs.
7800 if ((r_type != elfcpp::R_ARM_CALL)
7801 && (r_type != elfcpp::R_ARM_JUMP24)
7802 && (r_type != elfcpp::R_ARM_PLT32)
7803 && (r_type != elfcpp::R_ARM_THM_CALL)
7804 && (r_type != elfcpp::R_ARM_THM_XPC22)
7805 && (r_type != elfcpp::R_ARM_THM_JUMP24)
7806 && (r_type != elfcpp::R_ARM_THM_JUMP19)
7807 && (r_type != elfcpp::R_ARM_V4BX))
7808 continue;
7809
7810 section_offset_type offset =
7811 convert_to_section_size_type(reloc.get_r_offset());
7812
7813 if (needs_special_offset_handling)
7814 {
7815 offset = output_section->output_offset(relinfo->object,
7816 relinfo->data_shndx,
7817 offset);
7818 if (offset == -1)
7819 continue;
7820 }
7821
7822 if (r_type == elfcpp::R_ARM_V4BX)
7823 {
7824 // Get the BX instruction.
7825 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
7826 const Valtype* wv = reinterpret_cast<const Valtype*>(view + offset);
7827 elfcpp::Elf_types<32>::Elf_Swxword insn =
7828 elfcpp::Swap<32, big_endian>::readval(wv);
7829 this->scan_reloc_for_stub(relinfo, r_type, NULL, 0, NULL,
7830 insn, NULL);
7831 continue;
7832 }
7833
7834 // Get the addend.
7835 Stub_addend_reader<sh_type, big_endian> stub_addend_reader;
7836 elfcpp::Elf_types<32>::Elf_Swxword addend =
7837 stub_addend_reader(r_type, view + offset, reloc);
7838
7839 const Sized_symbol<32>* sym;
7840
7841 Symbol_value<32> symval;
7842 const Symbol_value<32> *psymval;
7843 if (r_sym < local_count)
7844 {
7845 sym = NULL;
7846 psymval = arm_object->local_symbol(r_sym);
7847
7848 // If the local symbol belongs to a section we are discarding,
7849 // and that section is a debug section, try to find the
7850 // corresponding kept section and map this symbol to its
7851 // counterpart in the kept section. The symbol must not
7852 // correspond to a section we are folding.
7853 bool is_ordinary;
7854 unsigned int shndx = psymval->input_shndx(&is_ordinary);
7855 if (is_ordinary
7856 && shndx != elfcpp::SHN_UNDEF
7857 && !arm_object->is_section_included(shndx)
7858 && !(relinfo->symtab->is_section_folded(arm_object, shndx)))
7859 {
7860 if (comdat_behavior == CB_UNDETERMINED)
7861 {
7862 std::string name =
7863 arm_object->section_name(relinfo->data_shndx);
7864 comdat_behavior = get_comdat_behavior(name.c_str());
7865 }
7866 if (comdat_behavior == CB_PRETEND)
7867 {
7868 bool found;
7869 typename elfcpp::Elf_types<32>::Elf_Addr value =
7870 arm_object->map_to_kept_section(shndx, &found);
7871 if (found)
7872 symval.set_output_value(value + psymval->input_value());
7873 else
7874 symval.set_output_value(0);
7875 }
7876 else
7877 {
7878 symval.set_output_value(0);
7879 }
7880 symval.set_no_output_symtab_entry();
7881 psymval = &symval;
7882 }
7883 }
7884 else
7885 {
7886 const Symbol* gsym = arm_object->global_symbol(r_sym);
7887 gold_assert(gsym != NULL);
7888 if (gsym->is_forwarder())
7889 gsym = relinfo->symtab->resolve_forwards(gsym);
7890
7891 sym = static_cast<const Sized_symbol<32>*>(gsym);
7892 if (sym->has_symtab_index())
7893 symval.set_output_symtab_index(sym->symtab_index());
7894 else
7895 symval.set_no_output_symtab_entry();
7896
7897 // We need to compute the would-be final value of this global
7898 // symbol.
7899 const Symbol_table* symtab = relinfo->symtab;
7900 const Sized_symbol<32>* sized_symbol =
7901 symtab->get_sized_symbol<32>(gsym);
7902 Symbol_table::Compute_final_value_status status;
7903 Arm_address value =
7904 symtab->compute_final_value<32>(sized_symbol, &status);
7905
7906 // Skip this if the symbol has not output section.
7907 if (status == Symbol_table::CFVS_NO_OUTPUT_SECTION)
7908 continue;
7909
7910 symval.set_output_value(value);
7911 psymval = &symval;
7912 }
7913
7914 // If symbol is a section symbol, we don't know the actual type of
7915 // destination. Give up.
7916 if (psymval->is_section_symbol())
7917 continue;
7918
7919 this->scan_reloc_for_stub(relinfo, r_type, sym, r_sym, psymval,
7920 addend, view_address + offset);
7921 }
7922 }
7923
7924 // Scan an input section for stub generation.
7925
7926 template<bool big_endian>
7927 void
7928 Target_arm<big_endian>::scan_section_for_stubs(
7929 const Relocate_info<32, big_endian>* relinfo,
7930 unsigned int sh_type,
7931 const unsigned char* prelocs,
7932 size_t reloc_count,
7933 Output_section* output_section,
7934 bool needs_special_offset_handling,
7935 const unsigned char* view,
7936 Arm_address view_address,
7937 section_size_type view_size)
7938 {
7939 if (sh_type == elfcpp::SHT_REL)
7940 this->scan_reloc_section_for_stubs<elfcpp::SHT_REL>(
7941 relinfo,
7942 prelocs,
7943 reloc_count,
7944 output_section,
7945 needs_special_offset_handling,
7946 view,
7947 view_address,
7948 view_size);
7949 else if (sh_type == elfcpp::SHT_RELA)
7950 // We do not support RELA type relocations yet. This is provided for
7951 // completeness.
7952 this->scan_reloc_section_for_stubs<elfcpp::SHT_RELA>(
7953 relinfo,
7954 prelocs,
7955 reloc_count,
7956 output_section,
7957 needs_special_offset_handling,
7958 view,
7959 view_address,
7960 view_size);
7961 else
7962 gold_unreachable();
7963 }
7964
// Group input sections for stub generation.
//
// We group input sections in an output section so that the total size,
// including any padding space due to alignment, is smaller than GROUP_SIZE
// unless the only input section in a group is bigger than GROUP_SIZE already.
// Then an ARM stub table is created to follow the last input section
// in the group.  For each group an ARM stub table is created and is placed
// after the last group.  If STUBS_ALWAYS_AFTER_BRANCH is false, we further
// extend the group after the stub table.
7974
7975 template<bool big_endian>
7976 void
7977 Target_arm<big_endian>::group_sections(
7978 Layout* layout,
7979 section_size_type group_size,
7980 bool stubs_always_after_branch)
7981 {
7982 // Group input sections and insert stub table
7983 Layout::Section_list section_list;
7984 layout->get_allocated_sections(&section_list);
7985 for (Layout::Section_list::const_iterator p = section_list.begin();
7986 p != section_list.end();
7987 ++p)
7988 {
7989 Arm_output_section<big_endian>* output_section =
7990 Arm_output_section<big_endian>::as_arm_output_section(*p);
7991 output_section->group_sections(group_size, stubs_always_after_branch,
7992 this);
7993 }
7994 }
7995
// Relaxation hook.  This is where we do stub generation.  Returns true
// if any stub table changed, in which case the caller must run another
// relaxation pass.

template<bool big_endian>
bool
Target_arm<big_endian>::do_relax(
    int pass,
    const Input_objects* input_objects,
    Symbol_table* symtab,
    Layout* layout)
{
  // No need to generate stubs if this is a relocatable link.
  gold_assert(!parameters->options().relocatable());

  // If this is the first pass, we need to group input sections into
  // stub groups.
  if (pass == 1)
    {
      // Determine the stub group size.  The group size is the absolute
      // value of the parameter --stub-group-size.  If --stub-group-size
      // is passed a negative value, we restrict stubs to be always after
      // the stubbed branches.
      int32_t stub_group_size_param =
        parameters->options().stub_group_size();
      bool stubs_always_after_branch = stub_group_size_param < 0;
      section_size_type stub_group_size = abs(stub_group_size_param);

      // The Cortex-A8 erratum fix depends on stubs not being in the same 4K
      // page as the first half of a 32-bit branch straddling two 4K pages.
      // This is a crude way of enforcing that.
      if (this->fix_cortex_a8_)
        stubs_always_after_branch = true;

      if (stub_group_size == 1)
        {
          // Default value.
          // The Thumb branch range of +-4MB has to be used as the default
          // maximum size (a given section can contain both ARM and Thumb
          // code, so the worst case has to be taken into account).
          //
          // This value is 24K less than that, which allows for 2025
          // 12-byte stubs.  If we exceed that, then we will fail to link.
          // The user will have to relink with an explicit group size
          // option.
          stub_group_size = 4170000;
        }

      group_sections(layout, stub_group_size, stubs_always_after_branch);
    }

  // The Cortex-A8 stubs are sensitive to layout of code sections.  At the
  // beginning of each relaxation pass, just blow away all the stubs.
  // Alternatively, we could selectively remove only the stubs and reloc
  // information for code sections that have moved since the last pass.
  // That would require more book-keeping.
  typedef typename Stub_table_list::iterator Stub_table_iterator;
  if (this->fix_cortex_a8_)
    {
      // Clear all Cortex-A8 reloc information.
      for (typename Cortex_a8_relocs_info::const_iterator p =
             this->cortex_a8_relocs_info_.begin();
           p != this->cortex_a8_relocs_info_.end();
           ++p)
        delete p->second;
      this->cortex_a8_relocs_info_.clear();

      // Remove all Cortex-A8 stubs.
      for (Stub_table_iterator sp = this->stub_tables_.begin();
           sp != this->stub_tables_.end();
           ++sp)
        (*sp)->remove_all_cortex_a8_stubs();
    }

  // Scan relocs for relocation stubs in each input object.
  for (Input_objects::Relobj_iterator op = input_objects->relobj_begin();
       op != input_objects->relobj_end();
       ++op)
    {
      Arm_relobj<big_endian>* arm_relobj =
        Arm_relobj<big_endian>::as_arm_relobj(*op);
      arm_relobj->scan_sections_for_stubs(this, symtab, layout);
    }

  // Check all stub tables to see if any of them have their data sizes
  // or addresses alignments changed.  These are the only things that
  // matter.
  bool any_stub_table_changed = false;
  for (Stub_table_iterator sp = this->stub_tables_.begin();
       (sp != this->stub_tables_.end()) && !any_stub_table_changed;
       ++sp)
    {
      if ((*sp)->update_data_size_and_addralign())
        any_stub_table_changed = true;
    }

  // Finalize the stubs in the last relaxation pass (i.e. when nothing
  // changed in this pass).
  if (!any_stub_table_changed)
    for (Stub_table_iterator sp = this->stub_tables_.begin();
         (sp != this->stub_tables_.end()) && !any_stub_table_changed;
         ++sp)
      (*sp)->finalize_stubs();

  return any_stub_table_changed;
}
8099
// Relocate a stub.  Apply each relocation listed in the stub's template
// to the stub data in VIEW, which is mapped at ADDRESS in the output.

template<bool big_endian>
void
Target_arm<big_endian>::relocate_stub(
    Stub* stub,
    const Relocate_info<32, big_endian>* relinfo,
    Output_section* output_section,
    unsigned char* view,
    Arm_address address,
    section_size_type view_size)
{
  Relocate relocate;
  const Stub_template* stub_template = stub->stub_template();
  for (size_t i = 0; i < stub_template->reloc_count(); i++)
    {
      // Find the template instruction this relocation applies to.
      size_t reloc_insn_index = stub_template->reloc_insn_index(i);
      const Insn_template* insn = &stub_template->insns()[reloc_insn_index];

      unsigned int r_type = insn->r_type();
      section_size_type reloc_offset = stub_template->reloc_offset(i);
      section_size_type reloc_size = insn->size();
      gold_assert(reloc_offset + reloc_size <= view_size);

      // This is the address of the stub destination.
      Arm_address target = stub->reloc_target(i) + insn->reloc_addend();
      Symbol_value<32> symval;
      symval.set_output_value(target);

      // Synthesize a fake reloc just in case.  We don't have a symbol so
      // we use 0.
      unsigned char reloc_buffer[elfcpp::Elf_sizes<32>::rel_size];
      memset(reloc_buffer, 0, sizeof(reloc_buffer));
      elfcpp::Rel_write<32, big_endian> reloc_write(reloc_buffer);
      reloc_write.put_r_offset(reloc_offset);
      reloc_write.put_r_info(elfcpp::elf_r_info<32>(0, r_type));
      elfcpp::Rel<32, big_endian> rel(reloc_buffer);

      // Run the synthesized reloc through the normal relocation
      // machinery, using the stub target as the symbol value.
      relocate.relocate(relinfo, this, output_section,
                        this->fake_relnum_for_stubs, rel, r_type,
                        NULL, &symval, view + reloc_offset,
                        address + reloc_offset, reloc_size);
    }
}
8144
8145 // Determine whether an object attribute tag takes an integer, a
8146 // string or both.
8147
8148 template<bool big_endian>
8149 int
8150 Target_arm<big_endian>::do_attribute_arg_type(int tag) const
8151 {
8152 if (tag == Object_attribute::Tag_compatibility)
8153 return (Object_attribute::ATTR_TYPE_FLAG_INT_VAL
8154 | Object_attribute::ATTR_TYPE_FLAG_STR_VAL);
8155 else if (tag == elfcpp::Tag_nodefaults)
8156 return (Object_attribute::ATTR_TYPE_FLAG_INT_VAL
8157 | Object_attribute::ATTR_TYPE_FLAG_NO_DEFAULT);
8158 else if (tag == elfcpp::Tag_CPU_raw_name || tag == elfcpp::Tag_CPU_name)
8159 return Object_attribute::ATTR_TYPE_FLAG_STR_VAL;
8160 else if (tag < 32)
8161 return Object_attribute::ATTR_TYPE_FLAG_INT_VAL;
8162 else
8163 return ((tag & 1) != 0
8164 ? Object_attribute::ATTR_TYPE_FLAG_STR_VAL
8165 : Object_attribute::ATTR_TYPE_FLAG_INT_VAL);
8166 }
8167
8168 // Reorder attributes.
8169 //
8170 // The ABI defines that Tag_conformance should be emitted first, and that
8171 // Tag_nodefaults should be second (if either is defined). This sets those
8172 // two positions, and bumps up the position of all the remaining tags to
8173 // compensate.
8174
8175 template<bool big_endian>
8176 int
8177 Target_arm<big_endian>::do_attributes_order(int num) const
8178 {
8179 // Reorder the known object attributes in output. We want to move
8180 // Tag_conformance to position 4 and Tag_conformance to position 5
8181 // and shift eveything between 4 .. Tag_conformance - 1 to make room.
8182 if (num == 4)
8183 return elfcpp::Tag_conformance;
8184 if (num == 5)
8185 return elfcpp::Tag_nodefaults;
8186 if ((num - 2) < elfcpp::Tag_nodefaults)
8187 return num - 2;
8188 if ((num - 1) < elfcpp::Tag_conformance)
8189 return num - 1;
8190 return num;
8191 }
8192
// Scan a span of THUMB code for Cortex-A8 erratum.

template<bool big_endian>
void
Target_arm<big_endian>::scan_span_for_cortex_a8_erratum(
    Arm_relobj<big_endian>* arm_relobj,
    unsigned int shndx,
    section_size_type span_start,
    section_size_type span_end,
    const unsigned char* view,
    Arm_address address)
{
  // Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
  //
  // The opcode is BLX.W, BL.W, B.W, Bcc.W
  // The branch target is in the same 4KB region as the
  // first half of the branch.
  // The instruction before the branch is a 32-bit
  // length non-branch instruction.
  section_size_type i = span_start;
  bool last_was_32bit = false;
  bool last_was_branch = false;
  while (i < span_end)
    {
      typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
      const Valtype* wv = reinterpret_cast<const Valtype*>(view + i);
      uint32_t insn = elfcpp::Swap<16, big_endian>::readval(wv);
      bool is_blx = false, is_b = false;
      bool is_bl = false, is_bcc = false;

      // A halfword whose bits 15-13 are all set and bits 12-11 are not
      // both clear is the first half of a 32-bit Thumb-2 instruction.
      bool insn_32bit = (insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000;
      if (insn_32bit)
        {
          // Load the rest of the insn (in manual-friendly order).
          insn = (insn << 16) | elfcpp::Swap<16, big_endian>::readval(wv + 1);

          // Encoding T4: B<c>.W.
          is_b = (insn & 0xf800d000U) == 0xf0009000U;
          // Encoding T1: BL<c>.W.
          is_bl = (insn & 0xf800d000U) == 0xf000d000U;
          // Encoding T2: BLX<c>.W.
          is_blx = (insn & 0xf800d000U) == 0xf000c000U;
          // Encoding T3: B<c>.W (not permitted in IT block).
          is_bcc = ((insn & 0xf800d000U) == 0xf0008000U
                    && (insn & 0x07f00000U) != 0x03800000U);
        }

      bool is_32bit_branch = is_b || is_bl || is_blx || is_bcc;

      // If this instruction is a 32-bit THUMB branch that crosses a 4K
      // page boundary and it follows 32-bit non-branch instruction,
      // we need to work around.
      if (is_32bit_branch
          && ((address + i) & 0xfffU) == 0xffeU
          && last_was_32bit
          && !last_was_branch)
        {
          // Check to see if there is a relocation stub for this branch.
          bool force_target_arm = false;
          bool force_target_thumb = false;
          const Cortex_a8_reloc* cortex_a8_reloc = NULL;
          Cortex_a8_relocs_info::const_iterator p =
            this->cortex_a8_relocs_info_.find(address + i);

          if (p != this->cortex_a8_relocs_info_.end())
            {
              cortex_a8_reloc = p->second;
              // Bit 0 of the recorded destination marks a Thumb target.
              bool target_is_thumb = (cortex_a8_reloc->destination() & 1) != 0;

              if (cortex_a8_reloc->r_type() == elfcpp::R_ARM_THM_CALL
                  && !target_is_thumb)
                force_target_arm = true;
              else if (cortex_a8_reloc->r_type() == elfcpp::R_ARM_THM_CALL
                       && target_is_thumb)
                force_target_thumb = true;
            }

          off_t offset;
          Stub_type stub_type = arm_stub_none;

          // Check if we have an offending branch instruction.
          uint16_t upper_insn = (insn >> 16) & 0xffffU;
          uint16_t lower_insn = insn & 0xffffU;
          typedef struct Arm_relocate_functions<big_endian> RelocFuncs;

          if (cortex_a8_reloc != NULL
              && cortex_a8_reloc->reloc_stub() != NULL)
            // We've already made a stub for this instruction, e.g.
            // it's a long branch or a Thumb->ARM stub.  Assume that
            // stub will suffice to work around the A8 erratum (see
            // setting of always_after_branch above).
            ;
          else if (is_bcc)
            {
              offset = RelocFuncs::thumb32_cond_branch_offset(upper_insn,
                                                              lower_insn);
              stub_type = arm_stub_a8_veneer_b_cond;
            }
          else if (is_b || is_bl || is_blx)
            {
              offset = RelocFuncs::thumb32_branch_offset(upper_insn,
                                                         lower_insn);
              if (is_blx)
                offset &= ~3;

              stub_type = (is_blx
                           ? arm_stub_a8_veneer_blx
                           : (is_bl
                              ? arm_stub_a8_veneer_bl
                              : arm_stub_a8_veneer_b));
            }

          if (stub_type != arm_stub_none)
            {
              Arm_address pc_for_insn = address + i + 4;

              // The original instruction is a BL, but the target is
              // an ARM instruction.  If we were not making a stub,
              // the BL would have been converted to a BLX.  Use the
              // BLX stub instead in that case.
              if (this->may_use_blx() && force_target_arm
                  && stub_type == arm_stub_a8_veneer_bl)
                {
                  stub_type = arm_stub_a8_veneer_blx;
                  is_blx = true;
                  is_bl = false;
                }
              // Conversely, if the original instruction was
              // BLX but the target is Thumb mode, use the BL stub.
              else if (force_target_thumb
                       && stub_type == arm_stub_a8_veneer_blx)
                {
                  stub_type = arm_stub_a8_veneer_bl;
                  is_blx = false;
                  is_bl = true;
                }

              // For BLX the base PC is word-aligned.
              if (is_blx)
                pc_for_insn &= ~3;

              // If we found a relocation, use the proper destination,
              // not the offset in the (unrelocated) instruction.
              // Note this is always done if we switched the stub type above.
              if (cortex_a8_reloc != NULL)
                offset = (off_t) (cortex_a8_reloc->destination() - pc_for_insn);

              // Bit 0 set marks a Thumb-mode target (everything but BLX).
              Arm_address target = (pc_for_insn + offset) | (is_blx ? 0 : 1);

              // Add a new stub if the destination address is in the same page.
              if (((address + i) & ~0xfffU) == (target & ~0xfffU))
                {
                  Cortex_a8_stub* stub =
                    this->stub_factory_.make_cortex_a8_stub(stub_type,
                                                            arm_relobj, shndx,
                                                            address + i,
                                                            target, insn);
                  Stub_table<big_endian>* stub_table =
                    arm_relobj->stub_table(shndx);
                  gold_assert(stub_table != NULL);
                  stub_table->add_cortex_a8_stub(address + i, stub);
                }
            }
        }

      // Advance past this instruction and remember what it was.
      i += insn_32bit ? 4 : 2;
      last_was_32bit = insn_32bit;
      last_was_branch = is_32bit_branch;
    }
}
8362
// Apply the Cortex-A8 workaround: redirect the offending branch at
// INSN_ADDRESS (whose two halfwords are at INSN_VIEW) to the veneer
// stub at STUB_ADDRESS.

template<bool big_endian>
void
Target_arm<big_endian>::apply_cortex_a8_workaround(
    const Cortex_a8_stub* stub,
    Arm_address stub_address,
    unsigned char* insn_view,
    Arm_address insn_address)
{
  typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
  Valtype* wv = reinterpret_cast<Valtype*>(insn_view);
  Valtype upper_insn = elfcpp::Swap<16, big_endian>::readval(wv);
  Valtype lower_insn = elfcpp::Swap<16, big_endian>::readval(wv + 1);
  // The branch offset is relative to the Thumb PC, which reads as the
  // instruction address + 4.
  off_t branch_offset = stub_address - (insn_address + 4);

  typedef struct Arm_relocate_functions<big_endian> RelocFuncs;
  switch (stub->stub_template()->type())
    {
    case arm_stub_a8_veneer_b_cond:
      // Conditional branch: the encoded offset must fit in 21 bits.
      gold_assert(!utils::has_overflow<21>(branch_offset));
      upper_insn = RelocFuncs::thumb32_cond_branch_upper(upper_insn,
                                                         branch_offset);
      lower_insn = RelocFuncs::thumb32_cond_branch_lower(lower_insn,
                                                         branch_offset);
      break;

    case arm_stub_a8_veneer_b:
    case arm_stub_a8_veneer_bl:
    case arm_stub_a8_veneer_blx:
      if ((lower_insn & 0x5000U) == 0x4000U)
        // For a BLX instruction, make sure that the relocation is
        // rounded up to a word boundary.  This follows the semantics of
        // the instruction which specifies that bit 1 of the target
        // address will come from bit 1 of the base address.
        branch_offset = (branch_offset + 2) & ~3;

      // Put BRANCH_OFFSET back into the insn.  The encoded offset must
      // fit in 25 bits.
      gold_assert(!utils::has_overflow<25>(branch_offset));
      upper_insn = RelocFuncs::thumb32_branch_upper(upper_insn, branch_offset);
      lower_insn = RelocFuncs::thumb32_branch_lower(lower_insn, branch_offset);
      break;

    default:
      gold_unreachable();
    }

  // Put the relocated value back in the object file:
  elfcpp::Swap<16, big_endian>::writeval(wv, upper_insn);
  elfcpp::Swap<16, big_endian>::writeval(wv + 1, lower_insn);
}
8414
// The selector for ARM object files, in both endiannesses.

template<bool big_endian>
class Target_selector_arm : public Target_selector
{
 public:
  Target_selector_arm()
    : Target_selector(elfcpp::EM_ARM, 32, big_endian,
                      (big_endian ? "elf32-bigarm" : "elf32-littlearm"))
  { }

  // Create a Target_arm of the matching endianness.
  Target*
  do_instantiate_target()
  { return new Target_arm<big_endian>(); }
};

// Register the little-endian and big-endian ARM target selectors.
Target_selector_arm<false> target_selector_arm;
Target_selector_arm<true> target_selector_armbe;
8431
8432 } // End anonymous namespace.
This page took 0.333164 seconds and 4 git commands to generate.