2010-05-23 Doug Kwan <dougkwan@google.com>
[deliverable/binutils-gdb.git] / gold / arm.cc
1 // arm.cc -- arm target support for gold.
2
3 // Copyright 2009, 2010 Free Software Foundation, Inc.
4 // Written by Doug Kwan <dougkwan@google.com> based on the i386 code
5 // by Ian Lance Taylor <iant@google.com>.
6 // This file also contains borrowed and adapted code from
7 // bfd/elf32-arm.c.
8
9 // This file is part of gold.
10
11 // This program is free software; you can redistribute it and/or modify
12 // it under the terms of the GNU General Public License as published by
13 // the Free Software Foundation; either version 3 of the License, or
14 // (at your option) any later version.
15
16 // This program is distributed in the hope that it will be useful,
17 // but WITHOUT ANY WARRANTY; without even the implied warranty of
18 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 // GNU General Public License for more details.
20
21 // You should have received a copy of the GNU General Public License
22 // along with this program; if not, write to the Free Software
23 // Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
24 // MA 02110-1301, USA.
25
26 #include "gold.h"
27
28 #include <cstring>
29 #include <limits>
30 #include <cstdio>
31 #include <string>
32 #include <algorithm>
33 #include <map>
34 #include <utility>
35 #include <set>
36
37 #include "elfcpp.h"
38 #include "parameters.h"
39 #include "reloc.h"
40 #include "arm.h"
41 #include "object.h"
42 #include "symtab.h"
43 #include "layout.h"
44 #include "output.h"
45 #include "copy-relocs.h"
46 #include "target.h"
47 #include "target-reloc.h"
48 #include "target-select.h"
49 #include "tls.h"
50 #include "defstd.h"
51 #include "gc.h"
52 #include "attributes.h"
53 #include "arm-reloc-property.h"
54
55 namespace
56 {
57
58 using namespace gold;
59
60 template<bool big_endian>
61 class Output_data_plt_arm;
62
63 template<bool big_endian>
64 class Stub_table;
65
66 template<bool big_endian>
67 class Arm_input_section;
68
69 class Arm_exidx_cantunwind;
70
71 class Arm_exidx_merged_section;
72
73 class Arm_exidx_fixup;
74
75 template<bool big_endian>
76 class Arm_output_section;
77
78 class Arm_exidx_input_section;
79
80 template<bool big_endian>
81 class Arm_relobj;
82
83 template<bool big_endian>
84 class Arm_relocate_functions;
85
86 template<bool big_endian>
87 class Arm_output_data_got;
88
89 template<bool big_endian>
90 class Target_arm;
91
92 // For convenience.
93 typedef elfcpp::Elf_types<32>::Elf_Addr Arm_address;
94
95 // Maximum branch offsets for ARM, THUMB and THUMB2.
96 const int32_t ARM_MAX_FWD_BRANCH_OFFSET = ((((1 << 23) - 1) << 2) + 8);
97 const int32_t ARM_MAX_BWD_BRANCH_OFFSET = ((-((1 << 23) << 2)) + 8);
98 const int32_t THM_MAX_FWD_BRANCH_OFFSET = ((1 << 22) -2 + 4);
99 const int32_t THM_MAX_BWD_BRANCH_OFFSET = (-(1 << 22) + 4);
100 const int32_t THM2_MAX_FWD_BRANCH_OFFSET = (((1 << 24) - 2) + 4);
101 const int32_t THM2_MAX_BWD_BRANCH_OFFSET = (-(1 << 24) + 4);
102
103 // Thread Control Block size.
104 const size_t ARM_TCB_SIZE = 8;
105
106 // The arm target class.
107 //
108 // This is a very simple port of gold for ARM-EABI. It is intended for
109 // supporting Android only for the time being.
110 //
111 // TODOs:
112 // - Implement all static relocation types documented in arm-reloc.def.
113 // - Make PLTs more flexible for different architecture features like
114 // Thumb-2 and BE8.
115 // There are probably a lot more.
116
117 // Ideally we would like to avoid using global variables but this is used
118 // very in many places and sometimes in loops. If we use a function
119 // returning a static instance of Arm_reloc_property_table, it will very
120 // slow in an threaded environment since the static instance needs to be
121 // locked. The pointer is below initialized in the
122 // Target::do_select_as_default_target() hook so that we do not spend time
123 // building the table if we are not linking ARM objects.
124 //
125 // An alternative is to to process the information in arm-reloc.def in
126 // compilation time and generate a representation of it in PODs only. That
127 // way we can avoid initialization when the linker starts.
128
129 Arm_reloc_property_table *arm_reloc_property_table = NULL;
130
131 // Instruction template class. This class is similar to the insn_sequence
132 // struct in bfd/elf32-arm.c.
133
134 class Insn_template
135 {
136 public:
137 // Types of instruction templates.
138 enum Type
139 {
140 THUMB16_TYPE = 1,
141 // THUMB16_SPECIAL_TYPE is used by sub-classes of Stub for instruction
142 // templates with class-specific semantics. Currently this is used
143 // only by the Cortex_a8_stub class for handling condition codes in
144 // conditional branches.
145 THUMB16_SPECIAL_TYPE,
146 THUMB32_TYPE,
147 ARM_TYPE,
148 DATA_TYPE
149 };
150
151 // Factory methods to create instruction templates in different formats.
152
153 static const Insn_template
154 thumb16_insn(uint32_t data)
155 { return Insn_template(data, THUMB16_TYPE, elfcpp::R_ARM_NONE, 0); }
156
157 // A Thumb conditional branch, in which the proper condition is inserted
158 // when we build the stub.
159 static const Insn_template
160 thumb16_bcond_insn(uint32_t data)
161 { return Insn_template(data, THUMB16_SPECIAL_TYPE, elfcpp::R_ARM_NONE, 1); }
162
163 static const Insn_template
164 thumb32_insn(uint32_t data)
165 { return Insn_template(data, THUMB32_TYPE, elfcpp::R_ARM_NONE, 0); }
166
167 static const Insn_template
168 thumb32_b_insn(uint32_t data, int reloc_addend)
169 {
170 return Insn_template(data, THUMB32_TYPE, elfcpp::R_ARM_THM_JUMP24,
171 reloc_addend);
172 }
173
174 static const Insn_template
175 arm_insn(uint32_t data)
176 { return Insn_template(data, ARM_TYPE, elfcpp::R_ARM_NONE, 0); }
177
178 static const Insn_template
179 arm_rel_insn(unsigned data, int reloc_addend)
180 { return Insn_template(data, ARM_TYPE, elfcpp::R_ARM_JUMP24, reloc_addend); }
181
182 static const Insn_template
183 data_word(unsigned data, unsigned int r_type, int reloc_addend)
184 { return Insn_template(data, DATA_TYPE, r_type, reloc_addend); }
185
186 // Accessors. This class is used for read-only objects so no modifiers
187 // are provided.
188
189 uint32_t
190 data() const
191 { return this->data_; }
192
193 // Return the instruction sequence type of this.
194 Type
195 type() const
196 { return this->type_; }
197
198 // Return the ARM relocation type of this.
199 unsigned int
200 r_type() const
201 { return this->r_type_; }
202
203 int32_t
204 reloc_addend() const
205 { return this->reloc_addend_; }
206
207 // Return size of instruction template in bytes.
208 size_t
209 size() const;
210
211 // Return byte-alignment of instruction template.
212 unsigned
213 alignment() const;
214
215 private:
216 // We make the constructor private to ensure that only the factory
217 // methods are used.
218 inline
219 Insn_template(unsigned data, Type type, unsigned int r_type, int reloc_addend)
220 : data_(data), type_(type), r_type_(r_type), reloc_addend_(reloc_addend)
221 { }
222
223 // Instruction specific data. This is used to store information like
224 // some of the instruction bits.
225 uint32_t data_;
226 // Instruction template type.
227 Type type_;
228 // Relocation type if there is a relocation or R_ARM_NONE otherwise.
229 unsigned int r_type_;
230 // Relocation addend.
231 int32_t reloc_addend_;
232 };
233
234 // Macro for generating code to stub types. One entry per long/short
235 // branch stub
236
237 #define DEF_STUBS \
238 DEF_STUB(long_branch_any_any) \
239 DEF_STUB(long_branch_v4t_arm_thumb) \
240 DEF_STUB(long_branch_thumb_only) \
241 DEF_STUB(long_branch_v4t_thumb_thumb) \
242 DEF_STUB(long_branch_v4t_thumb_arm) \
243 DEF_STUB(short_branch_v4t_thumb_arm) \
244 DEF_STUB(long_branch_any_arm_pic) \
245 DEF_STUB(long_branch_any_thumb_pic) \
246 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
247 DEF_STUB(long_branch_v4t_arm_thumb_pic) \
248 DEF_STUB(long_branch_v4t_thumb_arm_pic) \
249 DEF_STUB(long_branch_thumb_only_pic) \
250 DEF_STUB(a8_veneer_b_cond) \
251 DEF_STUB(a8_veneer_b) \
252 DEF_STUB(a8_veneer_bl) \
253 DEF_STUB(a8_veneer_blx) \
254 DEF_STUB(v4_veneer_bx)
255
256 // Stub types.
257
258 #define DEF_STUB(x) arm_stub_##x,
259 typedef enum
260 {
261 arm_stub_none,
262 DEF_STUBS
263
264 // First reloc stub type.
265 arm_stub_reloc_first = arm_stub_long_branch_any_any,
266 // Last reloc stub type.
267 arm_stub_reloc_last = arm_stub_long_branch_thumb_only_pic,
268
269 // First Cortex-A8 stub type.
270 arm_stub_cortex_a8_first = arm_stub_a8_veneer_b_cond,
271 // Last Cortex-A8 stub type.
272 arm_stub_cortex_a8_last = arm_stub_a8_veneer_blx,
273
274 // Last stub type.
275 arm_stub_type_last = arm_stub_v4_veneer_bx
276 } Stub_type;
277 #undef DEF_STUB
278
279 // Stub template class. Templates are meant to be read-only objects.
280 // A stub template for a stub type contains all read-only attributes
281 // common to all stubs of the same type.
282
283 class Stub_template
284 {
285 public:
286 Stub_template(Stub_type, const Insn_template*, size_t);
287
288 ~Stub_template()
289 { }
290
291 // Return stub type.
292 Stub_type
293 type() const
294 { return this->type_; }
295
296 // Return an array of instruction templates.
297 const Insn_template*
298 insns() const
299 { return this->insns_; }
300
301 // Return size of template in number of instructions.
302 size_t
303 insn_count() const
304 { return this->insn_count_; }
305
306 // Return size of template in bytes.
307 size_t
308 size() const
309 { return this->size_; }
310
311 // Return alignment of the stub template.
312 unsigned
313 alignment() const
314 { return this->alignment_; }
315
316 // Return whether entry point is in thumb mode.
317 bool
318 entry_in_thumb_mode() const
319 { return this->entry_in_thumb_mode_; }
320
321 // Return number of relocations in this template.
322 size_t
323 reloc_count() const
324 { return this->relocs_.size(); }
325
326 // Return index of the I-th instruction with relocation.
327 size_t
328 reloc_insn_index(size_t i) const
329 {
330 gold_assert(i < this->relocs_.size());
331 return this->relocs_[i].first;
332 }
333
334 // Return the offset of the I-th instruction with relocation from the
335 // beginning of the stub.
336 section_size_type
337 reloc_offset(size_t i) const
338 {
339 gold_assert(i < this->relocs_.size());
340 return this->relocs_[i].second;
341 }
342
343 private:
344 // This contains information about an instruction template with a relocation
345 // and its offset from start of stub.
346 typedef std::pair<size_t, section_size_type> Reloc;
347
348 // A Stub_template may not be copied. We want to share templates as much
349 // as possible.
350 Stub_template(const Stub_template&);
351 Stub_template& operator=(const Stub_template&);
352
353 // Stub type.
354 Stub_type type_;
355 // Points to an array of Insn_templates.
356 const Insn_template* insns_;
357 // Number of Insn_templates in insns_[].
358 size_t insn_count_;
359 // Size of templated instructions in bytes.
360 size_t size_;
361 // Alignment of templated instructions.
362 unsigned alignment_;
363 // Flag to indicate if entry is in thumb mode.
364 bool entry_in_thumb_mode_;
365 // A table of reloc instruction indices and offsets. We can find these by
366 // looking at the instruction templates but we pre-compute and then stash
367 // them here for speed.
368 std::vector<Reloc> relocs_;
369 };
370
371 //
372 // A class for code stubs. This is a base class for different type of
373 // stubs used in the ARM target.
374 //
375
376 class Stub
377 {
378 private:
379 static const section_offset_type invalid_offset =
380 static_cast<section_offset_type>(-1);
381
382 public:
383 Stub(const Stub_template* stub_template)
384 : stub_template_(stub_template), offset_(invalid_offset)
385 { }
386
387 virtual
388 ~Stub()
389 { }
390
391 // Return the stub template.
392 const Stub_template*
393 stub_template() const
394 { return this->stub_template_; }
395
396 // Return offset of code stub from beginning of its containing stub table.
397 section_offset_type
398 offset() const
399 {
400 gold_assert(this->offset_ != invalid_offset);
401 return this->offset_;
402 }
403
404 // Set offset of code stub from beginning of its containing stub table.
405 void
406 set_offset(section_offset_type offset)
407 { this->offset_ = offset; }
408
409 // Return the relocation target address of the i-th relocation in the
410 // stub. This must be defined in a child class.
411 Arm_address
412 reloc_target(size_t i)
413 { return this->do_reloc_target(i); }
414
415 // Write a stub at output VIEW. BIG_ENDIAN select how a stub is written.
416 void
417 write(unsigned char* view, section_size_type view_size, bool big_endian)
418 { this->do_write(view, view_size, big_endian); }
419
420 // Return the instruction for THUMB16_SPECIAL_TYPE instruction template
421 // for the i-th instruction.
422 uint16_t
423 thumb16_special(size_t i)
424 { return this->do_thumb16_special(i); }
425
426 protected:
427 // This must be defined in the child class.
428 virtual Arm_address
429 do_reloc_target(size_t) = 0;
430
431 // This may be overridden in the child class.
432 virtual void
433 do_write(unsigned char* view, section_size_type view_size, bool big_endian)
434 {
435 if (big_endian)
436 this->do_fixed_endian_write<true>(view, view_size);
437 else
438 this->do_fixed_endian_write<false>(view, view_size);
439 }
440
441 // This must be overridden if a child class uses the THUMB16_SPECIAL_TYPE
442 // instruction template.
443 virtual uint16_t
444 do_thumb16_special(size_t)
445 { gold_unreachable(); }
446
447 private:
448 // A template to implement do_write.
449 template<bool big_endian>
450 void inline
451 do_fixed_endian_write(unsigned char*, section_size_type);
452
453 // Its template.
454 const Stub_template* stub_template_;
455 // Offset within the section of containing this stub.
456 section_offset_type offset_;
457 };
458
459 // Reloc stub class. These are stubs we use to fix up relocation because
460 // of limited branch ranges.
461
462 class Reloc_stub : public Stub
463 {
464 public:
465 static const unsigned int invalid_index = static_cast<unsigned int>(-1);
466 // We assume we never jump to this address.
467 static const Arm_address invalid_address = static_cast<Arm_address>(-1);
468
469 // Return destination address.
470 Arm_address
471 destination_address() const
472 {
473 gold_assert(this->destination_address_ != this->invalid_address);
474 return this->destination_address_;
475 }
476
477 // Set destination address.
478 void
479 set_destination_address(Arm_address address)
480 {
481 gold_assert(address != this->invalid_address);
482 this->destination_address_ = address;
483 }
484
485 // Reset destination address.
486 void
487 reset_destination_address()
488 { this->destination_address_ = this->invalid_address; }
489
490 // Determine stub type for a branch of a relocation of R_TYPE going
491 // from BRANCH_ADDRESS to BRANCH_TARGET. If TARGET_IS_THUMB is set,
492 // the branch target is a thumb instruction. TARGET is used for look
493 // up ARM-specific linker settings.
494 static Stub_type
495 stub_type_for_reloc(unsigned int r_type, Arm_address branch_address,
496 Arm_address branch_target, bool target_is_thumb);
497
498 // Reloc_stub key. A key is logically a triplet of a stub type, a symbol
499 // and an addend. Since we treat global and local symbol differently, we
500 // use a Symbol object for a global symbol and a object-index pair for
501 // a local symbol.
502 class Key
503 {
504 public:
505 // If SYMBOL is not null, this is a global symbol, we ignore RELOBJ and
506 // R_SYM. Otherwise, this is a local symbol and RELOBJ must non-NULL
507 // and R_SYM must not be invalid_index.
508 Key(Stub_type stub_type, const Symbol* symbol, const Relobj* relobj,
509 unsigned int r_sym, int32_t addend)
510 : stub_type_(stub_type), addend_(addend)
511 {
512 if (symbol != NULL)
513 {
514 this->r_sym_ = Reloc_stub::invalid_index;
515 this->u_.symbol = symbol;
516 }
517 else
518 {
519 gold_assert(relobj != NULL && r_sym != invalid_index);
520 this->r_sym_ = r_sym;
521 this->u_.relobj = relobj;
522 }
523 }
524
525 ~Key()
526 { }
527
528 // Accessors: Keys are meant to be read-only object so no modifiers are
529 // provided.
530
531 // Return stub type.
532 Stub_type
533 stub_type() const
534 { return this->stub_type_; }
535
536 // Return the local symbol index or invalid_index.
537 unsigned int
538 r_sym() const
539 { return this->r_sym_; }
540
541 // Return the symbol if there is one.
542 const Symbol*
543 symbol() const
544 { return this->r_sym_ == invalid_index ? this->u_.symbol : NULL; }
545
546 // Return the relobj if there is one.
547 const Relobj*
548 relobj() const
549 { return this->r_sym_ != invalid_index ? this->u_.relobj : NULL; }
550
551 // Whether this equals to another key k.
552 bool
553 eq(const Key& k) const
554 {
555 return ((this->stub_type_ == k.stub_type_)
556 && (this->r_sym_ == k.r_sym_)
557 && ((this->r_sym_ != Reloc_stub::invalid_index)
558 ? (this->u_.relobj == k.u_.relobj)
559 : (this->u_.symbol == k.u_.symbol))
560 && (this->addend_ == k.addend_));
561 }
562
563 // Return a hash value.
564 size_t
565 hash_value() const
566 {
567 return (this->stub_type_
568 ^ this->r_sym_
569 ^ gold::string_hash<char>(
570 (this->r_sym_ != Reloc_stub::invalid_index)
571 ? this->u_.relobj->name().c_str()
572 : this->u_.symbol->name())
573 ^ this->addend_);
574 }
575
576 // Functors for STL associative containers.
577 struct hash
578 {
579 size_t
580 operator()(const Key& k) const
581 { return k.hash_value(); }
582 };
583
584 struct equal_to
585 {
586 bool
587 operator()(const Key& k1, const Key& k2) const
588 { return k1.eq(k2); }
589 };
590
591 // Name of key. This is mainly for debugging.
592 std::string
593 name() const;
594
595 private:
596 // Stub type.
597 Stub_type stub_type_;
598 // If this is a local symbol, this is the index in the defining object.
599 // Otherwise, it is invalid_index for a global symbol.
600 unsigned int r_sym_;
601 // If r_sym_ is invalid index. This points to a global symbol.
602 // Otherwise, this points a relobj. We used the unsized and target
603 // independent Symbol and Relobj classes instead of Sized_symbol<32> and
604 // Arm_relobj. This is done to avoid making the stub class a template
605 // as most of the stub machinery is endianness-neutral. However, it
606 // may require a bit of casting done by users of this class.
607 union
608 {
609 const Symbol* symbol;
610 const Relobj* relobj;
611 } u_;
612 // Addend associated with a reloc.
613 int32_t addend_;
614 };
615
616 protected:
617 // Reloc_stubs are created via a stub factory. So these are protected.
618 Reloc_stub(const Stub_template* stub_template)
619 : Stub(stub_template), destination_address_(invalid_address)
620 { }
621
622 ~Reloc_stub()
623 { }
624
625 friend class Stub_factory;
626
627 // Return the relocation target address of the i-th relocation in the
628 // stub.
629 Arm_address
630 do_reloc_target(size_t i)
631 {
632 // All reloc stub have only one relocation.
633 gold_assert(i == 0);
634 return this->destination_address_;
635 }
636
637 private:
638 // Address of destination.
639 Arm_address destination_address_;
640 };
641
642 // Cortex-A8 stub class. We need a Cortex-A8 stub to redirect any 32-bit
643 // THUMB branch that meets the following conditions:
644 //
645 // 1. The branch straddles across a page boundary. i.e. lower 12-bit of
646 // branch address is 0xffe.
647 // 2. The branch target address is in the same page as the first word of the
648 // branch.
649 // 3. The branch follows a 32-bit instruction which is not a branch.
650 //
651 // To do the fix up, we need to store the address of the branch instruction
652 // and its target at least. We also need to store the original branch
653 // instruction bits for the condition code in a conditional branch. The
654 // condition code is used in a special instruction template. We also want
655 // to identify input sections needing Cortex-A8 workaround quickly. We store
656 // extra information about object and section index of the code section
657 // containing a branch being fixed up. The information is used to mark
658 // the code section when we finalize the Cortex-A8 stubs.
659 //
660
661 class Cortex_a8_stub : public Stub
662 {
663 public:
664 ~Cortex_a8_stub()
665 { }
666
667 // Return the object of the code section containing the branch being fixed
668 // up.
669 Relobj*
670 relobj() const
671 { return this->relobj_; }
672
673 // Return the section index of the code section containing the branch being
674 // fixed up.
675 unsigned int
676 shndx() const
677 { return this->shndx_; }
678
679 // Return the source address of stub. This is the address of the original
680 // branch instruction. LSB is 1 always set to indicate that it is a THUMB
681 // instruction.
682 Arm_address
683 source_address() const
684 { return this->source_address_; }
685
686 // Return the destination address of the stub. This is the branch taken
687 // address of the original branch instruction. LSB is 1 if it is a THUMB
688 // instruction address.
689 Arm_address
690 destination_address() const
691 { return this->destination_address_; }
692
693 // Return the instruction being fixed up.
694 uint32_t
695 original_insn() const
696 { return this->original_insn_; }
697
698 protected:
699 // Cortex_a8_stubs are created via a stub factory. So these are protected.
700 Cortex_a8_stub(const Stub_template* stub_template, Relobj* relobj,
701 unsigned int shndx, Arm_address source_address,
702 Arm_address destination_address, uint32_t original_insn)
703 : Stub(stub_template), relobj_(relobj), shndx_(shndx),
704 source_address_(source_address | 1U),
705 destination_address_(destination_address),
706 original_insn_(original_insn)
707 { }
708
709 friend class Stub_factory;
710
711 // Return the relocation target address of the i-th relocation in the
712 // stub.
713 Arm_address
714 do_reloc_target(size_t i)
715 {
716 if (this->stub_template()->type() == arm_stub_a8_veneer_b_cond)
717 {
718 // The conditional branch veneer has two relocations.
719 gold_assert(i < 2);
720 return i == 0 ? this->source_address_ + 4 : this->destination_address_;
721 }
722 else
723 {
724 // All other Cortex-A8 stubs have only one relocation.
725 gold_assert(i == 0);
726 return this->destination_address_;
727 }
728 }
729
730 // Return an instruction for the THUMB16_SPECIAL_TYPE instruction template.
731 uint16_t
732 do_thumb16_special(size_t);
733
734 private:
735 // Object of the code section containing the branch being fixed up.
736 Relobj* relobj_;
737 // Section index of the code section containing the branch begin fixed up.
738 unsigned int shndx_;
739 // Source address of original branch.
740 Arm_address source_address_;
741 // Destination address of the original branch.
742 Arm_address destination_address_;
743 // Original branch instruction. This is needed for copying the condition
744 // code from a condition branch to its stub.
745 uint32_t original_insn_;
746 };
747
748 // ARMv4 BX Rx branch relocation stub class.
749 class Arm_v4bx_stub : public Stub
750 {
751 public:
752 ~Arm_v4bx_stub()
753 { }
754
755 // Return the associated register.
756 uint32_t
757 reg() const
758 { return this->reg_; }
759
760 protected:
761 // Arm V4BX stubs are created via a stub factory. So these are protected.
762 Arm_v4bx_stub(const Stub_template* stub_template, const uint32_t reg)
763 : Stub(stub_template), reg_(reg)
764 { }
765
766 friend class Stub_factory;
767
768 // Return the relocation target address of the i-th relocation in the
769 // stub.
770 Arm_address
771 do_reloc_target(size_t)
772 { gold_unreachable(); }
773
774 // This may be overridden in the child class.
775 virtual void
776 do_write(unsigned char* view, section_size_type view_size, bool big_endian)
777 {
778 if (big_endian)
779 this->do_fixed_endian_v4bx_write<true>(view, view_size);
780 else
781 this->do_fixed_endian_v4bx_write<false>(view, view_size);
782 }
783
784 private:
785 // A template to implement do_write.
786 template<bool big_endian>
787 void inline
788 do_fixed_endian_v4bx_write(unsigned char* view, section_size_type)
789 {
790 const Insn_template* insns = this->stub_template()->insns();
791 elfcpp::Swap<32, big_endian>::writeval(view,
792 (insns[0].data()
793 + (this->reg_ << 16)));
794 view += insns[0].size();
795 elfcpp::Swap<32, big_endian>::writeval(view,
796 (insns[1].data() + this->reg_));
797 view += insns[1].size();
798 elfcpp::Swap<32, big_endian>::writeval(view,
799 (insns[2].data() + this->reg_));
800 }
801
802 // A register index (r0-r14), which is associated with the stub.
803 uint32_t reg_;
804 };
805
806 // Stub factory class.
807
808 class Stub_factory
809 {
810 public:
811 // Return the unique instance of this class.
812 static const Stub_factory&
813 get_instance()
814 {
815 static Stub_factory singleton;
816 return singleton;
817 }
818
819 // Make a relocation stub.
820 Reloc_stub*
821 make_reloc_stub(Stub_type stub_type) const
822 {
823 gold_assert(stub_type >= arm_stub_reloc_first
824 && stub_type <= arm_stub_reloc_last);
825 return new Reloc_stub(this->stub_templates_[stub_type]);
826 }
827
828 // Make a Cortex-A8 stub.
829 Cortex_a8_stub*
830 make_cortex_a8_stub(Stub_type stub_type, Relobj* relobj, unsigned int shndx,
831 Arm_address source, Arm_address destination,
832 uint32_t original_insn) const
833 {
834 gold_assert(stub_type >= arm_stub_cortex_a8_first
835 && stub_type <= arm_stub_cortex_a8_last);
836 return new Cortex_a8_stub(this->stub_templates_[stub_type], relobj, shndx,
837 source, destination, original_insn);
838 }
839
840 // Make an ARM V4BX relocation stub.
841 // This method creates a stub from the arm_stub_v4_veneer_bx template only.
842 Arm_v4bx_stub*
843 make_arm_v4bx_stub(uint32_t reg) const
844 {
845 gold_assert(reg < 0xf);
846 return new Arm_v4bx_stub(this->stub_templates_[arm_stub_v4_veneer_bx],
847 reg);
848 }
849
850 private:
851 // Constructor and destructor are protected since we only return a single
852 // instance created in Stub_factory::get_instance().
853
854 Stub_factory();
855
856 // A Stub_factory may not be copied since it is a singleton.
857 Stub_factory(const Stub_factory&);
858 Stub_factory& operator=(Stub_factory&);
859
860 // Stub templates. These are initialized in the constructor.
861 const Stub_template* stub_templates_[arm_stub_type_last+1];
862 };
863
864 // A class to hold stubs for the ARM target.
865
866 template<bool big_endian>
867 class Stub_table : public Output_data
868 {
869 public:
870 Stub_table(Arm_input_section<big_endian>* owner)
871 : Output_data(), owner_(owner), reloc_stubs_(), reloc_stubs_size_(0),
872 reloc_stubs_addralign_(1), cortex_a8_stubs_(), arm_v4bx_stubs_(0xf),
873 prev_data_size_(0), prev_addralign_(1)
874 { }
875
876 ~Stub_table()
877 { }
878
879 // Owner of this stub table.
880 Arm_input_section<big_endian>*
881 owner() const
882 { return this->owner_; }
883
884 // Whether this stub table is empty.
885 bool
886 empty() const
887 {
888 return (this->reloc_stubs_.empty()
889 && this->cortex_a8_stubs_.empty()
890 && this->arm_v4bx_stubs_.empty());
891 }
892
893 // Return the current data size.
894 off_t
895 current_data_size() const
896 { return this->current_data_size_for_child(); }
897
898 // Add a STUB with using KEY. Caller is reponsible for avoid adding
899 // if already a STUB with the same key has been added.
900 void
901 add_reloc_stub(Reloc_stub* stub, const Reloc_stub::Key& key)
902 {
903 const Stub_template* stub_template = stub->stub_template();
904 gold_assert(stub_template->type() == key.stub_type());
905 this->reloc_stubs_[key] = stub;
906
907 // Assign stub offset early. We can do this because we never remove
908 // reloc stubs and they are in the beginning of the stub table.
909 uint64_t align = stub_template->alignment();
910 this->reloc_stubs_size_ = align_address(this->reloc_stubs_size_, align);
911 stub->set_offset(this->reloc_stubs_size_);
912 this->reloc_stubs_size_ += stub_template->size();
913 this->reloc_stubs_addralign_ =
914 std::max(this->reloc_stubs_addralign_, align);
915 }
916
917 // Add a Cortex-A8 STUB that fixes up a THUMB branch at ADDRESS.
918 // Caller is reponsible for avoid adding if already a STUB with the same
919 // address has been added.
920 void
921 add_cortex_a8_stub(Arm_address address, Cortex_a8_stub* stub)
922 {
923 std::pair<Arm_address, Cortex_a8_stub*> value(address, stub);
924 this->cortex_a8_stubs_.insert(value);
925 }
926
927 // Add an ARM V4BX relocation stub. A register index will be retrieved
928 // from the stub.
929 void
930 add_arm_v4bx_stub(Arm_v4bx_stub* stub)
931 {
932 gold_assert(stub != NULL && this->arm_v4bx_stubs_[stub->reg()] == NULL);
933 this->arm_v4bx_stubs_[stub->reg()] = stub;
934 }
935
936 // Remove all Cortex-A8 stubs.
937 void
938 remove_all_cortex_a8_stubs();
939
940 // Look up a relocation stub using KEY. Return NULL if there is none.
941 Reloc_stub*
942 find_reloc_stub(const Reloc_stub::Key& key) const
943 {
944 typename Reloc_stub_map::const_iterator p = this->reloc_stubs_.find(key);
945 return (p != this->reloc_stubs_.end()) ? p->second : NULL;
946 }
947
948 // Look up an arm v4bx relocation stub using the register index.
949 // Return NULL if there is none.
950 Arm_v4bx_stub*
951 find_arm_v4bx_stub(const uint32_t reg) const
952 {
953 gold_assert(reg < 0xf);
954 return this->arm_v4bx_stubs_[reg];
955 }
956
957 // Relocate stubs in this stub table.
958 void
959 relocate_stubs(const Relocate_info<32, big_endian>*,
960 Target_arm<big_endian>*, Output_section*,
961 unsigned char*, Arm_address, section_size_type);
962
963 // Update data size and alignment at the end of a relaxation pass. Return
964 // true if either data size or alignment is different from that of the
965 // previous relaxation pass.
966 bool
967 update_data_size_and_addralign();
968
969 // Finalize stubs. Set the offsets of all stubs and mark input sections
970 // needing the Cortex-A8 workaround.
971 void
972 finalize_stubs();
973
974 // Apply Cortex-A8 workaround to an address range.
975 void
976 apply_cortex_a8_workaround_to_address_range(Target_arm<big_endian>*,
977 unsigned char*, Arm_address,
978 section_size_type);
979
980 protected:
981 // Write out section contents.
982 void
983 do_write(Output_file*);
984
985 // Return the required alignment.
986 uint64_t
987 do_addralign() const
988 { return this->prev_addralign_; }
989
990 // Reset address and file offset.
991 void
992 do_reset_address_and_file_offset()
993 { this->set_current_data_size_for_child(this->prev_data_size_); }
994
995 // Set final data size.
996 void
997 set_final_data_size()
998 { this->set_data_size(this->current_data_size()); }
999
1000 private:
1001 // Relocate one stub.
1002 void
1003 relocate_stub(Stub*, const Relocate_info<32, big_endian>*,
1004 Target_arm<big_endian>*, Output_section*,
1005 unsigned char*, Arm_address, section_size_type);
1006
1007 // Unordered map of relocation stubs.
1008 typedef
1009 Unordered_map<Reloc_stub::Key, Reloc_stub*, Reloc_stub::Key::hash,
1010 Reloc_stub::Key::equal_to>
1011 Reloc_stub_map;
1012
1013 // List of Cortex-A8 stubs ordered by addresses of branches being
1014 // fixed up in output.
1015 typedef std::map<Arm_address, Cortex_a8_stub*> Cortex_a8_stub_list;
1016 // List of Arm V4BX relocation stubs ordered by associated registers.
1017 typedef std::vector<Arm_v4bx_stub*> Arm_v4bx_stub_list;
1018
1019 // Owner of this stub table.
1020 Arm_input_section<big_endian>* owner_;
1021 // The relocation stubs.
1022 Reloc_stub_map reloc_stubs_;
1023 // Size of reloc stubs.
1024 off_t reloc_stubs_size_;
1025 // Maximum address alignment of reloc stubs.
1026 uint64_t reloc_stubs_addralign_;
1027 // The cortex_a8_stubs.
1028 Cortex_a8_stub_list cortex_a8_stubs_;
1029 // The Arm V4BX relocation stubs.
1030 Arm_v4bx_stub_list arm_v4bx_stubs_;
1031 // data size of this in the previous pass.
1032 off_t prev_data_size_;
1033 // address alignment of this in the previous pass.
1034 uint64_t prev_addralign_;
1035 };
1036
1037 // Arm_exidx_cantunwind class. This represents an EXIDX_CANTUNWIND entry
1038 // we add to the end of an EXIDX input section that goes into the output.
1039
1040 class Arm_exidx_cantunwind : public Output_section_data
1041 {
1042 public:
1043 Arm_exidx_cantunwind(Relobj* relobj, unsigned int shndx)
1044 : Output_section_data(8, 4, true), relobj_(relobj), shndx_(shndx)
1045 { }
1046
1047 // Return the object containing the section pointed by this.
1048 Relobj*
1049 relobj() const
1050 { return this->relobj_; }
1051
1052 // Return the section index of the section pointed by this.
1053 unsigned int
1054 shndx() const
1055 { return this->shndx_; }
1056
1057 protected:
1058 void
1059 do_write(Output_file* of)
1060 {
1061 if (parameters->target().is_big_endian())
1062 this->do_fixed_endian_write<true>(of);
1063 else
1064 this->do_fixed_endian_write<false>(of);
1065 }
1066
1067 private:
1068 // Implement do_write for a given endianness.
1069 template<bool big_endian>
1070 void inline
1071 do_fixed_endian_write(Output_file*);
1072
1073 // The object containing the section pointed by this.
1074 Relobj* relobj_;
1075 // The section index of the section pointed by this.
1076 unsigned int shndx_;
1077 };
1078
1079 // During EXIDX coverage fix-up, we compact an EXIDX section. The
1080 // Offset map is used to map input section offset within the EXIDX section
1081 // to the output offset from the start of this EXIDX section.
1082
1083 typedef std::map<section_offset_type, section_offset_type>
1084 Arm_exidx_section_offset_map;
1085
1086 // Arm_exidx_merged_section class. This represents an EXIDX input section
1087 // with some of its entries merged.
1088
1089 class Arm_exidx_merged_section : public Output_relaxed_input_section
1090 {
1091 public:
1092 // Constructor for Arm_exidx_merged_section.
1093 // EXIDX_INPUT_SECTION points to the unmodified EXIDX input section.
1094 // SECTION_OFFSET_MAP points to a section offset map describing how
1095 // parts of the input section are mapped to output. DELETED_BYTES is
1096 // the number of bytes deleted from the EXIDX input section.
1097 Arm_exidx_merged_section(
1098 const Arm_exidx_input_section& exidx_input_section,
1099 const Arm_exidx_section_offset_map& section_offset_map,
1100 uint32_t deleted_bytes);
1101
1102 // Return the original EXIDX input section.
1103 const Arm_exidx_input_section&
1104 exidx_input_section() const
1105 { return this->exidx_input_section_; }
1106
1107 // Return the section offset map.
1108 const Arm_exidx_section_offset_map&
1109 section_offset_map() const
1110 { return this->section_offset_map_; }
1111
1112 protected:
1113 // Write merged section into file OF.
1114 void
1115 do_write(Output_file* of);
1116
1117 bool
1118 do_output_offset(const Relobj*, unsigned int, section_offset_type,
1119 section_offset_type*) const;
1120
1121 private:
1122 // Original EXIDX input section.
1123 const Arm_exidx_input_section& exidx_input_section_;
1124 // Section offset map.
1125 const Arm_exidx_section_offset_map& section_offset_map_;
1126 };
1127
1128 // A class to wrap an ordinary input section containing executable code.
1129
1130 template<bool big_endian>
1131 class Arm_input_section : public Output_relaxed_input_section
1132 {
1133 public:
1134 Arm_input_section(Relobj* relobj, unsigned int shndx)
1135 : Output_relaxed_input_section(relobj, shndx, 1),
1136 original_addralign_(1), original_size_(0), stub_table_(NULL)
1137 { }
1138
1139 ~Arm_input_section()
1140 { }
1141
1142 // Initialize.
1143 void
1144 init();
1145
1146 // Whether this is a stub table owner.
1147 bool
1148 is_stub_table_owner() const
1149 { return this->stub_table_ != NULL && this->stub_table_->owner() == this; }
1150
1151 // Return the stub table.
1152 Stub_table<big_endian>*
1153 stub_table() const
1154 { return this->stub_table_; }
1155
1156 // Set the stub_table.
1157 void
1158 set_stub_table(Stub_table<big_endian>* stub_table)
1159 { this->stub_table_ = stub_table; }
1160
1161 // Downcast a base pointer to an Arm_input_section pointer. This is
1162 // not type-safe but we only use Arm_input_section not the base class.
1163 static Arm_input_section<big_endian>*
1164 as_arm_input_section(Output_relaxed_input_section* poris)
1165 { return static_cast<Arm_input_section<big_endian>*>(poris); }
1166
1167 // Return the original size of the section.
1168 uint32_t
1169 original_size() const
1170 { return this->original_size_; }
1171
1172 protected:
1173 // Write data to output file.
1174 void
1175 do_write(Output_file*);
1176
1177 // Return required alignment of this.
1178 uint64_t
1179 do_addralign() const
1180 {
1181 if (this->is_stub_table_owner())
1182 return std::max(this->stub_table_->addralign(),
1183 static_cast<uint64_t>(this->original_addralign_));
1184 else
1185 return this->original_addralign_;
1186 }
1187
1188 // Finalize data size.
1189 void
1190 set_final_data_size();
1191
1192 // Reset address and file offset.
1193 void
1194 do_reset_address_and_file_offset();
1195
1196 // Output offset.
1197 bool
1198 do_output_offset(const Relobj* object, unsigned int shndx,
1199 section_offset_type offset,
1200 section_offset_type* poutput) const
1201 {
1202 if ((object == this->relobj())
1203 && (shndx == this->shndx())
1204 && (offset >= 0)
1205 && (offset <=
1206 convert_types<section_offset_type, uint32_t>(this->original_size_)))
1207 {
1208 *poutput = offset;
1209 return true;
1210 }
1211 else
1212 return false;
1213 }
1214
1215 private:
1216 // Copying is not allowed.
1217 Arm_input_section(const Arm_input_section&);
1218 Arm_input_section& operator=(const Arm_input_section&);
1219
1220 // Address alignment of the original input section.
1221 uint32_t original_addralign_;
1222 // Section size of the original input section.
1223 uint32_t original_size_;
1224 // Stub table.
1225 Stub_table<big_endian>* stub_table_;
1226 };
1227
1228 // Arm_exidx_fixup class. This is used to define a number of methods
1229 // and keep states for fixing up EXIDX coverage.
1230
1231 class Arm_exidx_fixup
1232 {
1233 public:
1234 Arm_exidx_fixup(Output_section* exidx_output_section,
1235 bool merge_exidx_entries = true)
1236 : exidx_output_section_(exidx_output_section), last_unwind_type_(UT_NONE),
1237 last_inlined_entry_(0), last_input_section_(NULL),
1238 section_offset_map_(NULL), first_output_text_section_(NULL),
1239 merge_exidx_entries_(merge_exidx_entries)
1240 { }
1241
1242 ~Arm_exidx_fixup()
1243 { delete this->section_offset_map_; }
1244
1245 // Process an EXIDX section for entry merging. Return number of bytes to
1246 // be deleted in output. If parts of the input EXIDX section are merged
1247 // a heap allocated Arm_exidx_section_offset_map is store in the located
1248 // PSECTION_OFFSET_MAP. The caller owns the map and is reponsible for
1249 // releasing it.
1250 template<bool big_endian>
1251 uint32_t
1252 process_exidx_section(const Arm_exidx_input_section* exidx_input_section,
1253 Arm_exidx_section_offset_map** psection_offset_map);
1254
1255 // Append an EXIDX_CANTUNWIND entry pointing at the end of the last
1256 // input section, if there is not one already.
1257 void
1258 add_exidx_cantunwind_as_needed();
1259
1260 // Return the output section for the text section which is linked to the
1261 // first exidx input in output.
1262 Output_section*
1263 first_output_text_section() const
1264 { return this->first_output_text_section_; }
1265
1266 private:
1267 // Copying is not allowed.
1268 Arm_exidx_fixup(const Arm_exidx_fixup&);
1269 Arm_exidx_fixup& operator=(const Arm_exidx_fixup&);
1270
1271 // Type of EXIDX unwind entry.
1272 enum Unwind_type
1273 {
1274 // No type.
1275 UT_NONE,
1276 // EXIDX_CANTUNWIND.
1277 UT_EXIDX_CANTUNWIND,
1278 // Inlined entry.
1279 UT_INLINED_ENTRY,
1280 // Normal entry.
1281 UT_NORMAL_ENTRY,
1282 };
1283
1284 // Process an EXIDX entry. We only care about the second word of the
1285 // entry. Return true if the entry can be deleted.
1286 bool
1287 process_exidx_entry(uint32_t second_word);
1288
1289 // Update the current section offset map during EXIDX section fix-up.
1290 // If there is no map, create one. INPUT_OFFSET is the offset of a
1291 // reference point, DELETED_BYTES is the number of deleted by in the
1292 // section so far. If DELETE_ENTRY is true, the reference point and
1293 // all offsets after the previous reference point are discarded.
1294 void
1295 update_offset_map(section_offset_type input_offset,
1296 section_size_type deleted_bytes, bool delete_entry);
1297
1298 // EXIDX output section.
1299 Output_section* exidx_output_section_;
1300 // Unwind type of the last EXIDX entry processed.
1301 Unwind_type last_unwind_type_;
1302 // Last seen inlined EXIDX entry.
1303 uint32_t last_inlined_entry_;
1304 // Last processed EXIDX input section.
1305 const Arm_exidx_input_section* last_input_section_;
1306 // Section offset map created in process_exidx_section.
1307 Arm_exidx_section_offset_map* section_offset_map_;
1308 // Output section for the text section which is linked to the first exidx
1309 // input in output.
1310 Output_section* first_output_text_section_;
1311
1312 bool merge_exidx_entries_;
1313 };
1314
1315 // Arm output section class. This is defined mainly to add a number of
1316 // stub generation methods.
1317
1318 template<bool big_endian>
1319 class Arm_output_section : public Output_section
1320 {
1321 public:
1322 typedef std::vector<std::pair<Relobj*, unsigned int> > Text_section_list;
1323
1324 Arm_output_section(const char* name, elfcpp::Elf_Word type,
1325 elfcpp::Elf_Xword flags)
1326 : Output_section(name, type, flags)
1327 { }
1328
1329 ~Arm_output_section()
1330 { }
1331
1332 // Group input sections for stub generation.
1333 void
1334 group_sections(section_size_type, bool, Target_arm<big_endian>*);
1335
1336 // Downcast a base pointer to an Arm_output_section pointer. This is
1337 // not type-safe but we only use Arm_output_section not the base class.
1338 static Arm_output_section<big_endian>*
1339 as_arm_output_section(Output_section* os)
1340 { return static_cast<Arm_output_section<big_endian>*>(os); }
1341
1342 // Append all input text sections in this into LIST.
1343 void
1344 append_text_sections_to_list(Text_section_list* list);
1345
1346 // Fix EXIDX coverage of this EXIDX output section. SORTED_TEXT_SECTION
1347 // is a list of text input sections sorted in ascending order of their
1348 // output addresses.
1349 void
1350 fix_exidx_coverage(Layout* layout,
1351 const Text_section_list& sorted_text_section,
1352 Symbol_table* symtab,
1353 bool merge_exidx_entries);
1354
1355 private:
1356 // For convenience.
1357 typedef Output_section::Input_section Input_section;
1358 typedef Output_section::Input_section_list Input_section_list;
1359
1360 // Create a stub group.
1361 void create_stub_group(Input_section_list::const_iterator,
1362 Input_section_list::const_iterator,
1363 Input_section_list::const_iterator,
1364 Target_arm<big_endian>*,
1365 std::vector<Output_relaxed_input_section*>*);
1366 };
1367
1368 // Arm_exidx_input_section class. This represents an EXIDX input section.
1369
1370 class Arm_exidx_input_section
1371 {
1372 public:
1373 static const section_offset_type invalid_offset =
1374 static_cast<section_offset_type>(-1);
1375
1376 Arm_exidx_input_section(Relobj* relobj, unsigned int shndx,
1377 unsigned int link, uint32_t size, uint32_t addralign)
1378 : relobj_(relobj), shndx_(shndx), link_(link), size_(size),
1379 addralign_(addralign)
1380 { }
1381
1382 ~Arm_exidx_input_section()
1383 { }
1384
1385 // Accessors: This is a read-only class.
1386
1387 // Return the object containing this EXIDX input section.
1388 Relobj*
1389 relobj() const
1390 { return this->relobj_; }
1391
1392 // Return the section index of this EXIDX input section.
1393 unsigned int
1394 shndx() const
1395 { return this->shndx_; }
1396
1397 // Return the section index of linked text section in the same object.
1398 unsigned int
1399 link() const
1400 { return this->link_; }
1401
1402 // Return size of the EXIDX input section.
1403 uint32_t
1404 size() const
1405 { return this->size_; }
1406
1407 // Reutnr address alignment of EXIDX input section.
1408 uint32_t
1409 addralign() const
1410 { return this->addralign_; }
1411
1412 private:
1413 // Object containing this.
1414 Relobj* relobj_;
1415 // Section index of this.
1416 unsigned int shndx_;
1417 // text section linked to this in the same object.
1418 unsigned int link_;
1419 // Size of this. For ARM 32-bit is sufficient.
1420 uint32_t size_;
1421 // Address alignment of this. For ARM 32-bit is sufficient.
1422 uint32_t addralign_;
1423 };
1424
1425 // Arm_relobj class.
1426
1427 template<bool big_endian>
1428 class Arm_relobj : public Sized_relobj<32, big_endian>
1429 {
1430 public:
1431 static const Arm_address invalid_address = static_cast<Arm_address>(-1);
1432
1433 Arm_relobj(const std::string& name, Input_file* input_file, off_t offset,
1434 const typename elfcpp::Ehdr<32, big_endian>& ehdr)
1435 : Sized_relobj<32, big_endian>(name, input_file, offset, ehdr),
1436 stub_tables_(), local_symbol_is_thumb_function_(),
1437 attributes_section_data_(NULL), mapping_symbols_info_(),
1438 section_has_cortex_a8_workaround_(NULL), exidx_section_map_(),
1439 output_local_symbol_count_needs_update_(false),
1440 merge_flags_and_attributes_(true)
1441 { }
1442
1443 ~Arm_relobj()
1444 { delete this->attributes_section_data_; }
1445
1446 // Return the stub table of the SHNDX-th section if there is one.
1447 Stub_table<big_endian>*
1448 stub_table(unsigned int shndx) const
1449 {
1450 gold_assert(shndx < this->stub_tables_.size());
1451 return this->stub_tables_[shndx];
1452 }
1453
1454 // Set STUB_TABLE to be the stub_table of the SHNDX-th section.
1455 void
1456 set_stub_table(unsigned int shndx, Stub_table<big_endian>* stub_table)
1457 {
1458 gold_assert(shndx < this->stub_tables_.size());
1459 this->stub_tables_[shndx] = stub_table;
1460 }
1461
1462 // Whether a local symbol is a THUMB function. R_SYM is the symbol table
1463 // index. This is only valid after do_count_local_symbol is called.
1464 bool
1465 local_symbol_is_thumb_function(unsigned int r_sym) const
1466 {
1467 gold_assert(r_sym < this->local_symbol_is_thumb_function_.size());
1468 return this->local_symbol_is_thumb_function_[r_sym];
1469 }
1470
1471 // Scan all relocation sections for stub generation.
1472 void
1473 scan_sections_for_stubs(Target_arm<big_endian>*, const Symbol_table*,
1474 const Layout*);
1475
1476 // Convert regular input section with index SHNDX to a relaxed section.
1477 void
1478 convert_input_section_to_relaxed_section(unsigned shndx)
1479 {
1480 // The stubs have relocations and we need to process them after writing
1481 // out the stubs. So relocation now must follow section write.
1482 this->set_section_offset(shndx, -1ULL);
1483 this->set_relocs_must_follow_section_writes();
1484 }
1485
1486 // Downcast a base pointer to an Arm_relobj pointer. This is
1487 // not type-safe but we only use Arm_relobj not the base class.
1488 static Arm_relobj<big_endian>*
1489 as_arm_relobj(Relobj* relobj)
1490 { return static_cast<Arm_relobj<big_endian>*>(relobj); }
1491
1492 // Processor-specific flags in ELF file header. This is valid only after
1493 // reading symbols.
1494 elfcpp::Elf_Word
1495 processor_specific_flags() const
1496 { return this->processor_specific_flags_; }
1497
1498 // Attribute section data This is the contents of the .ARM.attribute section
1499 // if there is one.
1500 const Attributes_section_data*
1501 attributes_section_data() const
1502 { return this->attributes_section_data_; }
1503
1504 // Mapping symbol location.
1505 typedef std::pair<unsigned int, Arm_address> Mapping_symbol_position;
1506
1507 // Functor for STL container.
1508 struct Mapping_symbol_position_less
1509 {
1510 bool
1511 operator()(const Mapping_symbol_position& p1,
1512 const Mapping_symbol_position& p2) const
1513 {
1514 return (p1.first < p2.first
1515 || (p1.first == p2.first && p1.second < p2.second));
1516 }
1517 };
1518
1519 // We only care about the first character of a mapping symbol, so
1520 // we only store that instead of the whole symbol name.
1521 typedef std::map<Mapping_symbol_position, char,
1522 Mapping_symbol_position_less> Mapping_symbols_info;
1523
1524 // Whether a section contains any Cortex-A8 workaround.
1525 bool
1526 section_has_cortex_a8_workaround(unsigned int shndx) const
1527 {
1528 return (this->section_has_cortex_a8_workaround_ != NULL
1529 && (*this->section_has_cortex_a8_workaround_)[shndx]);
1530 }
1531
1532 // Mark a section that has Cortex-A8 workaround.
1533 void
1534 mark_section_for_cortex_a8_workaround(unsigned int shndx)
1535 {
1536 if (this->section_has_cortex_a8_workaround_ == NULL)
1537 this->section_has_cortex_a8_workaround_ =
1538 new std::vector<bool>(this->shnum(), false);
1539 (*this->section_has_cortex_a8_workaround_)[shndx] = true;
1540 }
1541
1542 // Return the EXIDX section of an text section with index SHNDX or NULL
1543 // if the text section has no associated EXIDX section.
1544 const Arm_exidx_input_section*
1545 exidx_input_section_by_link(unsigned int shndx) const
1546 {
1547 Exidx_section_map::const_iterator p = this->exidx_section_map_.find(shndx);
1548 return ((p != this->exidx_section_map_.end()
1549 && p->second->link() == shndx)
1550 ? p->second
1551 : NULL);
1552 }
1553
1554 // Return the EXIDX section with index SHNDX or NULL if there is none.
1555 const Arm_exidx_input_section*
1556 exidx_input_section_by_shndx(unsigned shndx) const
1557 {
1558 Exidx_section_map::const_iterator p = this->exidx_section_map_.find(shndx);
1559 return ((p != this->exidx_section_map_.end()
1560 && p->second->shndx() == shndx)
1561 ? p->second
1562 : NULL);
1563 }
1564
1565 // Whether output local symbol count needs updating.
1566 bool
1567 output_local_symbol_count_needs_update() const
1568 { return this->output_local_symbol_count_needs_update_; }
1569
1570 // Set output_local_symbol_count_needs_update flag to be true.
1571 void
1572 set_output_local_symbol_count_needs_update()
1573 { this->output_local_symbol_count_needs_update_ = true; }
1574
1575 // Update output local symbol count at the end of relaxation.
1576 void
1577 update_output_local_symbol_count();
1578
1579 // Whether we want to merge processor-specific flags and attributes.
1580 bool
1581 merge_flags_and_attributes() const
1582 { return this->merge_flags_and_attributes_; }
1583
1584 protected:
1585 // Post constructor setup.
1586 void
1587 do_setup()
1588 {
1589 // Call parent's setup method.
1590 Sized_relobj<32, big_endian>::do_setup();
1591
1592 // Initialize look-up tables.
1593 Stub_table_list empty_stub_table_list(this->shnum(), NULL);
1594 this->stub_tables_.swap(empty_stub_table_list);
1595 }
1596
1597 // Count the local symbols.
1598 void
1599 do_count_local_symbols(Stringpool_template<char>*,
1600 Stringpool_template<char>*);
1601
1602 void
1603 do_relocate_sections(const Symbol_table* symtab, const Layout* layout,
1604 const unsigned char* pshdrs,
1605 typename Sized_relobj<32, big_endian>::Views* pivews);
1606
1607 // Read the symbol information.
1608 void
1609 do_read_symbols(Read_symbols_data* sd);
1610
1611 // Process relocs for garbage collection.
1612 void
1613 do_gc_process_relocs(Symbol_table*, Layout*, Read_relocs_data*);
1614
1615 private:
1616
1617 // Whether a section needs to be scanned for relocation stubs.
1618 bool
1619 section_needs_reloc_stub_scanning(const elfcpp::Shdr<32, big_endian>&,
1620 const Relobj::Output_sections&,
1621 const Symbol_table *, const unsigned char*);
1622
1623 // Whether a section is a scannable text section.
1624 bool
1625 section_is_scannable(const elfcpp::Shdr<32, big_endian>&, unsigned int,
1626 const Output_section*, const Symbol_table *);
1627
1628 // Whether a section needs to be scanned for the Cortex-A8 erratum.
1629 bool
1630 section_needs_cortex_a8_stub_scanning(const elfcpp::Shdr<32, big_endian>&,
1631 unsigned int, Output_section*,
1632 const Symbol_table *);
1633
1634 // Scan a section for the Cortex-A8 erratum.
1635 void
1636 scan_section_for_cortex_a8_erratum(const elfcpp::Shdr<32, big_endian>&,
1637 unsigned int, Output_section*,
1638 Target_arm<big_endian>*);
1639
1640 // Find the linked text section of an EXIDX section by looking at the
1641 // first reloction of the EXIDX section. PSHDR points to the section
1642 // headers of a relocation section and PSYMS points to the local symbols.
1643 // PSHNDX points to a location storing the text section index if found.
1644 // Return whether we can find the linked section.
1645 bool
1646 find_linked_text_section(const unsigned char* pshdr,
1647 const unsigned char* psyms, unsigned int* pshndx);
1648
1649 //
1650 // Make a new Arm_exidx_input_section object for EXIDX section with
1651 // index SHNDX and section header SHDR. TEXT_SHNDX is the section
1652 // index of the linked text section.
1653 void
1654 make_exidx_input_section(unsigned int shndx,
1655 const elfcpp::Shdr<32, big_endian>& shdr,
1656 unsigned int text_shndx);
1657
1658 // Return the output address of either a plain input section or a
1659 // relaxed input section. SHNDX is the section index.
1660 Arm_address
1661 simple_input_section_output_address(unsigned int, Output_section*);
1662
1663 typedef std::vector<Stub_table<big_endian>*> Stub_table_list;
1664 typedef Unordered_map<unsigned int, const Arm_exidx_input_section*>
1665 Exidx_section_map;
1666
1667 // List of stub tables.
1668 Stub_table_list stub_tables_;
1669 // Bit vector to tell if a local symbol is a thumb function or not.
1670 // This is only valid after do_count_local_symbol is called.
1671 std::vector<bool> local_symbol_is_thumb_function_;
1672 // processor-specific flags in ELF file header.
1673 elfcpp::Elf_Word processor_specific_flags_;
1674 // Object attributes if there is an .ARM.attributes section or NULL.
1675 Attributes_section_data* attributes_section_data_;
1676 // Mapping symbols information.
1677 Mapping_symbols_info mapping_symbols_info_;
1678 // Bitmap to indicate sections with Cortex-A8 workaround or NULL.
1679 std::vector<bool>* section_has_cortex_a8_workaround_;
1680 // Map a text section to its associated .ARM.exidx section, if there is one.
1681 Exidx_section_map exidx_section_map_;
1682 // Whether output local symbol count needs updating.
1683 bool output_local_symbol_count_needs_update_;
1684 // Whether we merge processor flags and attributes of this object to
1685 // output.
1686 bool merge_flags_and_attributes_;
1687 };
1688
1689 // Arm_dynobj class.
1690
1691 template<bool big_endian>
1692 class Arm_dynobj : public Sized_dynobj<32, big_endian>
1693 {
1694 public:
1695 Arm_dynobj(const std::string& name, Input_file* input_file, off_t offset,
1696 const elfcpp::Ehdr<32, big_endian>& ehdr)
1697 : Sized_dynobj<32, big_endian>(name, input_file, offset, ehdr),
1698 processor_specific_flags_(0), attributes_section_data_(NULL)
1699 { }
1700
1701 ~Arm_dynobj()
1702 { delete this->attributes_section_data_; }
1703
1704 // Downcast a base pointer to an Arm_relobj pointer. This is
1705 // not type-safe but we only use Arm_relobj not the base class.
1706 static Arm_dynobj<big_endian>*
1707 as_arm_dynobj(Dynobj* dynobj)
1708 { return static_cast<Arm_dynobj<big_endian>*>(dynobj); }
1709
1710 // Processor-specific flags in ELF file header. This is valid only after
1711 // reading symbols.
1712 elfcpp::Elf_Word
1713 processor_specific_flags() const
1714 { return this->processor_specific_flags_; }
1715
1716 // Attributes section data.
1717 const Attributes_section_data*
1718 attributes_section_data() const
1719 { return this->attributes_section_data_; }
1720
1721 protected:
1722 // Read the symbol information.
1723 void
1724 do_read_symbols(Read_symbols_data* sd);
1725
1726 private:
1727 // processor-specific flags in ELF file header.
1728 elfcpp::Elf_Word processor_specific_flags_;
1729 // Object attributes if there is an .ARM.attributes section or NULL.
1730 Attributes_section_data* attributes_section_data_;
1731 };
1732
1733 // Functor to read reloc addends during stub generation.
1734
1735 template<int sh_type, bool big_endian>
1736 struct Stub_addend_reader
1737 {
1738 // Return the addend for a relocation of a particular type. Depending
1739 // on whether this is a REL or RELA relocation, read the addend from a
1740 // view or from a Reloc object.
1741 elfcpp::Elf_types<32>::Elf_Swxword
1742 operator()(
1743 unsigned int /* r_type */,
1744 const unsigned char* /* view */,
1745 const typename Reloc_types<sh_type,
1746 32, big_endian>::Reloc& /* reloc */) const;
1747 };
1748
1749 // Specialized Stub_addend_reader for SHT_REL type relocation sections.
1750
1751 template<bool big_endian>
1752 struct Stub_addend_reader<elfcpp::SHT_REL, big_endian>
1753 {
1754 elfcpp::Elf_types<32>::Elf_Swxword
1755 operator()(
1756 unsigned int,
1757 const unsigned char*,
1758 const typename Reloc_types<elfcpp::SHT_REL, 32, big_endian>::Reloc&) const;
1759 };
1760
1761 // Specialized Stub_addend_reader for RELA type relocation sections.
1762 // We currently do not handle RELA type relocation sections but it is trivial
1763 // to implement the addend reader. This is provided for completeness and to
1764 // make it easier to add support for RELA relocation sections in the future.
1765
1766 template<bool big_endian>
1767 struct Stub_addend_reader<elfcpp::SHT_RELA, big_endian>
1768 {
1769 elfcpp::Elf_types<32>::Elf_Swxword
1770 operator()(
1771 unsigned int,
1772 const unsigned char*,
1773 const typename Reloc_types<elfcpp::SHT_RELA, 32,
1774 big_endian>::Reloc& reloc) const
1775 { return reloc.get_r_addend(); }
1776 };
1777
1778 // Cortex_a8_reloc class. We keep record of relocation that may need
1779 // the Cortex-A8 erratum workaround.
1780
1781 class Cortex_a8_reloc
1782 {
1783 public:
1784 Cortex_a8_reloc(Reloc_stub* reloc_stub, unsigned r_type,
1785 Arm_address destination)
1786 : reloc_stub_(reloc_stub), r_type_(r_type), destination_(destination)
1787 { }
1788
1789 ~Cortex_a8_reloc()
1790 { }
1791
1792 // Accessors: This is a read-only class.
1793
1794 // Return the relocation stub associated with this relocation if there is
1795 // one.
1796 const Reloc_stub*
1797 reloc_stub() const
1798 { return this->reloc_stub_; }
1799
1800 // Return the relocation type.
1801 unsigned int
1802 r_type() const
1803 { return this->r_type_; }
1804
1805 // Return the destination address of the relocation. LSB stores the THUMB
1806 // bit.
1807 Arm_address
1808 destination() const
1809 { return this->destination_; }
1810
1811 private:
1812 // Associated relocation stub if there is one, or NULL.
1813 const Reloc_stub* reloc_stub_;
1814 // Relocation type.
1815 unsigned int r_type_;
1816 // Destination address of this relocation. LSB is used to distinguish
1817 // ARM/THUMB mode.
1818 Arm_address destination_;
1819 };
1820
1821 // Arm_output_data_got class. We derive this from Output_data_got to add
1822 // extra methods to handle TLS relocations in a static link.
1823
1824 template<bool big_endian>
1825 class Arm_output_data_got : public Output_data_got<32, big_endian>
1826 {
1827 public:
1828 Arm_output_data_got(Symbol_table* symtab, Layout* layout)
1829 : Output_data_got<32, big_endian>(), symbol_table_(symtab), layout_(layout)
1830 { }
1831
1832 // Add a static entry for the GOT entry at OFFSET. GSYM is a global
1833 // symbol and R_TYPE is the code of a dynamic relocation that needs to be
1834 // applied in a static link.
1835 void
1836 add_static_reloc(unsigned int got_offset, unsigned int r_type, Symbol* gsym)
1837 { this->static_relocs_.push_back(Static_reloc(got_offset, r_type, gsym)); }
1838
1839 // Add a static reloc for the GOT entry at OFFSET. RELOBJ is an object
1840 // defining a local symbol with INDEX. R_TYPE is the code of a dynamic
1841 // relocation that needs to be applied in a static link.
1842 void
1843 add_static_reloc(unsigned int got_offset, unsigned int r_type,
1844 Sized_relobj<32, big_endian>* relobj, unsigned int index)
1845 {
1846 this->static_relocs_.push_back(Static_reloc(got_offset, r_type, relobj,
1847 index));
1848 }
1849
1850 // Add a GOT pair for R_ARM_TLS_GD32. The creates a pair of GOT entries.
1851 // The first one is initialized to be 1, which is the module index for
1852 // the main executable and the second one 0. A reloc of the type
1853 // R_ARM_TLS_DTPOFF32 will be created for the second GOT entry and will
1854 // be applied by gold. GSYM is a global symbol.
1855 void
1856 add_tls_gd32_with_static_reloc(unsigned int got_type, Symbol* gsym);
1857
1858 // Same as the above but for a local symbol in OBJECT with INDEX.
1859 void
1860 add_tls_gd32_with_static_reloc(unsigned int got_type,
1861 Sized_relobj<32, big_endian>* object,
1862 unsigned int index);
1863
1864 protected:
1865 // Write out the GOT table.
1866 void
1867 do_write(Output_file*);
1868
1869 private:
1870 // This class represent dynamic relocations that need to be applied by
1871 // gold because we are using TLS relocations in a static link.
1872 class Static_reloc
1873 {
1874 public:
1875 Static_reloc(unsigned int got_offset, unsigned int r_type, Symbol* gsym)
1876 : got_offset_(got_offset), r_type_(r_type), symbol_is_global_(true)
1877 { this->u_.global.symbol = gsym; }
1878
1879 Static_reloc(unsigned int got_offset, unsigned int r_type,
1880 Sized_relobj<32, big_endian>* relobj, unsigned int index)
1881 : got_offset_(got_offset), r_type_(r_type), symbol_is_global_(false)
1882 {
1883 this->u_.local.relobj = relobj;
1884 this->u_.local.index = index;
1885 }
1886
1887 // Return the GOT offset.
1888 unsigned int
1889 got_offset() const
1890 { return this->got_offset_; }
1891
1892 // Relocation type.
1893 unsigned int
1894 r_type() const
1895 { return this->r_type_; }
1896
1897 // Whether the symbol is global or not.
1898 bool
1899 symbol_is_global() const
1900 { return this->symbol_is_global_; }
1901
1902 // For a relocation against a global symbol, the global symbol.
1903 Symbol*
1904 symbol() const
1905 {
1906 gold_assert(this->symbol_is_global_);
1907 return this->u_.global.symbol;
1908 }
1909
1910 // For a relocation against a local symbol, the defining object.
1911 Sized_relobj<32, big_endian>*
1912 relobj() const
1913 {
1914 gold_assert(!this->symbol_is_global_);
1915 return this->u_.local.relobj;
1916 }
1917
1918 // For a relocation against a local symbol, the local symbol index.
1919 unsigned int
1920 index() const
1921 {
1922 gold_assert(!this->symbol_is_global_);
1923 return this->u_.local.index;
1924 }
1925
1926 private:
1927 // GOT offset of the entry to which this relocation is applied.
1928 unsigned int got_offset_;
1929 // Type of relocation.
1930 unsigned int r_type_;
1931 // Whether this relocation is against a global symbol.
1932 bool symbol_is_global_;
1933 // A global or local symbol.
1934 union
1935 {
1936 struct
1937 {
1938 // For a global symbol, the symbol itself.
1939 Symbol* symbol;
1940 } global;
1941 struct
1942 {
1943 // For a local symbol, the object defining object.
1944 Sized_relobj<32, big_endian>* relobj;
1945 // For a local symbol, the symbol index.
1946 unsigned int index;
1947 } local;
1948 } u_;
1949 };
1950
1951 // Symbol table of the output object.
1952 Symbol_table* symbol_table_;
1953 // Layout of the output object.
1954 Layout* layout_;
1955 // Static relocs to be applied to the GOT.
1956 std::vector<Static_reloc> static_relocs_;
1957 };
1958
1959 // Utilities for manipulating integers of up to 32-bits
1960
1961 namespace utils
1962 {
1963 // Sign extend an n-bit unsigned integer stored in an uint32_t into
1964 // an int32_t. NO_BITS must be between 1 to 32.
1965 template<int no_bits>
1966 static inline int32_t
1967 sign_extend(uint32_t bits)
1968 {
1969 gold_assert(no_bits >= 0 && no_bits <= 32);
1970 if (no_bits == 32)
1971 return static_cast<int32_t>(bits);
1972 uint32_t mask = (~((uint32_t) 0)) >> (32 - no_bits);
1973 bits &= mask;
1974 uint32_t top_bit = 1U << (no_bits - 1);
1975 int32_t as_signed = static_cast<int32_t>(bits);
1976 return (bits & top_bit) ? as_signed + (-top_bit * 2) : as_signed;
1977 }
1978
1979 // Detects overflow of an NO_BITS integer stored in a uint32_t.
1980 template<int no_bits>
1981 static inline bool
1982 has_overflow(uint32_t bits)
1983 {
1984 gold_assert(no_bits >= 0 && no_bits <= 32);
1985 if (no_bits == 32)
1986 return false;
1987 int32_t max = (1 << (no_bits - 1)) - 1;
1988 int32_t min = -(1 << (no_bits - 1));
1989 int32_t as_signed = static_cast<int32_t>(bits);
1990 return as_signed > max || as_signed < min;
1991 }
1992
1993 // Detects overflow of an NO_BITS integer stored in a uint32_t when it
1994 // fits in the given number of bits as either a signed or unsigned value.
1995 // For example, has_signed_unsigned_overflow<8> would check
1996 // -128 <= bits <= 255
1997 template<int no_bits>
1998 static inline bool
1999 has_signed_unsigned_overflow(uint32_t bits)
2000 {
2001 gold_assert(no_bits >= 2 && no_bits <= 32);
2002 if (no_bits == 32)
2003 return false;
2004 int32_t max = static_cast<int32_t>((1U << no_bits) - 1);
2005 int32_t min = -(1 << (no_bits - 1));
2006 int32_t as_signed = static_cast<int32_t>(bits);
2007 return as_signed > max || as_signed < min;
2008 }
2009
2010 // Select bits from A and B using bits in MASK. For each n in [0..31],
2011 // the n-th bit in the result is chosen from the n-th bits of A and B.
2012 // A zero selects A and a one selects B.
2013 static inline uint32_t
2014 bit_select(uint32_t a, uint32_t b, uint32_t mask)
2015 { return (a & ~mask) | (b & mask); }
2016 };
2017
2018 template<bool big_endian>
2019 class Target_arm : public Sized_target<32, big_endian>
2020 {
2021 public:
2022 typedef Output_data_reloc<elfcpp::SHT_REL, true, 32, big_endian>
2023 Reloc_section;
2024
2025 // When were are relocating a stub, we pass this as the relocation number.
2026 static const size_t fake_relnum_for_stubs = static_cast<size_t>(-1);
2027
2028 Target_arm()
2029 : Sized_target<32, big_endian>(&arm_info),
2030 got_(NULL), plt_(NULL), got_plt_(NULL), rel_dyn_(NULL),
2031 copy_relocs_(elfcpp::R_ARM_COPY), dynbss_(NULL),
2032 got_mod_index_offset_(-1U), tls_base_symbol_defined_(false),
2033 stub_tables_(), stub_factory_(Stub_factory::get_instance()),
2034 may_use_blx_(false), should_force_pic_veneer_(false),
2035 arm_input_section_map_(), attributes_section_data_(NULL),
2036 fix_cortex_a8_(false), cortex_a8_relocs_info_()
2037 { }
2038
2039 // Whether we can use BLX.
2040 bool
2041 may_use_blx() const
2042 { return this->may_use_blx_; }
2043
2044 // Set use-BLX flag.
2045 void
2046 set_may_use_blx(bool value)
2047 { this->may_use_blx_ = value; }
2048
2049 // Whether we force PCI branch veneers.
2050 bool
2051 should_force_pic_veneer() const
2052 { return this->should_force_pic_veneer_; }
2053
2054 // Set PIC veneer flag.
2055 void
2056 set_should_force_pic_veneer(bool value)
2057 { this->should_force_pic_veneer_ = value; }
2058
2059 // Whether we use THUMB-2 instructions.
2060 bool
2061 using_thumb2() const
2062 {
2063 Object_attribute* attr =
2064 this->get_aeabi_object_attribute(elfcpp::Tag_CPU_arch);
2065 int arch = attr->int_value();
2066 return arch == elfcpp::TAG_CPU_ARCH_V6T2 || arch >= elfcpp::TAG_CPU_ARCH_V7;
2067 }
2068
2069 // Whether we use THUMB/THUMB-2 instructions only.
2070 bool
2071 using_thumb_only() const
2072 {
2073 Object_attribute* attr =
2074 this->get_aeabi_object_attribute(elfcpp::Tag_CPU_arch);
2075
2076 if (attr->int_value() == elfcpp::TAG_CPU_ARCH_V6_M
2077 || attr->int_value() == elfcpp::TAG_CPU_ARCH_V6S_M)
2078 return true;
2079 if (attr->int_value() != elfcpp::TAG_CPU_ARCH_V7
2080 && attr->int_value() != elfcpp::TAG_CPU_ARCH_V7E_M)
2081 return false;
2082 attr = this->get_aeabi_object_attribute(elfcpp::Tag_CPU_arch_profile);
2083 return attr->int_value() == 'M';
2084 }
2085
2086 // Whether we have an NOP instruction. If not, use mov r0, r0 instead.
2087 bool
2088 may_use_arm_nop() const
2089 {
2090 Object_attribute* attr =
2091 this->get_aeabi_object_attribute(elfcpp::Tag_CPU_arch);
2092 int arch = attr->int_value();
2093 return (arch == elfcpp::TAG_CPU_ARCH_V6T2
2094 || arch == elfcpp::TAG_CPU_ARCH_V6K
2095 || arch == elfcpp::TAG_CPU_ARCH_V7
2096 || arch == elfcpp::TAG_CPU_ARCH_V7E_M);
2097 }
2098
2099 // Whether we have THUMB-2 NOP.W instruction.
2100 bool
2101 may_use_thumb2_nop() const
2102 {
2103 Object_attribute* attr =
2104 this->get_aeabi_object_attribute(elfcpp::Tag_CPU_arch);
2105 int arch = attr->int_value();
2106 return (arch == elfcpp::TAG_CPU_ARCH_V6T2
2107 || arch == elfcpp::TAG_CPU_ARCH_V7
2108 || arch == elfcpp::TAG_CPU_ARCH_V7E_M);
2109 }
2110
2111 // Process the relocations to determine unreferenced sections for
2112 // garbage collection.
2113 void
2114 gc_process_relocs(Symbol_table* symtab,
2115 Layout* layout,
2116 Sized_relobj<32, big_endian>* object,
2117 unsigned int data_shndx,
2118 unsigned int sh_type,
2119 const unsigned char* prelocs,
2120 size_t reloc_count,
2121 Output_section* output_section,
2122 bool needs_special_offset_handling,
2123 size_t local_symbol_count,
2124 const unsigned char* plocal_symbols);
2125
2126 // Scan the relocations to look for symbol adjustments.
2127 void
2128 scan_relocs(Symbol_table* symtab,
2129 Layout* layout,
2130 Sized_relobj<32, big_endian>* object,
2131 unsigned int data_shndx,
2132 unsigned int sh_type,
2133 const unsigned char* prelocs,
2134 size_t reloc_count,
2135 Output_section* output_section,
2136 bool needs_special_offset_handling,
2137 size_t local_symbol_count,
2138 const unsigned char* plocal_symbols);
2139
2140 // Finalize the sections.
2141 void
2142 do_finalize_sections(Layout*, const Input_objects*, Symbol_table*);
2143
2144 // Return the value to use for a dynamic symbol which requires special
2145 // treatment.
2146 uint64_t
2147 do_dynsym_value(const Symbol*) const;
2148
2149 // Relocate a section.
2150 void
2151 relocate_section(const Relocate_info<32, big_endian>*,
2152 unsigned int sh_type,
2153 const unsigned char* prelocs,
2154 size_t reloc_count,
2155 Output_section* output_section,
2156 bool needs_special_offset_handling,
2157 unsigned char* view,
2158 Arm_address view_address,
2159 section_size_type view_size,
2160 const Reloc_symbol_changes*);
2161
2162 // Scan the relocs during a relocatable link.
2163 void
2164 scan_relocatable_relocs(Symbol_table* symtab,
2165 Layout* layout,
2166 Sized_relobj<32, big_endian>* object,
2167 unsigned int data_shndx,
2168 unsigned int sh_type,
2169 const unsigned char* prelocs,
2170 size_t reloc_count,
2171 Output_section* output_section,
2172 bool needs_special_offset_handling,
2173 size_t local_symbol_count,
2174 const unsigned char* plocal_symbols,
2175 Relocatable_relocs*);
2176
2177 // Relocate a section during a relocatable link.
2178 void
2179 relocate_for_relocatable(const Relocate_info<32, big_endian>*,
2180 unsigned int sh_type,
2181 const unsigned char* prelocs,
2182 size_t reloc_count,
2183 Output_section* output_section,
2184 off_t offset_in_output_section,
2185 const Relocatable_relocs*,
2186 unsigned char* view,
2187 Arm_address view_address,
2188 section_size_type view_size,
2189 unsigned char* reloc_view,
2190 section_size_type reloc_view_size);
2191
2192 // Return whether SYM is defined by the ABI.
2193 bool
2194 do_is_defined_by_abi(Symbol* sym) const
2195 { return strcmp(sym->name(), "__tls_get_addr") == 0; }
2196
2197 // Return whether there is a GOT section.
2198 bool
2199 has_got_section() const
2200 { return this->got_ != NULL; }
2201
2202 // Return the size of the GOT section.
2203 section_size_type
2204 got_size()
2205 {
2206 gold_assert(this->got_ != NULL);
2207 return this->got_->data_size();
2208 }
2209
2210 // Map platform-specific reloc types
2211 static unsigned int
2212 get_real_reloc_type (unsigned int r_type);
2213
2214 //
2215 // Methods to support stub-generations.
2216 //
2217
2218 // Return the stub factory
2219 const Stub_factory&
2220 stub_factory() const
2221 { return this->stub_factory_; }
2222
2223 // Make a new Arm_input_section object.
2224 Arm_input_section<big_endian>*
2225 new_arm_input_section(Relobj*, unsigned int);
2226
2227 // Find the Arm_input_section object corresponding to the SHNDX-th input
2228 // section of RELOBJ.
2229 Arm_input_section<big_endian>*
2230 find_arm_input_section(Relobj* relobj, unsigned int shndx) const;
2231
2232 // Make a new Stub_table
2233 Stub_table<big_endian>*
2234 new_stub_table(Arm_input_section<big_endian>*);
2235
2236 // Scan a section for stub generation.
2237 void
2238 scan_section_for_stubs(const Relocate_info<32, big_endian>*, unsigned int,
2239 const unsigned char*, size_t, Output_section*,
2240 bool, const unsigned char*, Arm_address,
2241 section_size_type);
2242
2243 // Relocate a stub.
2244 void
2245 relocate_stub(Stub*, const Relocate_info<32, big_endian>*,
2246 Output_section*, unsigned char*, Arm_address,
2247 section_size_type);
2248
2249 // Get the default ARM target.
2250 static Target_arm<big_endian>*
2251 default_target()
2252 {
2253 gold_assert(parameters->target().machine_code() == elfcpp::EM_ARM
2254 && parameters->target().is_big_endian() == big_endian);
2255 return static_cast<Target_arm<big_endian>*>(
2256 parameters->sized_target<32, big_endian>());
2257 }
2258
2259 // Whether NAME belongs to a mapping symbol.
2260 static bool
2261 is_mapping_symbol_name(const char* name)
2262 {
2263 return (name
2264 && name[0] == '$'
2265 && (name[1] == 'a' || name[1] == 't' || name[1] == 'd')
2266 && (name[2] == '\0' || name[2] == '.'));
2267 }
2268
2269 // Whether we work around the Cortex-A8 erratum.
2270 bool
2271 fix_cortex_a8() const
2272 { return this->fix_cortex_a8_; }
2273
2274 // Whether we merge exidx entries in debuginfo.
2275 bool
2276 merge_exidx_entries() const
2277 { return parameters->options().merge_exidx_entries(); }
2278
2279 // Whether we fix R_ARM_V4BX relocation.
2280 // 0 - do not fix
2281 // 1 - replace with MOV instruction (armv4 target)
2282 // 2 - make interworking veneer (>= armv4t targets only)
2283 General_options::Fix_v4bx
2284 fix_v4bx() const
2285 { return parameters->options().fix_v4bx(); }
2286
2287 // Scan a span of THUMB code section for Cortex-A8 erratum.
2288 void
2289 scan_span_for_cortex_a8_erratum(Arm_relobj<big_endian>*, unsigned int,
2290 section_size_type, section_size_type,
2291 const unsigned char*, Arm_address);
2292
2293 // Apply Cortex-A8 workaround to a branch.
2294 void
2295 apply_cortex_a8_workaround(const Cortex_a8_stub*, Arm_address,
2296 unsigned char*, Arm_address);
2297
2298 protected:
2299 // Make an ELF object.
2300 Object*
2301 do_make_elf_object(const std::string&, Input_file*, off_t,
2302 const elfcpp::Ehdr<32, big_endian>& ehdr);
2303
2304 Object*
2305 do_make_elf_object(const std::string&, Input_file*, off_t,
2306 const elfcpp::Ehdr<32, !big_endian>&)
2307 { gold_unreachable(); }
2308
2309 Object*
2310 do_make_elf_object(const std::string&, Input_file*, off_t,
2311 const elfcpp::Ehdr<64, false>&)
2312 { gold_unreachable(); }
2313
2314 Object*
2315 do_make_elf_object(const std::string&, Input_file*, off_t,
2316 const elfcpp::Ehdr<64, true>&)
2317 { gold_unreachable(); }
2318
2319 // Make an output section.
2320 Output_section*
2321 do_make_output_section(const char* name, elfcpp::Elf_Word type,
2322 elfcpp::Elf_Xword flags)
2323 { return new Arm_output_section<big_endian>(name, type, flags); }
2324
2325 void
2326 do_adjust_elf_header(unsigned char* view, int len) const;
2327
2328 // We only need to generate stubs, and hence perform relaxation if we are
2329 // not doing relocatable linking.
2330 bool
2331 do_may_relax() const
2332 { return !parameters->options().relocatable(); }
2333
2334 bool
2335 do_relax(int, const Input_objects*, Symbol_table*, Layout*);
2336
2337 // Determine whether an object attribute tag takes an integer, a
2338 // string or both.
2339 int
2340 do_attribute_arg_type(int tag) const;
2341
2342 // Reorder tags during output.
2343 int
2344 do_attributes_order(int num) const;
2345
2346 // This is called when the target is selected as the default.
2347 void
2348 do_select_as_default_target()
2349 {
2350 // No locking is required since there should only be one default target.
2351 // We cannot have both the big-endian and little-endian ARM targets
2352 // as the default.
2353 gold_assert(arm_reloc_property_table == NULL);
2354 arm_reloc_property_table = new Arm_reloc_property_table();
2355 }
2356
2357 private:
2358 // The class which scans relocations.
2359 class Scan
2360 {
2361 public:
2362 Scan()
2363 : issued_non_pic_error_(false)
2364 { }
2365
2366 inline void
2367 local(Symbol_table* symtab, Layout* layout, Target_arm* target,
2368 Sized_relobj<32, big_endian>* object,
2369 unsigned int data_shndx,
2370 Output_section* output_section,
2371 const elfcpp::Rel<32, big_endian>& reloc, unsigned int r_type,
2372 const elfcpp::Sym<32, big_endian>& lsym);
2373
2374 inline void
2375 global(Symbol_table* symtab, Layout* layout, Target_arm* target,
2376 Sized_relobj<32, big_endian>* object,
2377 unsigned int data_shndx,
2378 Output_section* output_section,
2379 const elfcpp::Rel<32, big_endian>& reloc, unsigned int r_type,
2380 Symbol* gsym);
2381
2382 inline bool
2383 local_reloc_may_be_function_pointer(Symbol_table* , Layout* , Target_arm* ,
2384 Sized_relobj<32, big_endian>* ,
2385 unsigned int ,
2386 Output_section* ,
2387 const elfcpp::Rel<32, big_endian>& ,
2388 unsigned int ,
2389 const elfcpp::Sym<32, big_endian>&)
2390 { return false; }
2391
2392 inline bool
2393 global_reloc_may_be_function_pointer(Symbol_table* , Layout* , Target_arm* ,
2394 Sized_relobj<32, big_endian>* ,
2395 unsigned int ,
2396 Output_section* ,
2397 const elfcpp::Rel<32, big_endian>& ,
2398 unsigned int , Symbol*)
2399 { return false; }
2400
2401 private:
2402 static void
2403 unsupported_reloc_local(Sized_relobj<32, big_endian>*,
2404 unsigned int r_type);
2405
2406 static void
2407 unsupported_reloc_global(Sized_relobj<32, big_endian>*,
2408 unsigned int r_type, Symbol*);
2409
2410 void
2411 check_non_pic(Relobj*, unsigned int r_type);
2412
2413 // Almost identical to Symbol::needs_plt_entry except that it also
2414 // handles STT_ARM_TFUNC.
2415 static bool
2416 symbol_needs_plt_entry(const Symbol* sym)
2417 {
2418 // An undefined symbol from an executable does not need a PLT entry.
2419 if (sym->is_undefined() && !parameters->options().shared())
2420 return false;
2421
2422 return (!parameters->doing_static_link()
2423 && (sym->type() == elfcpp::STT_FUNC
2424 || sym->type() == elfcpp::STT_ARM_TFUNC)
2425 && (sym->is_from_dynobj()
2426 || sym->is_undefined()
2427 || sym->is_preemptible()));
2428 }
2429
2430 // Whether we have issued an error about a non-PIC compilation.
2431 bool issued_non_pic_error_;
2432 };
2433
2434 // The class which implements relocation.
2435 class Relocate
2436 {
2437 public:
2438 Relocate()
2439 { }
2440
2441 ~Relocate()
2442 { }
2443
2444 // Return whether the static relocation needs to be applied.
2445 inline bool
2446 should_apply_static_reloc(const Sized_symbol<32>* gsym,
2447 int ref_flags,
2448 bool is_32bit,
2449 Output_section* output_section);
2450
2451 // Do a relocation. Return false if the caller should not issue
2452 // any warnings about this relocation.
2453 inline bool
2454 relocate(const Relocate_info<32, big_endian>*, Target_arm*,
2455 Output_section*, size_t relnum,
2456 const elfcpp::Rel<32, big_endian>&,
2457 unsigned int r_type, const Sized_symbol<32>*,
2458 const Symbol_value<32>*,
2459 unsigned char*, Arm_address,
2460 section_size_type);
2461
2462 // Return whether we want to pass flag NON_PIC_REF for this
2463 // reloc. This means the relocation type accesses a symbol not via
2464 // GOT or PLT.
2465 static inline bool
2466 reloc_is_non_pic (unsigned int r_type)
2467 {
2468 switch (r_type)
2469 {
2470 // These relocation types reference GOT or PLT entries explicitly.
2471 case elfcpp::R_ARM_GOT_BREL:
2472 case elfcpp::R_ARM_GOT_ABS:
2473 case elfcpp::R_ARM_GOT_PREL:
2474 case elfcpp::R_ARM_GOT_BREL12:
2475 case elfcpp::R_ARM_PLT32_ABS:
2476 case elfcpp::R_ARM_TLS_GD32:
2477 case elfcpp::R_ARM_TLS_LDM32:
2478 case elfcpp::R_ARM_TLS_IE32:
2479 case elfcpp::R_ARM_TLS_IE12GP:
2480
2481 // These relocate types may use PLT entries.
2482 case elfcpp::R_ARM_CALL:
2483 case elfcpp::R_ARM_THM_CALL:
2484 case elfcpp::R_ARM_JUMP24:
2485 case elfcpp::R_ARM_THM_JUMP24:
2486 case elfcpp::R_ARM_THM_JUMP19:
2487 case elfcpp::R_ARM_PLT32:
2488 case elfcpp::R_ARM_THM_XPC22:
2489 case elfcpp::R_ARM_PREL31:
2490 case elfcpp::R_ARM_SBREL31:
2491 return false;
2492
2493 default:
2494 return true;
2495 }
2496 }
2497
2498 private:
2499 // Do a TLS relocation.
2500 inline typename Arm_relocate_functions<big_endian>::Status
2501 relocate_tls(const Relocate_info<32, big_endian>*, Target_arm<big_endian>*,
2502 size_t, const elfcpp::Rel<32, big_endian>&, unsigned int,
2503 const Sized_symbol<32>*, const Symbol_value<32>*,
2504 unsigned char*, elfcpp::Elf_types<32>::Elf_Addr,
2505 section_size_type);
2506
2507 };
2508
2509 // A class which returns the size required for a relocation type,
2510 // used while scanning relocs during a relocatable link.
2511 class Relocatable_size_for_reloc
2512 {
2513 public:
2514 unsigned int
2515 get_size_for_reloc(unsigned int, Relobj*);
2516 };
2517
2518 // Adjust TLS relocation type based on the options and whether this
2519 // is a local symbol.
2520 static tls::Tls_optimization
2521 optimize_tls_reloc(bool is_final, int r_type);
2522
2523 // Get the GOT section, creating it if necessary.
2524 Arm_output_data_got<big_endian>*
2525 got_section(Symbol_table*, Layout*);
2526
2527 // Get the GOT PLT section.
2528 Output_data_space*
2529 got_plt_section() const
2530 {
2531 gold_assert(this->got_plt_ != NULL);
2532 return this->got_plt_;
2533 }
2534
2535 // Create a PLT entry for a global symbol.
2536 void
2537 make_plt_entry(Symbol_table*, Layout*, Symbol*);
2538
2539 // Define the _TLS_MODULE_BASE_ symbol in the TLS segment.
2540 void
2541 define_tls_base_symbol(Symbol_table*, Layout*);
2542
2543 // Create a GOT entry for the TLS module index.
2544 unsigned int
2545 got_mod_index_entry(Symbol_table* symtab, Layout* layout,
2546 Sized_relobj<32, big_endian>* object);
2547
2548 // Get the PLT section.
2549 const Output_data_plt_arm<big_endian>*
2550 plt_section() const
2551 {
2552 gold_assert(this->plt_ != NULL);
2553 return this->plt_;
2554 }
2555
2556 // Get the dynamic reloc section, creating it if necessary.
2557 Reloc_section*
2558 rel_dyn_section(Layout*);
2559
2560 // Get the section to use for TLS_DESC relocations.
2561 Reloc_section*
2562 rel_tls_desc_section(Layout*) const;
2563
2564 // Return true if the symbol may need a COPY relocation.
2565 // References from an executable object to non-function symbols
2566 // defined in a dynamic object may need a COPY relocation.
2567 bool
2568 may_need_copy_reloc(Symbol* gsym)
2569 {
2570 return (gsym->type() != elfcpp::STT_ARM_TFUNC
2571 && gsym->may_need_copy_reloc());
2572 }
2573
2574 // Add a potential copy relocation.
2575 void
2576 copy_reloc(Symbol_table* symtab, Layout* layout,
2577 Sized_relobj<32, big_endian>* object,
2578 unsigned int shndx, Output_section* output_section,
2579 Symbol* sym, const elfcpp::Rel<32, big_endian>& reloc)
2580 {
2581 this->copy_relocs_.copy_reloc(symtab, layout,
2582 symtab->get_sized_symbol<32>(sym),
2583 object, shndx, output_section, reloc,
2584 this->rel_dyn_section(layout));
2585 }
2586
2587 // Whether two EABI versions are compatible.
2588 static bool
2589 are_eabi_versions_compatible(elfcpp::Elf_Word v1, elfcpp::Elf_Word v2);
2590
2591 // Merge processor-specific flags from input object and those in the ELF
2592 // header of the output.
2593 void
2594 merge_processor_specific_flags(const std::string&, elfcpp::Elf_Word);
2595
2596 // Get the secondary compatible architecture.
2597 static int
2598 get_secondary_compatible_arch(const Attributes_section_data*);
2599
2600 // Set the secondary compatible architecture.
2601 static void
2602 set_secondary_compatible_arch(Attributes_section_data*, int);
2603
2604 static int
2605 tag_cpu_arch_combine(const char*, int, int*, int, int);
2606
2607 // Helper to print AEABI enum tag value.
2608 static std::string
2609 aeabi_enum_name(unsigned int);
2610
2611 // Return string value for TAG_CPU_name.
2612 static std::string
2613 tag_cpu_name_value(unsigned int);
2614
2615 // Merge object attributes from input object and those in the output.
2616 void
2617 merge_object_attributes(const char*, const Attributes_section_data*);
2618
2619 // Helper to get an AEABI object attribute
2620 Object_attribute*
2621 get_aeabi_object_attribute(int tag) const
2622 {
2623 Attributes_section_data* pasd = this->attributes_section_data_;
2624 gold_assert(pasd != NULL);
2625 Object_attribute* attr =
2626 pasd->get_attribute(Object_attribute::OBJ_ATTR_PROC, tag);
2627 gold_assert(attr != NULL);
2628 return attr;
2629 }
2630
2631 //
2632 // Methods to support stub-generations.
2633 //
2634
2635 // Group input sections for stub generation.
2636 void
2637 group_sections(Layout*, section_size_type, bool);
2638
2639 // Scan a relocation for stub generation.
2640 void
2641 scan_reloc_for_stub(const Relocate_info<32, big_endian>*, unsigned int,
2642 const Sized_symbol<32>*, unsigned int,
2643 const Symbol_value<32>*,
2644 elfcpp::Elf_types<32>::Elf_Swxword, Arm_address);
2645
2646 // Scan a relocation section for stub.
2647 template<int sh_type>
2648 void
2649 scan_reloc_section_for_stubs(
2650 const Relocate_info<32, big_endian>* relinfo,
2651 const unsigned char* prelocs,
2652 size_t reloc_count,
2653 Output_section* output_section,
2654 bool needs_special_offset_handling,
2655 const unsigned char* view,
2656 elfcpp::Elf_types<32>::Elf_Addr view_address,
2657 section_size_type);
2658
2659 // Fix .ARM.exidx section coverage.
2660 void
2661 fix_exidx_coverage(Layout*, Arm_output_section<big_endian>*, Symbol_table*);
2662
2663 // Functors for STL set.
2664 struct output_section_address_less_than
2665 {
2666 bool
2667 operator()(const Output_section* s1, const Output_section* s2) const
2668 { return s1->address() < s2->address(); }
2669 };
2670
2671 // Information about this specific target which we pass to the
2672 // general Target structure.
2673 static const Target::Target_info arm_info;
2674
2675 // The types of GOT entries needed for this platform.
2676 enum Got_type
2677 {
2678 GOT_TYPE_STANDARD = 0, // GOT entry for a regular symbol
2679 GOT_TYPE_TLS_NOFFSET = 1, // GOT entry for negative TLS offset
2680 GOT_TYPE_TLS_OFFSET = 2, // GOT entry for positive TLS offset
2681 GOT_TYPE_TLS_PAIR = 3, // GOT entry for TLS module/offset pair
2682 GOT_TYPE_TLS_DESC = 4 // GOT entry for TLS_DESC pair
2683 };
2684
2685 typedef typename std::vector<Stub_table<big_endian>*> Stub_table_list;
2686
2687 // Map input section to Arm_input_section.
2688 typedef Unordered_map<Section_id,
2689 Arm_input_section<big_endian>*,
2690 Section_id_hash>
2691 Arm_input_section_map;
2692
2693 // Map output addresses to relocs for Cortex-A8 erratum.
2694 typedef Unordered_map<Arm_address, const Cortex_a8_reloc*>
2695 Cortex_a8_relocs_info;
2696
2697 // The GOT section.
2698 Arm_output_data_got<big_endian>* got_;
2699 // The PLT section.
2700 Output_data_plt_arm<big_endian>* plt_;
2701 // The GOT PLT section.
2702 Output_data_space* got_plt_;
2703 // The dynamic reloc section.
2704 Reloc_section* rel_dyn_;
2705 // Relocs saved to avoid a COPY reloc.
2706 Copy_relocs<elfcpp::SHT_REL, 32, big_endian> copy_relocs_;
2707 // Space for variables copied with a COPY reloc.
2708 Output_data_space* dynbss_;
2709 // Offset of the GOT entry for the TLS module index.
2710 unsigned int got_mod_index_offset_;
2711 // True if the _TLS_MODULE_BASE_ symbol has been defined.
2712 bool tls_base_symbol_defined_;
2713 // Vector of Stub_tables created.
2714 Stub_table_list stub_tables_;
2715 // Stub factory.
2716 const Stub_factory &stub_factory_;
2717 // Whether we can use BLX.
2718 bool may_use_blx_;
2719 // Whether we force PIC branch veneers.
2720 bool should_force_pic_veneer_;
2721 // Map for locating Arm_input_sections.
2722 Arm_input_section_map arm_input_section_map_;
2723 // Attributes section data in output.
2724 Attributes_section_data* attributes_section_data_;
2725 // Whether we want to fix code for Cortex-A8 erratum.
2726 bool fix_cortex_a8_;
2727 // Map addresses to relocs for Cortex-A8 erratum.
2728 Cortex_a8_relocs_info cortex_a8_relocs_info_;
2729 };
2730
2731 template<bool big_endian>
2732 const Target::Target_info Target_arm<big_endian>::arm_info =
2733 {
2734 32, // size
2735 big_endian, // is_big_endian
2736 elfcpp::EM_ARM, // machine_code
2737 false, // has_make_symbol
2738 false, // has_resolve
2739 false, // has_code_fill
2740 true, // is_default_stack_executable
2741 '\0', // wrap_char
2742 "/usr/lib/libc.so.1", // dynamic_linker
2743 0x8000, // default_text_segment_address
2744 0x1000, // abi_pagesize (overridable by -z max-page-size)
2745 0x1000, // common_pagesize (overridable by -z common-page-size)
2746 elfcpp::SHN_UNDEF, // small_common_shndx
2747 elfcpp::SHN_UNDEF, // large_common_shndx
2748 0, // small_common_section_flags
2749 0, // large_common_section_flags
2750 ".ARM.attributes", // attributes_section
2751 "aeabi" // attributes_vendor
2752 };
2753
2754 // Arm relocate functions class
2755 //
2756
2757 template<bool big_endian>
2758 class Arm_relocate_functions : public Relocate_functions<32, big_endian>
2759 {
2760 public:
2761 typedef enum
2762 {
2763 STATUS_OKAY, // No error during relocation.
2764 STATUS_OVERFLOW, // Relocation oveflow.
2765 STATUS_BAD_RELOC // Relocation cannot be applied.
2766 } Status;
2767
2768 private:
2769 typedef Relocate_functions<32, big_endian> Base;
2770 typedef Arm_relocate_functions<big_endian> This;
2771
2772 // Encoding of imm16 argument for movt and movw ARM instructions
2773 // from ARM ARM:
2774 //
2775 // imm16 := imm4 | imm12
2776 //
2777 // f e d c b a 9 8 7 6 5 4 3 2 1 0 f e d c b a 9 8 7 6 5 4 3 2 1 0
2778 // +-------+---------------+-------+-------+-----------------------+
2779 // | | |imm4 | |imm12 |
2780 // +-------+---------------+-------+-------+-----------------------+
2781
2782 // Extract the relocation addend from VAL based on the ARM
2783 // instruction encoding described above.
2784 static inline typename elfcpp::Swap<32, big_endian>::Valtype
2785 extract_arm_movw_movt_addend(
2786 typename elfcpp::Swap<32, big_endian>::Valtype val)
2787 {
2788 // According to the Elf ABI for ARM Architecture the immediate
2789 // field is sign-extended to form the addend.
2790 return utils::sign_extend<16>(((val >> 4) & 0xf000) | (val & 0xfff));
2791 }
2792
2793 // Insert X into VAL based on the ARM instruction encoding described
2794 // above.
2795 static inline typename elfcpp::Swap<32, big_endian>::Valtype
2796 insert_val_arm_movw_movt(
2797 typename elfcpp::Swap<32, big_endian>::Valtype val,
2798 typename elfcpp::Swap<32, big_endian>::Valtype x)
2799 {
2800 val &= 0xfff0f000;
2801 val |= x & 0x0fff;
2802 val |= (x & 0xf000) << 4;
2803 return val;
2804 }
2805
2806 // Encoding of imm16 argument for movt and movw Thumb2 instructions
2807 // from ARM ARM:
2808 //
2809 // imm16 := imm4 | i | imm3 | imm8
2810 //
2811 // f e d c b a 9 8 7 6 5 4 3 2 1 0 f e d c b a 9 8 7 6 5 4 3 2 1 0
2812 // +---------+-+-----------+-------++-+-----+-------+---------------+
2813 // | |i| |imm4 || |imm3 | |imm8 |
2814 // +---------+-+-----------+-------++-+-----+-------+---------------+
2815
2816 // Extract the relocation addend from VAL based on the Thumb2
2817 // instruction encoding described above.
2818 static inline typename elfcpp::Swap<32, big_endian>::Valtype
2819 extract_thumb_movw_movt_addend(
2820 typename elfcpp::Swap<32, big_endian>::Valtype val)
2821 {
2822 // According to the Elf ABI for ARM Architecture the immediate
2823 // field is sign-extended to form the addend.
2824 return utils::sign_extend<16>(((val >> 4) & 0xf000)
2825 | ((val >> 15) & 0x0800)
2826 | ((val >> 4) & 0x0700)
2827 | (val & 0x00ff));
2828 }
2829
2830 // Insert X into VAL based on the Thumb2 instruction encoding
2831 // described above.
2832 static inline typename elfcpp::Swap<32, big_endian>::Valtype
2833 insert_val_thumb_movw_movt(
2834 typename elfcpp::Swap<32, big_endian>::Valtype val,
2835 typename elfcpp::Swap<32, big_endian>::Valtype x)
2836 {
2837 val &= 0xfbf08f00;
2838 val |= (x & 0xf000) << 4;
2839 val |= (x & 0x0800) << 15;
2840 val |= (x & 0x0700) << 4;
2841 val |= (x & 0x00ff);
2842 return val;
2843 }
2844
2845 // Calculate the smallest constant Kn for the specified residual.
2846 // (see (AAELF 4.6.1.4 Static ARM relocations, Group Relocations, p.32)
2847 static uint32_t
2848 calc_grp_kn(typename elfcpp::Swap<32, big_endian>::Valtype residual)
2849 {
2850 int32_t msb;
2851
2852 if (residual == 0)
2853 return 0;
2854 // Determine the most significant bit in the residual and
2855 // align the resulting value to a 2-bit boundary.
2856 for (msb = 30; (msb >= 0) && !(residual & (3 << msb)); msb -= 2)
2857 ;
2858 // The desired shift is now (msb - 6), or zero, whichever
2859 // is the greater.
2860 return (((msb - 6) < 0) ? 0 : (msb - 6));
2861 }
2862
2863 // Calculate the final residual for the specified group index.
2864 // If the passed group index is less than zero, the method will return
2865 // the value of the specified residual without any change.
2866 // (see (AAELF 4.6.1.4 Static ARM relocations, Group Relocations, p.32)
2867 static typename elfcpp::Swap<32, big_endian>::Valtype
2868 calc_grp_residual(typename elfcpp::Swap<32, big_endian>::Valtype residual,
2869 const int group)
2870 {
2871 for (int n = 0; n <= group; n++)
2872 {
2873 // Calculate which part of the value to mask.
2874 uint32_t shift = calc_grp_kn(residual);
2875 // Calculate the residual for the next time around.
2876 residual &= ~(residual & (0xff << shift));
2877 }
2878
2879 return residual;
2880 }
2881
2882 // Calculate the value of Gn for the specified group index.
2883 // We return it in the form of an encoded constant-and-rotation.
2884 // (see (AAELF 4.6.1.4 Static ARM relocations, Group Relocations, p.32)
2885 static typename elfcpp::Swap<32, big_endian>::Valtype
2886 calc_grp_gn(typename elfcpp::Swap<32, big_endian>::Valtype residual,
2887 const int group)
2888 {
2889 typename elfcpp::Swap<32, big_endian>::Valtype gn = 0;
2890 uint32_t shift = 0;
2891
2892 for (int n = 0; n <= group; n++)
2893 {
2894 // Calculate which part of the value to mask.
2895 shift = calc_grp_kn(residual);
2896 // Calculate Gn in 32-bit as well as encoded constant-and-rotation form.
2897 gn = residual & (0xff << shift);
2898 // Calculate the residual for the next time around.
2899 residual &= ~gn;
2900 }
2901 // Return Gn in the form of an encoded constant-and-rotation.
2902 return ((gn >> shift) | ((gn <= 0xff ? 0 : (32 - shift) / 2) << 8));
2903 }
2904
2905 public:
2906 // Handle ARM long branches.
2907 static typename This::Status
2908 arm_branch_common(unsigned int, const Relocate_info<32, big_endian>*,
2909 unsigned char *, const Sized_symbol<32>*,
2910 const Arm_relobj<big_endian>*, unsigned int,
2911 const Symbol_value<32>*, Arm_address, Arm_address, bool);
2912
2913 // Handle THUMB long branches.
2914 static typename This::Status
2915 thumb_branch_common(unsigned int, const Relocate_info<32, big_endian>*,
2916 unsigned char *, const Sized_symbol<32>*,
2917 const Arm_relobj<big_endian>*, unsigned int,
2918 const Symbol_value<32>*, Arm_address, Arm_address, bool);
2919
2920
2921 // Return the branch offset of a 32-bit THUMB branch.
2922 static inline int32_t
2923 thumb32_branch_offset(uint16_t upper_insn, uint16_t lower_insn)
2924 {
2925 // We use the Thumb-2 encoding (backwards compatible with Thumb-1)
2926 // involving the J1 and J2 bits.
2927 uint32_t s = (upper_insn & (1U << 10)) >> 10;
2928 uint32_t upper = upper_insn & 0x3ffU;
2929 uint32_t lower = lower_insn & 0x7ffU;
2930 uint32_t j1 = (lower_insn & (1U << 13)) >> 13;
2931 uint32_t j2 = (lower_insn & (1U << 11)) >> 11;
2932 uint32_t i1 = j1 ^ s ? 0 : 1;
2933 uint32_t i2 = j2 ^ s ? 0 : 1;
2934
2935 return utils::sign_extend<25>((s << 24) | (i1 << 23) | (i2 << 22)
2936 | (upper << 12) | (lower << 1));
2937 }
2938
2939 // Insert OFFSET to a 32-bit THUMB branch and return the upper instruction.
2940 // UPPER_INSN is the original upper instruction of the branch. Caller is
2941 // responsible for overflow checking and BLX offset adjustment.
2942 static inline uint16_t
2943 thumb32_branch_upper(uint16_t upper_insn, int32_t offset)
2944 {
2945 uint32_t s = offset < 0 ? 1 : 0;
2946 uint32_t bits = static_cast<uint32_t>(offset);
2947 return (upper_insn & ~0x7ffU) | ((bits >> 12) & 0x3ffU) | (s << 10);
2948 }
2949
2950 // Insert OFFSET to a 32-bit THUMB branch and return the lower instruction.
2951 // LOWER_INSN is the original lower instruction of the branch. Caller is
2952 // responsible for overflow checking and BLX offset adjustment.
2953 static inline uint16_t
2954 thumb32_branch_lower(uint16_t lower_insn, int32_t offset)
2955 {
2956 uint32_t s = offset < 0 ? 1 : 0;
2957 uint32_t bits = static_cast<uint32_t>(offset);
2958 return ((lower_insn & ~0x2fffU)
2959 | ((((bits >> 23) & 1) ^ !s) << 13)
2960 | ((((bits >> 22) & 1) ^ !s) << 11)
2961 | ((bits >> 1) & 0x7ffU));
2962 }
2963
2964 // Return the branch offset of a 32-bit THUMB conditional branch.
2965 static inline int32_t
2966 thumb32_cond_branch_offset(uint16_t upper_insn, uint16_t lower_insn)
2967 {
2968 uint32_t s = (upper_insn & 0x0400U) >> 10;
2969 uint32_t j1 = (lower_insn & 0x2000U) >> 13;
2970 uint32_t j2 = (lower_insn & 0x0800U) >> 11;
2971 uint32_t lower = (lower_insn & 0x07ffU);
2972 uint32_t upper = (s << 8) | (j2 << 7) | (j1 << 6) | (upper_insn & 0x003fU);
2973
2974 return utils::sign_extend<21>((upper << 12) | (lower << 1));
2975 }
2976
2977 // Insert OFFSET to a 32-bit THUMB conditional branch and return the upper
2978 // instruction. UPPER_INSN is the original upper instruction of the branch.
2979 // Caller is responsible for overflow checking.
2980 static inline uint16_t
2981 thumb32_cond_branch_upper(uint16_t upper_insn, int32_t offset)
2982 {
2983 uint32_t s = offset < 0 ? 1 : 0;
2984 uint32_t bits = static_cast<uint32_t>(offset);
2985 return (upper_insn & 0xfbc0U) | (s << 10) | ((bits & 0x0003f000U) >> 12);
2986 }
2987
2988 // Insert OFFSET to a 32-bit THUMB conditional branch and return the lower
2989 // instruction. LOWER_INSN is the original lower instruction of the branch.
2990 // Caller is reponsible for overflow checking.
2991 static inline uint16_t
2992 thumb32_cond_branch_lower(uint16_t lower_insn, int32_t offset)
2993 {
2994 uint32_t bits = static_cast<uint32_t>(offset);
2995 uint32_t j2 = (bits & 0x00080000U) >> 19;
2996 uint32_t j1 = (bits & 0x00040000U) >> 18;
2997 uint32_t lo = (bits & 0x00000ffeU) >> 1;
2998
2999 return (lower_insn & 0xd000U) | (j1 << 13) | (j2 << 11) | lo;
3000 }
3001
3002 // R_ARM_ABS8: S + A
3003 static inline typename This::Status
3004 abs8(unsigned char *view,
3005 const Sized_relobj<32, big_endian>* object,
3006 const Symbol_value<32>* psymval)
3007 {
3008 typedef typename elfcpp::Swap<8, big_endian>::Valtype Valtype;
3009 typedef typename elfcpp::Swap<32, big_endian>::Valtype Reltype;
3010 Valtype* wv = reinterpret_cast<Valtype*>(view);
3011 Valtype val = elfcpp::Swap<8, big_endian>::readval(wv);
3012 Reltype addend = utils::sign_extend<8>(val);
3013 Reltype x = psymval->value(object, addend);
3014 val = utils::bit_select(val, x, 0xffU);
3015 elfcpp::Swap<8, big_endian>::writeval(wv, val);
3016
3017 // R_ARM_ABS8 permits signed or unsigned results.
3018 int signed_x = static_cast<int32_t>(x);
3019 return ((signed_x < -128 || signed_x > 255)
3020 ? This::STATUS_OVERFLOW
3021 : This::STATUS_OKAY);
3022 }
3023
3024 // R_ARM_THM_ABS5: S + A
3025 static inline typename This::Status
3026 thm_abs5(unsigned char *view,
3027 const Sized_relobj<32, big_endian>* object,
3028 const Symbol_value<32>* psymval)
3029 {
3030 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
3031 typedef typename elfcpp::Swap<32, big_endian>::Valtype Reltype;
3032 Valtype* wv = reinterpret_cast<Valtype*>(view);
3033 Valtype val = elfcpp::Swap<16, big_endian>::readval(wv);
3034 Reltype addend = (val & 0x7e0U) >> 6;
3035 Reltype x = psymval->value(object, addend);
3036 val = utils::bit_select(val, x << 6, 0x7e0U);
3037 elfcpp::Swap<16, big_endian>::writeval(wv, val);
3038
3039 // R_ARM_ABS16 permits signed or unsigned results.
3040 int signed_x = static_cast<int32_t>(x);
3041 return ((signed_x < -32768 || signed_x > 65535)
3042 ? This::STATUS_OVERFLOW
3043 : This::STATUS_OKAY);
3044 }
3045
3046 // R_ARM_ABS12: S + A
3047 static inline typename This::Status
3048 abs12(unsigned char *view,
3049 const Sized_relobj<32, big_endian>* object,
3050 const Symbol_value<32>* psymval)
3051 {
3052 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
3053 typedef typename elfcpp::Swap<32, big_endian>::Valtype Reltype;
3054 Valtype* wv = reinterpret_cast<Valtype*>(view);
3055 Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
3056 Reltype addend = val & 0x0fffU;
3057 Reltype x = psymval->value(object, addend);
3058 val = utils::bit_select(val, x, 0x0fffU);
3059 elfcpp::Swap<32, big_endian>::writeval(wv, val);
3060 return (utils::has_overflow<12>(x)
3061 ? This::STATUS_OVERFLOW
3062 : This::STATUS_OKAY);
3063 }
3064
3065 // R_ARM_ABS16: S + A
3066 static inline typename This::Status
3067 abs16(unsigned char *view,
3068 const Sized_relobj<32, big_endian>* object,
3069 const Symbol_value<32>* psymval)
3070 {
3071 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
3072 typedef typename elfcpp::Swap<32, big_endian>::Valtype Reltype;
3073 Valtype* wv = reinterpret_cast<Valtype*>(view);
3074 Valtype val = elfcpp::Swap<16, big_endian>::readval(wv);
3075 Reltype addend = utils::sign_extend<16>(val);
3076 Reltype x = psymval->value(object, addend);
3077 val = utils::bit_select(val, x, 0xffffU);
3078 elfcpp::Swap<16, big_endian>::writeval(wv, val);
3079 return (utils::has_signed_unsigned_overflow<16>(x)
3080 ? This::STATUS_OVERFLOW
3081 : This::STATUS_OKAY);
3082 }
3083
3084 // R_ARM_ABS32: (S + A) | T
3085 static inline typename This::Status
3086 abs32(unsigned char *view,
3087 const Sized_relobj<32, big_endian>* object,
3088 const Symbol_value<32>* psymval,
3089 Arm_address thumb_bit)
3090 {
3091 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
3092 Valtype* wv = reinterpret_cast<Valtype*>(view);
3093 Valtype addend = elfcpp::Swap<32, big_endian>::readval(wv);
3094 Valtype x = psymval->value(object, addend) | thumb_bit;
3095 elfcpp::Swap<32, big_endian>::writeval(wv, x);
3096 return This::STATUS_OKAY;
3097 }
3098
3099 // R_ARM_REL32: (S + A) | T - P
3100 static inline typename This::Status
3101 rel32(unsigned char *view,
3102 const Sized_relobj<32, big_endian>* object,
3103 const Symbol_value<32>* psymval,
3104 Arm_address address,
3105 Arm_address thumb_bit)
3106 {
3107 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
3108 Valtype* wv = reinterpret_cast<Valtype*>(view);
3109 Valtype addend = elfcpp::Swap<32, big_endian>::readval(wv);
3110 Valtype x = (psymval->value(object, addend) | thumb_bit) - address;
3111 elfcpp::Swap<32, big_endian>::writeval(wv, x);
3112 return This::STATUS_OKAY;
3113 }
3114
3115 // R_ARM_THM_JUMP24: (S + A) | T - P
3116 static typename This::Status
3117 thm_jump19(unsigned char *view, const Arm_relobj<big_endian>* object,
3118 const Symbol_value<32>* psymval, Arm_address address,
3119 Arm_address thumb_bit);
3120
3121 // R_ARM_THM_JUMP6: S + A – P
3122 static inline typename This::Status
3123 thm_jump6(unsigned char *view,
3124 const Sized_relobj<32, big_endian>* object,
3125 const Symbol_value<32>* psymval,
3126 Arm_address address)
3127 {
3128 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
3129 typedef typename elfcpp::Swap<16, big_endian>::Valtype Reltype;
3130 Valtype* wv = reinterpret_cast<Valtype*>(view);
3131 Valtype val = elfcpp::Swap<16, big_endian>::readval(wv);
3132 // bit[9]:bit[7:3]:’0’ (mask: 0x02f8)
3133 Reltype addend = (((val & 0x0200) >> 3) | ((val & 0x00f8) >> 2));
3134 Reltype x = (psymval->value(object, addend) - address);
3135 val = (val & 0xfd07) | ((x & 0x0040) << 3) | ((val & 0x003e) << 2);
3136 elfcpp::Swap<16, big_endian>::writeval(wv, val);
3137 // CZB does only forward jumps.
3138 return ((x > 0x007e)
3139 ? This::STATUS_OVERFLOW
3140 : This::STATUS_OKAY);
3141 }
3142
3143 // R_ARM_THM_JUMP8: S + A – P
3144 static inline typename This::Status
3145 thm_jump8(unsigned char *view,
3146 const Sized_relobj<32, big_endian>* object,
3147 const Symbol_value<32>* psymval,
3148 Arm_address address)
3149 {
3150 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
3151 typedef typename elfcpp::Swap<16, big_endian>::Valtype Reltype;
3152 Valtype* wv = reinterpret_cast<Valtype*>(view);
3153 Valtype val = elfcpp::Swap<16, big_endian>::readval(wv);
3154 Reltype addend = utils::sign_extend<8>((val & 0x00ff) << 1);
3155 Reltype x = (psymval->value(object, addend) - address);
3156 elfcpp::Swap<16, big_endian>::writeval(wv, (val & 0xff00) | ((x & 0x01fe) >> 1));
3157 return (utils::has_overflow<8>(x)
3158 ? This::STATUS_OVERFLOW
3159 : This::STATUS_OKAY);
3160 }
3161
3162 // R_ARM_THM_JUMP11: S + A – P
3163 static inline typename This::Status
3164 thm_jump11(unsigned char *view,
3165 const Sized_relobj<32, big_endian>* object,
3166 const Symbol_value<32>* psymval,
3167 Arm_address address)
3168 {
3169 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
3170 typedef typename elfcpp::Swap<16, big_endian>::Valtype Reltype;
3171 Valtype* wv = reinterpret_cast<Valtype*>(view);
3172 Valtype val = elfcpp::Swap<16, big_endian>::readval(wv);
3173 Reltype addend = utils::sign_extend<11>((val & 0x07ff) << 1);
3174 Reltype x = (psymval->value(object, addend) - address);
3175 elfcpp::Swap<16, big_endian>::writeval(wv, (val & 0xf800) | ((x & 0x0ffe) >> 1));
3176 return (utils::has_overflow<11>(x)
3177 ? This::STATUS_OVERFLOW
3178 : This::STATUS_OKAY);
3179 }
3180
3181 // R_ARM_BASE_PREL: B(S) + A - P
3182 static inline typename This::Status
3183 base_prel(unsigned char* view,
3184 Arm_address origin,
3185 Arm_address address)
3186 {
3187 Base::rel32(view, origin - address);
3188 return STATUS_OKAY;
3189 }
3190
3191 // R_ARM_BASE_ABS: B(S) + A
3192 static inline typename This::Status
3193 base_abs(unsigned char* view,
3194 Arm_address origin)
3195 {
3196 Base::rel32(view, origin);
3197 return STATUS_OKAY;
3198 }
3199
3200 // R_ARM_GOT_BREL: GOT(S) + A - GOT_ORG
3201 static inline typename This::Status
3202 got_brel(unsigned char* view,
3203 typename elfcpp::Swap<32, big_endian>::Valtype got_offset)
3204 {
3205 Base::rel32(view, got_offset);
3206 return This::STATUS_OKAY;
3207 }
3208
3209 // R_ARM_GOT_PREL: GOT(S) + A - P
3210 static inline typename This::Status
3211 got_prel(unsigned char *view,
3212 Arm_address got_entry,
3213 Arm_address address)
3214 {
3215 Base::rel32(view, got_entry - address);
3216 return This::STATUS_OKAY;
3217 }
3218
3219 // R_ARM_PREL: (S + A) | T - P
3220 static inline typename This::Status
3221 prel31(unsigned char *view,
3222 const Sized_relobj<32, big_endian>* object,
3223 const Symbol_value<32>* psymval,
3224 Arm_address address,
3225 Arm_address thumb_bit)
3226 {
3227 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
3228 Valtype* wv = reinterpret_cast<Valtype*>(view);
3229 Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
3230 Valtype addend = utils::sign_extend<31>(val);
3231 Valtype x = (psymval->value(object, addend) | thumb_bit) - address;
3232 val = utils::bit_select(val, x, 0x7fffffffU);
3233 elfcpp::Swap<32, big_endian>::writeval(wv, val);
3234 return (utils::has_overflow<31>(x) ?
3235 This::STATUS_OVERFLOW : This::STATUS_OKAY);
3236 }
3237
3238 // R_ARM_MOVW_ABS_NC: (S + A) | T (relative address base is )
3239 // R_ARM_MOVW_PREL_NC: (S + A) | T - P
3240 // R_ARM_MOVW_BREL_NC: ((S + A) | T) - B(S)
3241 // R_ARM_MOVW_BREL: ((S + A) | T) - B(S)
3242 static inline typename This::Status
3243 movw(unsigned char* view,
3244 const Sized_relobj<32, big_endian>* object,
3245 const Symbol_value<32>* psymval,
3246 Arm_address relative_address_base,
3247 Arm_address thumb_bit,
3248 bool check_overflow)
3249 {
3250 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
3251 Valtype* wv = reinterpret_cast<Valtype*>(view);
3252 Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
3253 Valtype addend = This::extract_arm_movw_movt_addend(val);
3254 Valtype x = ((psymval->value(object, addend) | thumb_bit)
3255 - relative_address_base);
3256 val = This::insert_val_arm_movw_movt(val, x);
3257 elfcpp::Swap<32, big_endian>::writeval(wv, val);
3258 return ((check_overflow && utils::has_overflow<16>(x))
3259 ? This::STATUS_OVERFLOW
3260 : This::STATUS_OKAY);
3261 }
3262
3263 // R_ARM_MOVT_ABS: S + A (relative address base is 0)
3264 // R_ARM_MOVT_PREL: S + A - P
3265 // R_ARM_MOVT_BREL: S + A - B(S)
3266 static inline typename This::Status
3267 movt(unsigned char* view,
3268 const Sized_relobj<32, big_endian>* object,
3269 const Symbol_value<32>* psymval,
3270 Arm_address relative_address_base)
3271 {
3272 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
3273 Valtype* wv = reinterpret_cast<Valtype*>(view);
3274 Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
3275 Valtype addend = This::extract_arm_movw_movt_addend(val);
3276 Valtype x = (psymval->value(object, addend) - relative_address_base) >> 16;
3277 val = This::insert_val_arm_movw_movt(val, x);
3278 elfcpp::Swap<32, big_endian>::writeval(wv, val);
3279 // FIXME: IHI0044D says that we should check for overflow.
3280 return This::STATUS_OKAY;
3281 }
3282
3283 // R_ARM_THM_MOVW_ABS_NC: S + A | T (relative_address_base is 0)
3284 // R_ARM_THM_MOVW_PREL_NC: (S + A) | T - P
3285 // R_ARM_THM_MOVW_BREL_NC: ((S + A) | T) - B(S)
3286 // R_ARM_THM_MOVW_BREL: ((S + A) | T) - B(S)
3287 static inline typename This::Status
3288 thm_movw(unsigned char *view,
3289 const Sized_relobj<32, big_endian>* object,
3290 const Symbol_value<32>* psymval,
3291 Arm_address relative_address_base,
3292 Arm_address thumb_bit,
3293 bool check_overflow)
3294 {
3295 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
3296 typedef typename elfcpp::Swap<32, big_endian>::Valtype Reltype;
3297 Valtype* wv = reinterpret_cast<Valtype*>(view);
3298 Reltype val = (elfcpp::Swap<16, big_endian>::readval(wv) << 16)
3299 | elfcpp::Swap<16, big_endian>::readval(wv + 1);
3300 Reltype addend = This::extract_thumb_movw_movt_addend(val);
3301 Reltype x =
3302 (psymval->value(object, addend) | thumb_bit) - relative_address_base;
3303 val = This::insert_val_thumb_movw_movt(val, x);
3304 elfcpp::Swap<16, big_endian>::writeval(wv, val >> 16);
3305 elfcpp::Swap<16, big_endian>::writeval(wv + 1, val & 0xffff);
3306 return ((check_overflow && utils::has_overflow<16>(x))
3307 ? This::STATUS_OVERFLOW
3308 : This::STATUS_OKAY);
3309 }
3310
3311 // R_ARM_THM_MOVT_ABS: S + A (relative address base is 0)
3312 // R_ARM_THM_MOVT_PREL: S + A - P
3313 // R_ARM_THM_MOVT_BREL: S + A - B(S)
3314 static inline typename This::Status
3315 thm_movt(unsigned char* view,
3316 const Sized_relobj<32, big_endian>* object,
3317 const Symbol_value<32>* psymval,
3318 Arm_address relative_address_base)
3319 {
3320 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
3321 typedef typename elfcpp::Swap<32, big_endian>::Valtype Reltype;
3322 Valtype* wv = reinterpret_cast<Valtype*>(view);
3323 Reltype val = (elfcpp::Swap<16, big_endian>::readval(wv) << 16)
3324 | elfcpp::Swap<16, big_endian>::readval(wv + 1);
3325 Reltype addend = This::extract_thumb_movw_movt_addend(val);
3326 Reltype x = (psymval->value(object, addend) - relative_address_base) >> 16;
3327 val = This::insert_val_thumb_movw_movt(val, x);
3328 elfcpp::Swap<16, big_endian>::writeval(wv, val >> 16);
3329 elfcpp::Swap<16, big_endian>::writeval(wv + 1, val & 0xffff);
3330 return This::STATUS_OKAY;
3331 }
3332
3333 // R_ARM_THM_ALU_PREL_11_0: ((S + A) | T) - Pa (Thumb32)
3334 static inline typename This::Status
3335 thm_alu11(unsigned char* view,
3336 const Sized_relobj<32, big_endian>* object,
3337 const Symbol_value<32>* psymval,
3338 Arm_address address,
3339 Arm_address thumb_bit)
3340 {
3341 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
3342 typedef typename elfcpp::Swap<32, big_endian>::Valtype Reltype;
3343 Valtype* wv = reinterpret_cast<Valtype*>(view);
3344 Reltype insn = (elfcpp::Swap<16, big_endian>::readval(wv) << 16)
3345 | elfcpp::Swap<16, big_endian>::readval(wv + 1);
3346
3347 // f e d c b|a|9|8 7 6 5|4|3 2 1 0||f|e d c|b a 9 8|7 6 5 4 3 2 1 0
3348 // -----------------------------------------------------------------------
3349 // ADD{S} 1 1 1 1 0|i|0|1 0 0 0|S|1 1 0 1||0|imm3 |Rd |imm8
3350 // ADDW 1 1 1 1 0|i|1|0 0 0 0|0|1 1 0 1||0|imm3 |Rd |imm8
3351 // ADR[+] 1 1 1 1 0|i|1|0 0 0 0|0|1 1 1 1||0|imm3 |Rd |imm8
3352 // SUB{S} 1 1 1 1 0|i|0|1 1 0 1|S|1 1 0 1||0|imm3 |Rd |imm8
3353 // SUBW 1 1 1 1 0|i|1|0 1 0 1|0|1 1 0 1||0|imm3 |Rd |imm8
3354 // ADR[-] 1 1 1 1 0|i|1|0 1 0 1|0|1 1 1 1||0|imm3 |Rd |imm8
3355
3356 // Determine a sign for the addend.
3357 const int sign = ((insn & 0xf8ef0000) == 0xf0ad0000
3358 || (insn & 0xf8ef0000) == 0xf0af0000) ? -1 : 1;
3359 // Thumb2 addend encoding:
3360 // imm12 := i | imm3 | imm8
3361 int32_t addend = (insn & 0xff)
3362 | ((insn & 0x00007000) >> 4)
3363 | ((insn & 0x04000000) >> 15);
3364 // Apply a sign to the added.
3365 addend *= sign;
3366
3367 int32_t x = (psymval->value(object, addend) | thumb_bit)
3368 - (address & 0xfffffffc);
3369 Reltype val = abs(x);
3370 // Mask out the value and a distinct part of the ADD/SUB opcode
3371 // (bits 7:5 of opword).
3372 insn = (insn & 0xfb0f8f00)
3373 | (val & 0xff)
3374 | ((val & 0x700) << 4)
3375 | ((val & 0x800) << 15);
3376 // Set the opcode according to whether the value to go in the
3377 // place is negative.
3378 if (x < 0)
3379 insn |= 0x00a00000;
3380
3381 elfcpp::Swap<16, big_endian>::writeval(wv, insn >> 16);
3382 elfcpp::Swap<16, big_endian>::writeval(wv + 1, insn & 0xffff);
3383 return ((val > 0xfff) ?
3384 This::STATUS_OVERFLOW : This::STATUS_OKAY);
3385 }
3386
3387 // R_ARM_THM_PC8: S + A - Pa (Thumb)
3388 static inline typename This::Status
3389 thm_pc8(unsigned char* view,
3390 const Sized_relobj<32, big_endian>* object,
3391 const Symbol_value<32>* psymval,
3392 Arm_address address)
3393 {
3394 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
3395 typedef typename elfcpp::Swap<16, big_endian>::Valtype Reltype;
3396 Valtype* wv = reinterpret_cast<Valtype*>(view);
3397 Valtype insn = elfcpp::Swap<16, big_endian>::readval(wv);
3398 Reltype addend = ((insn & 0x00ff) << 2);
3399 int32_t x = (psymval->value(object, addend) - (address & 0xfffffffc));
3400 Reltype val = abs(x);
3401 insn = (insn & 0xff00) | ((val & 0x03fc) >> 2);
3402
3403 elfcpp::Swap<16, big_endian>::writeval(wv, insn);
3404 return ((val > 0x03fc)
3405 ? This::STATUS_OVERFLOW
3406 : This::STATUS_OKAY);
3407 }
3408
3409 // R_ARM_THM_PC12: S + A - Pa (Thumb32)
3410 static inline typename This::Status
3411 thm_pc12(unsigned char* view,
3412 const Sized_relobj<32, big_endian>* object,
3413 const Symbol_value<32>* psymval,
3414 Arm_address address)
3415 {
3416 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
3417 typedef typename elfcpp::Swap<32, big_endian>::Valtype Reltype;
3418 Valtype* wv = reinterpret_cast<Valtype*>(view);
3419 Reltype insn = (elfcpp::Swap<16, big_endian>::readval(wv) << 16)
3420 | elfcpp::Swap<16, big_endian>::readval(wv + 1);
3421 // Determine a sign for the addend (positive if the U bit is 1).
3422 const int sign = (insn & 0x00800000) ? 1 : -1;
3423 int32_t addend = (insn & 0xfff);
3424 // Apply a sign to the added.
3425 addend *= sign;
3426
3427 int32_t x = (psymval->value(object, addend) - (address & 0xfffffffc));
3428 Reltype val = abs(x);
3429 // Mask out and apply the value and the U bit.
3430 insn = (insn & 0xff7ff000) | (val & 0xfff);
3431 // Set the U bit according to whether the value to go in the
3432 // place is positive.
3433 if (x >= 0)
3434 insn |= 0x00800000;
3435
3436 elfcpp::Swap<16, big_endian>::writeval(wv, insn >> 16);
3437 elfcpp::Swap<16, big_endian>::writeval(wv + 1, insn & 0xffff);
3438 return ((val > 0xfff) ?
3439 This::STATUS_OVERFLOW : This::STATUS_OKAY);
3440 }
3441
3442 // R_ARM_V4BX
3443 static inline typename This::Status
3444 v4bx(const Relocate_info<32, big_endian>* relinfo,
3445 unsigned char *view,
3446 const Arm_relobj<big_endian>* object,
3447 const Arm_address address,
3448 const bool is_interworking)
3449 {
3450
3451 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
3452 Valtype* wv = reinterpret_cast<Valtype*>(view);
3453 Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
3454
3455 // Ensure that we have a BX instruction.
3456 gold_assert((val & 0x0ffffff0) == 0x012fff10);
3457 const uint32_t reg = (val & 0xf);
3458 if (is_interworking && reg != 0xf)
3459 {
3460 Stub_table<big_endian>* stub_table =
3461 object->stub_table(relinfo->data_shndx);
3462 gold_assert(stub_table != NULL);
3463
3464 Arm_v4bx_stub* stub = stub_table->find_arm_v4bx_stub(reg);
3465 gold_assert(stub != NULL);
3466
3467 int32_t veneer_address =
3468 stub_table->address() + stub->offset() - 8 - address;
3469 gold_assert((veneer_address <= ARM_MAX_FWD_BRANCH_OFFSET)
3470 && (veneer_address >= ARM_MAX_BWD_BRANCH_OFFSET));
3471 // Replace with a branch to veneer (B <addr>)
3472 val = (val & 0xf0000000) | 0x0a000000
3473 | ((veneer_address >> 2) & 0x00ffffff);
3474 }
3475 else
3476 {
3477 // Preserve Rm (lowest four bits) and the condition code
3478 // (highest four bits). Other bits encode MOV PC,Rm.
3479 val = (val & 0xf000000f) | 0x01a0f000;
3480 }
3481 elfcpp::Swap<32, big_endian>::writeval(wv, val);
3482 return This::STATUS_OKAY;
3483 }
3484
3485 // R_ARM_ALU_PC_G0_NC: ((S + A) | T) - P
3486 // R_ARM_ALU_PC_G0: ((S + A) | T) - P
3487 // R_ARM_ALU_PC_G1_NC: ((S + A) | T) - P
3488 // R_ARM_ALU_PC_G1: ((S + A) | T) - P
3489 // R_ARM_ALU_PC_G2: ((S + A) | T) - P
3490 // R_ARM_ALU_SB_G0_NC: ((S + A) | T) - B(S)
3491 // R_ARM_ALU_SB_G0: ((S + A) | T) - B(S)
3492 // R_ARM_ALU_SB_G1_NC: ((S + A) | T) - B(S)
3493 // R_ARM_ALU_SB_G1: ((S + A) | T) - B(S)
3494 // R_ARM_ALU_SB_G2: ((S + A) | T) - B(S)
3495 static inline typename This::Status
3496 arm_grp_alu(unsigned char* view,
3497 const Sized_relobj<32, big_endian>* object,
3498 const Symbol_value<32>* psymval,
3499 const int group,
3500 Arm_address address,
3501 Arm_address thumb_bit,
3502 bool check_overflow)
3503 {
3504 gold_assert(group >= 0 && group < 3);
3505 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
3506 Valtype* wv = reinterpret_cast<Valtype*>(view);
3507 Valtype insn = elfcpp::Swap<32, big_endian>::readval(wv);
3508
3509 // ALU group relocations are allowed only for the ADD/SUB instructions.
3510 // (0x00800000 - ADD, 0x00400000 - SUB)
3511 const Valtype opcode = insn & 0x01e00000;
3512 if (opcode != 0x00800000 && opcode != 0x00400000)
3513 return This::STATUS_BAD_RELOC;
3514
3515 // Determine a sign for the addend.
3516 const int sign = (opcode == 0x00800000) ? 1 : -1;
3517 // shifter = rotate_imm * 2
3518 const uint32_t shifter = (insn & 0xf00) >> 7;
3519 // Initial addend value.
3520 int32_t addend = insn & 0xff;
3521 // Rotate addend right by shifter.
3522 addend = (addend >> shifter) | (addend << (32 - shifter));
3523 // Apply a sign to the added.
3524 addend *= sign;
3525
3526 int32_t x = ((psymval->value(object, addend) | thumb_bit) - address);
3527 Valtype gn = Arm_relocate_functions::calc_grp_gn(abs(x), group);
3528 // Check for overflow if required
3529 if (check_overflow
3530 && (Arm_relocate_functions::calc_grp_residual(abs(x), group) != 0))
3531 return This::STATUS_OVERFLOW;
3532
3533 // Mask out the value and the ADD/SUB part of the opcode; take care
3534 // not to destroy the S bit.
3535 insn &= 0xff1ff000;
3536 // Set the opcode according to whether the value to go in the
3537 // place is negative.
3538 insn |= ((x < 0) ? 0x00400000 : 0x00800000);
3539 // Encode the offset (encoded Gn).
3540 insn |= gn;
3541
3542 elfcpp::Swap<32, big_endian>::writeval(wv, insn);
3543 return This::STATUS_OKAY;
3544 }
3545
3546 // R_ARM_LDR_PC_G0: S + A - P
3547 // R_ARM_LDR_PC_G1: S + A - P
3548 // R_ARM_LDR_PC_G2: S + A - P
3549 // R_ARM_LDR_SB_G0: S + A - B(S)
3550 // R_ARM_LDR_SB_G1: S + A - B(S)
3551 // R_ARM_LDR_SB_G2: S + A - B(S)
3552 static inline typename This::Status
3553 arm_grp_ldr(unsigned char* view,
3554 const Sized_relobj<32, big_endian>* object,
3555 const Symbol_value<32>* psymval,
3556 const int group,
3557 Arm_address address)
3558 {
3559 gold_assert(group >= 0 && group < 3);
3560 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
3561 Valtype* wv = reinterpret_cast<Valtype*>(view);
3562 Valtype insn = elfcpp::Swap<32, big_endian>::readval(wv);
3563
3564 const int sign = (insn & 0x00800000) ? 1 : -1;
3565 int32_t addend = (insn & 0xfff) * sign;
3566 int32_t x = (psymval->value(object, addend) - address);
3567 // Calculate the relevant G(n-1) value to obtain this stage residual.
3568 Valtype residual =
3569 Arm_relocate_functions::calc_grp_residual(abs(x), group - 1);
3570 if (residual >= 0x1000)
3571 return This::STATUS_OVERFLOW;
3572
3573 // Mask out the value and U bit.
3574 insn &= 0xff7ff000;
3575 // Set the U bit for non-negative values.
3576 if (x >= 0)
3577 insn |= 0x00800000;
3578 insn |= residual;
3579
3580 elfcpp::Swap<32, big_endian>::writeval(wv, insn);
3581 return This::STATUS_OKAY;
3582 }
3583
3584 // R_ARM_LDRS_PC_G0: S + A - P
3585 // R_ARM_LDRS_PC_G1: S + A - P
3586 // R_ARM_LDRS_PC_G2: S + A - P
3587 // R_ARM_LDRS_SB_G0: S + A - B(S)
3588 // R_ARM_LDRS_SB_G1: S + A - B(S)
3589 // R_ARM_LDRS_SB_G2: S + A - B(S)
3590 static inline typename This::Status
3591 arm_grp_ldrs(unsigned char* view,
3592 const Sized_relobj<32, big_endian>* object,
3593 const Symbol_value<32>* psymval,
3594 const int group,
3595 Arm_address address)
3596 {
3597 gold_assert(group >= 0 && group < 3);
3598 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
3599 Valtype* wv = reinterpret_cast<Valtype*>(view);
3600 Valtype insn = elfcpp::Swap<32, big_endian>::readval(wv);
3601
3602 const int sign = (insn & 0x00800000) ? 1 : -1;
3603 int32_t addend = (((insn & 0xf00) >> 4) + (insn & 0xf)) * sign;
3604 int32_t x = (psymval->value(object, addend) - address);
3605 // Calculate the relevant G(n-1) value to obtain this stage residual.
3606 Valtype residual =
3607 Arm_relocate_functions::calc_grp_residual(abs(x), group - 1);
3608 if (residual >= 0x100)
3609 return This::STATUS_OVERFLOW;
3610
3611 // Mask out the value and U bit.
3612 insn &= 0xff7ff0f0;
3613 // Set the U bit for non-negative values.
3614 if (x >= 0)
3615 insn |= 0x00800000;
3616 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
3617
3618 elfcpp::Swap<32, big_endian>::writeval(wv, insn);
3619 return This::STATUS_OKAY;
3620 }
3621
3622 // R_ARM_LDC_PC_G0: S + A - P
3623 // R_ARM_LDC_PC_G1: S + A - P
3624 // R_ARM_LDC_PC_G2: S + A - P
3625 // R_ARM_LDC_SB_G0: S + A - B(S)
3626 // R_ARM_LDC_SB_G1: S + A - B(S)
3627 // R_ARM_LDC_SB_G2: S + A - B(S)
3628 static inline typename This::Status
3629 arm_grp_ldc(unsigned char* view,
3630 const Sized_relobj<32, big_endian>* object,
3631 const Symbol_value<32>* psymval,
3632 const int group,
3633 Arm_address address)
3634 {
3635 gold_assert(group >= 0 && group < 3);
3636 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
3637 Valtype* wv = reinterpret_cast<Valtype*>(view);
3638 Valtype insn = elfcpp::Swap<32, big_endian>::readval(wv);
3639
3640 const int sign = (insn & 0x00800000) ? 1 : -1;
3641 int32_t addend = ((insn & 0xff) << 2) * sign;
3642 int32_t x = (psymval->value(object, addend) - address);
3643 // Calculate the relevant G(n-1) value to obtain this stage residual.
3644 Valtype residual =
3645 Arm_relocate_functions::calc_grp_residual(abs(x), group - 1);
3646 if ((residual & 0x3) != 0 || residual >= 0x400)
3647 return This::STATUS_OVERFLOW;
3648
3649 // Mask out the value and U bit.
3650 insn &= 0xff7fff00;
3651 // Set the U bit for non-negative values.
3652 if (x >= 0)
3653 insn |= 0x00800000;
3654 insn |= (residual >> 2);
3655
3656 elfcpp::Swap<32, big_endian>::writeval(wv, insn);
3657 return This::STATUS_OKAY;
3658 }
3659 };
3660
3661 // Relocate ARM long branches. This handles relocation types
3662 // R_ARM_CALL, R_ARM_JUMP24, R_ARM_PLT32 and R_ARM_XPC25.
3663 // If IS_WEAK_UNDEFINED_WITH_PLT is true. The target symbol is weakly
3664 // undefined and we do not use PLT in this relocation. In such a case,
3665 // the branch is converted into an NOP.
3666
3667 template<bool big_endian>
3668 typename Arm_relocate_functions<big_endian>::Status
3669 Arm_relocate_functions<big_endian>::arm_branch_common(
3670 unsigned int r_type,
3671 const Relocate_info<32, big_endian>* relinfo,
3672 unsigned char *view,
3673 const Sized_symbol<32>* gsym,
3674 const Arm_relobj<big_endian>* object,
3675 unsigned int r_sym,
3676 const Symbol_value<32>* psymval,
3677 Arm_address address,
3678 Arm_address thumb_bit,
3679 bool is_weakly_undefined_without_plt)
3680 {
3681 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
3682 Valtype* wv = reinterpret_cast<Valtype*>(view);
3683 Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
3684
3685 bool insn_is_b = (((val >> 28) & 0xf) <= 0xe)
3686 && ((val & 0x0f000000UL) == 0x0a000000UL);
3687 bool insn_is_uncond_bl = (val & 0xff000000UL) == 0xeb000000UL;
3688 bool insn_is_cond_bl = (((val >> 28) & 0xf) < 0xe)
3689 && ((val & 0x0f000000UL) == 0x0b000000UL);
3690 bool insn_is_blx = (val & 0xfe000000UL) == 0xfa000000UL;
3691 bool insn_is_any_branch = (val & 0x0e000000UL) == 0x0a000000UL;
3692
3693 // Check that the instruction is valid.
3694 if (r_type == elfcpp::R_ARM_CALL)
3695 {
3696 if (!insn_is_uncond_bl && !insn_is_blx)
3697 return This::STATUS_BAD_RELOC;
3698 }
3699 else if (r_type == elfcpp::R_ARM_JUMP24)
3700 {
3701 if (!insn_is_b && !insn_is_cond_bl)
3702 return This::STATUS_BAD_RELOC;
3703 }
3704 else if (r_type == elfcpp::R_ARM_PLT32)
3705 {
3706 if (!insn_is_any_branch)
3707 return This::STATUS_BAD_RELOC;
3708 }
3709 else if (r_type == elfcpp::R_ARM_XPC25)
3710 {
3711 // FIXME: AAELF document IH0044C does not say much about it other
3712 // than it being obsolete.
3713 if (!insn_is_any_branch)
3714 return This::STATUS_BAD_RELOC;
3715 }
3716 else
3717 gold_unreachable();
3718
3719 // A branch to an undefined weak symbol is turned into a jump to
3720 // the next instruction unless a PLT entry will be created.
3721 // Do the same for local undefined symbols.
3722 // The jump to the next instruction is optimized as a NOP depending
3723 // on the architecture.
3724 const Target_arm<big_endian>* arm_target =
3725 Target_arm<big_endian>::default_target();
3726 if (is_weakly_undefined_without_plt)
3727 {
3728 Valtype cond = val & 0xf0000000U;
3729 if (arm_target->may_use_arm_nop())
3730 val = cond | 0x0320f000;
3731 else
3732 val = cond | 0x01a00000; // Using pre-UAL nop: mov r0, r0.
3733 elfcpp::Swap<32, big_endian>::writeval(wv, val);
3734 return This::STATUS_OKAY;
3735 }
3736
3737 Valtype addend = utils::sign_extend<26>(val << 2);
3738 Valtype branch_target = psymval->value(object, addend);
3739 int32_t branch_offset = branch_target - address;
3740
3741 // We need a stub if the branch offset is too large or if we need
3742 // to switch mode.
3743 bool may_use_blx = arm_target->may_use_blx();
3744 Reloc_stub* stub = NULL;
3745 if (utils::has_overflow<26>(branch_offset)
3746 || ((thumb_bit != 0) && !(may_use_blx && r_type == elfcpp::R_ARM_CALL)))
3747 {
3748 Valtype unadjusted_branch_target = psymval->value(object, 0);
3749
3750 Stub_type stub_type =
3751 Reloc_stub::stub_type_for_reloc(r_type, address,
3752 unadjusted_branch_target,
3753 (thumb_bit != 0));
3754 if (stub_type != arm_stub_none)
3755 {
3756 Stub_table<big_endian>* stub_table =
3757 object->stub_table(relinfo->data_shndx);
3758 gold_assert(stub_table != NULL);
3759
3760 Reloc_stub::Key stub_key(stub_type, gsym, object, r_sym, addend);
3761 stub = stub_table->find_reloc_stub(stub_key);
3762 gold_assert(stub != NULL);
3763 thumb_bit = stub->stub_template()->entry_in_thumb_mode() ? 1 : 0;
3764 branch_target = stub_table->address() + stub->offset() + addend;
3765 branch_offset = branch_target - address;
3766 gold_assert(!utils::has_overflow<26>(branch_offset));
3767 }
3768 }
3769
3770 // At this point, if we still need to switch mode, the instruction
3771 // must either be a BLX or a BL that can be converted to a BLX.
3772 if (thumb_bit != 0)
3773 {
3774 // Turn BL to BLX.
3775 gold_assert(may_use_blx && r_type == elfcpp::R_ARM_CALL);
3776 val = (val & 0xffffff) | 0xfa000000 | ((branch_offset & 2) << 23);
3777 }
3778
3779 val = utils::bit_select(val, (branch_offset >> 2), 0xffffffUL);
3780 elfcpp::Swap<32, big_endian>::writeval(wv, val);
3781 return (utils::has_overflow<26>(branch_offset)
3782 ? This::STATUS_OVERFLOW : This::STATUS_OKAY);
3783 }
3784
3785 // Relocate THUMB long branches. This handles relocation types
3786 // R_ARM_THM_CALL, R_ARM_THM_JUMP24 and R_ARM_THM_XPC22.
3787 // If IS_WEAK_UNDEFINED_WITH_PLT is true. The target symbol is weakly
3788 // undefined and we do not use PLT in this relocation. In such a case,
3789 // the branch is converted into an NOP.
3790
3791 template<bool big_endian>
3792 typename Arm_relocate_functions<big_endian>::Status
3793 Arm_relocate_functions<big_endian>::thumb_branch_common(
3794 unsigned int r_type,
3795 const Relocate_info<32, big_endian>* relinfo,
3796 unsigned char *view,
3797 const Sized_symbol<32>* gsym,
3798 const Arm_relobj<big_endian>* object,
3799 unsigned int r_sym,
3800 const Symbol_value<32>* psymval,
3801 Arm_address address,
3802 Arm_address thumb_bit,
3803 bool is_weakly_undefined_without_plt)
3804 {
3805 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
3806 Valtype* wv = reinterpret_cast<Valtype*>(view);
3807 uint32_t upper_insn = elfcpp::Swap<16, big_endian>::readval(wv);
3808 uint32_t lower_insn = elfcpp::Swap<16, big_endian>::readval(wv + 1);
3809
3810 // FIXME: These tests are too loose and do not take THUMB/THUMB-2 difference
3811 // into account.
3812 bool is_bl_insn = (lower_insn & 0x1000U) == 0x1000U;
3813 bool is_blx_insn = (lower_insn & 0x1000U) == 0x0000U;
3814
3815 // Check that the instruction is valid.
3816 if (r_type == elfcpp::R_ARM_THM_CALL)
3817 {
3818 if (!is_bl_insn && !is_blx_insn)
3819 return This::STATUS_BAD_RELOC;
3820 }
3821 else if (r_type == elfcpp::R_ARM_THM_JUMP24)
3822 {
3823 // This cannot be a BLX.
3824 if (!is_bl_insn)
3825 return This::STATUS_BAD_RELOC;
3826 }
3827 else if (r_type == elfcpp::R_ARM_THM_XPC22)
3828 {
3829 // Check for Thumb to Thumb call.
3830 if (!is_blx_insn)
3831 return This::STATUS_BAD_RELOC;
3832 if (thumb_bit != 0)
3833 {
3834 gold_warning(_("%s: Thumb BLX instruction targets "
3835 "thumb function '%s'."),
3836 object->name().c_str(),
3837 (gsym ? gsym->name() : "(local)"));
3838 // Convert BLX to BL.
3839 lower_insn |= 0x1000U;
3840 }
3841 }
3842 else
3843 gold_unreachable();
3844
3845 // A branch to an undefined weak symbol is turned into a jump to
3846 // the next instruction unless a PLT entry will be created.
3847 // The jump to the next instruction is optimized as a NOP.W for
3848 // Thumb-2 enabled architectures.
3849 const Target_arm<big_endian>* arm_target =
3850 Target_arm<big_endian>::default_target();
3851 if (is_weakly_undefined_without_plt)
3852 {
3853 if (arm_target->may_use_thumb2_nop())
3854 {
3855 elfcpp::Swap<16, big_endian>::writeval(wv, 0xf3af);
3856 elfcpp::Swap<16, big_endian>::writeval(wv + 1, 0x8000);
3857 }
3858 else
3859 {
3860 elfcpp::Swap<16, big_endian>::writeval(wv, 0xe000);
3861 elfcpp::Swap<16, big_endian>::writeval(wv + 1, 0xbf00);
3862 }
3863 return This::STATUS_OKAY;
3864 }
3865
3866 int32_t addend = This::thumb32_branch_offset(upper_insn, lower_insn);
3867 Arm_address branch_target = psymval->value(object, addend);
3868
3869 // For BLX, bit 1 of target address comes from bit 1 of base address.
3870 bool may_use_blx = arm_target->may_use_blx();
3871 if (thumb_bit == 0 && may_use_blx)
3872 branch_target = utils::bit_select(branch_target, address, 0x2);
3873
3874 int32_t branch_offset = branch_target - address;
3875
3876 // We need a stub if the branch offset is too large or if we need
3877 // to switch mode.
3878 bool thumb2 = arm_target->using_thumb2();
3879 if ((!thumb2 && utils::has_overflow<23>(branch_offset))
3880 || (thumb2 && utils::has_overflow<25>(branch_offset))
3881 || ((thumb_bit == 0)
3882 && (((r_type == elfcpp::R_ARM_THM_CALL) && !may_use_blx)
3883 || r_type == elfcpp::R_ARM_THM_JUMP24)))
3884 {
3885 Arm_address unadjusted_branch_target = psymval->value(object, 0);
3886
3887 Stub_type stub_type =
3888 Reloc_stub::stub_type_for_reloc(r_type, address,
3889 unadjusted_branch_target,
3890 (thumb_bit != 0));
3891
3892 if (stub_type != arm_stub_none)
3893 {
3894 Stub_table<big_endian>* stub_table =
3895 object->stub_table(relinfo->data_shndx);
3896 gold_assert(stub_table != NULL);
3897
3898 Reloc_stub::Key stub_key(stub_type, gsym, object, r_sym, addend);
3899 Reloc_stub* stub = stub_table->find_reloc_stub(stub_key);
3900 gold_assert(stub != NULL);
3901 thumb_bit = stub->stub_template()->entry_in_thumb_mode() ? 1 : 0;
3902 branch_target = stub_table->address() + stub->offset() + addend;
3903 if (thumb_bit == 0 && may_use_blx)
3904 branch_target = utils::bit_select(branch_target, address, 0x2);
3905 branch_offset = branch_target - address;
3906 }
3907 }
3908
3909 // At this point, if we still need to switch mode, the instruction
3910 // must either be a BLX or a BL that can be converted to a BLX.
3911 if (thumb_bit == 0)
3912 {
3913 gold_assert(may_use_blx
3914 && (r_type == elfcpp::R_ARM_THM_CALL
3915 || r_type == elfcpp::R_ARM_THM_XPC22));
3916 // Make sure this is a BLX.
3917 lower_insn &= ~0x1000U;
3918 }
3919 else
3920 {
3921 // Make sure this is a BL.
3922 lower_insn |= 0x1000U;
3923 }
3924
3925 // For a BLX instruction, make sure that the relocation is rounded up
3926 // to a word boundary. This follows the semantics of the instruction
3927 // which specifies that bit 1 of the target address will come from bit
3928 // 1 of the base address.
3929 if ((lower_insn & 0x5000U) == 0x4000U)
3930 gold_assert((branch_offset & 3) == 0);
3931
3932 // Put BRANCH_OFFSET back into the insn. Assumes two's complement.
3933 // We use the Thumb-2 encoding, which is safe even if dealing with
3934 // a Thumb-1 instruction by virtue of our overflow check above. */
3935 upper_insn = This::thumb32_branch_upper(upper_insn, branch_offset);
3936 lower_insn = This::thumb32_branch_lower(lower_insn, branch_offset);
3937
3938 elfcpp::Swap<16, big_endian>::writeval(wv, upper_insn);
3939 elfcpp::Swap<16, big_endian>::writeval(wv + 1, lower_insn);
3940
3941 gold_assert(!utils::has_overflow<25>(branch_offset));
3942
3943 return ((thumb2
3944 ? utils::has_overflow<25>(branch_offset)
3945 : utils::has_overflow<23>(branch_offset))
3946 ? This::STATUS_OVERFLOW
3947 : This::STATUS_OKAY);
3948 }
3949
3950 // Relocate THUMB-2 long conditional branches.
3951 // If IS_WEAK_UNDEFINED_WITH_PLT is true. The target symbol is weakly
3952 // undefined and we do not use PLT in this relocation. In such a case,
3953 // the branch is converted into an NOP.
3954
3955 template<bool big_endian>
3956 typename Arm_relocate_functions<big_endian>::Status
3957 Arm_relocate_functions<big_endian>::thm_jump19(
3958 unsigned char *view,
3959 const Arm_relobj<big_endian>* object,
3960 const Symbol_value<32>* psymval,
3961 Arm_address address,
3962 Arm_address thumb_bit)
3963 {
3964 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
3965 Valtype* wv = reinterpret_cast<Valtype*>(view);
3966 uint32_t upper_insn = elfcpp::Swap<16, big_endian>::readval(wv);
3967 uint32_t lower_insn = elfcpp::Swap<16, big_endian>::readval(wv + 1);
3968 int32_t addend = This::thumb32_cond_branch_offset(upper_insn, lower_insn);
3969
3970 Arm_address branch_target = psymval->value(object, addend);
3971 int32_t branch_offset = branch_target - address;
3972
3973 // ??? Should handle interworking? GCC might someday try to
3974 // use this for tail calls.
3975 // FIXME: We do support thumb entry to PLT yet.
3976 if (thumb_bit == 0)
3977 {
3978 gold_error(_("conditional branch to PLT in THUMB-2 not supported yet."));
3979 return This::STATUS_BAD_RELOC;
3980 }
3981
3982 // Put RELOCATION back into the insn.
3983 upper_insn = This::thumb32_cond_branch_upper(upper_insn, branch_offset);
3984 lower_insn = This::thumb32_cond_branch_lower(lower_insn, branch_offset);
3985
3986 // Put the relocated value back in the object file:
3987 elfcpp::Swap<16, big_endian>::writeval(wv, upper_insn);
3988 elfcpp::Swap<16, big_endian>::writeval(wv + 1, lower_insn);
3989
3990 return (utils::has_overflow<21>(branch_offset)
3991 ? This::STATUS_OVERFLOW
3992 : This::STATUS_OKAY);
3993 }
3994
3995 // Get the GOT section, creating it if necessary.
3996
3997 template<bool big_endian>
3998 Arm_output_data_got<big_endian>*
3999 Target_arm<big_endian>::got_section(Symbol_table* symtab, Layout* layout)
4000 {
4001 if (this->got_ == NULL)
4002 {
4003 gold_assert(symtab != NULL && layout != NULL);
4004
4005 this->got_ = new Arm_output_data_got<big_endian>(symtab, layout);
4006
4007 Output_section* os;
4008 os = layout->add_output_section_data(".got", elfcpp::SHT_PROGBITS,
4009 (elfcpp::SHF_ALLOC
4010 | elfcpp::SHF_WRITE),
4011 this->got_, false, false, false,
4012 true);
4013 // The old GNU linker creates a .got.plt section. We just
4014 // create another set of data in the .got section. Note that we
4015 // always create a PLT if we create a GOT, although the PLT
4016 // might be empty.
4017 this->got_plt_ = new Output_data_space(4, "** GOT PLT");
4018 os = layout->add_output_section_data(".got", elfcpp::SHT_PROGBITS,
4019 (elfcpp::SHF_ALLOC
4020 | elfcpp::SHF_WRITE),
4021 this->got_plt_, false, false,
4022 false, false);
4023
4024 // The first three entries are reserved.
4025 this->got_plt_->set_current_data_size(3 * 4);
4026
4027 // Define _GLOBAL_OFFSET_TABLE_ at the start of the PLT.
4028 symtab->define_in_output_data("_GLOBAL_OFFSET_TABLE_", NULL,
4029 Symbol_table::PREDEFINED,
4030 this->got_plt_,
4031 0, 0, elfcpp::STT_OBJECT,
4032 elfcpp::STB_LOCAL,
4033 elfcpp::STV_HIDDEN, 0,
4034 false, false);
4035 }
4036 return this->got_;
4037 }
4038
4039 // Get the dynamic reloc section, creating it if necessary.
4040
4041 template<bool big_endian>
4042 typename Target_arm<big_endian>::Reloc_section*
4043 Target_arm<big_endian>::rel_dyn_section(Layout* layout)
4044 {
4045 if (this->rel_dyn_ == NULL)
4046 {
4047 gold_assert(layout != NULL);
4048 this->rel_dyn_ = new Reloc_section(parameters->options().combreloc());
4049 layout->add_output_section_data(".rel.dyn", elfcpp::SHT_REL,
4050 elfcpp::SHF_ALLOC, this->rel_dyn_, true,
4051 false, false, false);
4052 }
4053 return this->rel_dyn_;
4054 }
4055
4056 // Insn_template methods.
4057
4058 // Return byte size of an instruction template.
4059
4060 size_t
4061 Insn_template::size() const
4062 {
4063 switch (this->type())
4064 {
4065 case THUMB16_TYPE:
4066 case THUMB16_SPECIAL_TYPE:
4067 return 2;
4068 case ARM_TYPE:
4069 case THUMB32_TYPE:
4070 case DATA_TYPE:
4071 return 4;
4072 default:
4073 gold_unreachable();
4074 }
4075 }
4076
4077 // Return alignment of an instruction template.
4078
4079 unsigned
4080 Insn_template::alignment() const
4081 {
4082 switch (this->type())
4083 {
4084 case THUMB16_TYPE:
4085 case THUMB16_SPECIAL_TYPE:
4086 case THUMB32_TYPE:
4087 return 2;
4088 case ARM_TYPE:
4089 case DATA_TYPE:
4090 return 4;
4091 default:
4092 gold_unreachable();
4093 }
4094 }
4095
4096 // Stub_template methods.
4097
4098 Stub_template::Stub_template(
4099 Stub_type type, const Insn_template* insns,
4100 size_t insn_count)
4101 : type_(type), insns_(insns), insn_count_(insn_count), alignment_(1),
4102 entry_in_thumb_mode_(false), relocs_()
4103 {
4104 off_t offset = 0;
4105
4106 // Compute byte size and alignment of stub template.
4107 for (size_t i = 0; i < insn_count; i++)
4108 {
4109 unsigned insn_alignment = insns[i].alignment();
4110 size_t insn_size = insns[i].size();
4111 gold_assert((offset & (insn_alignment - 1)) == 0);
4112 this->alignment_ = std::max(this->alignment_, insn_alignment);
4113 switch (insns[i].type())
4114 {
4115 case Insn_template::THUMB16_TYPE:
4116 case Insn_template::THUMB16_SPECIAL_TYPE:
4117 if (i == 0)
4118 this->entry_in_thumb_mode_ = true;
4119 break;
4120
4121 case Insn_template::THUMB32_TYPE:
4122 if (insns[i].r_type() != elfcpp::R_ARM_NONE)
4123 this->relocs_.push_back(Reloc(i, offset));
4124 if (i == 0)
4125 this->entry_in_thumb_mode_ = true;
4126 break;
4127
4128 case Insn_template::ARM_TYPE:
4129 // Handle cases where the target is encoded within the
4130 // instruction.
4131 if (insns[i].r_type() == elfcpp::R_ARM_JUMP24)
4132 this->relocs_.push_back(Reloc(i, offset));
4133 break;
4134
4135 case Insn_template::DATA_TYPE:
4136 // Entry point cannot be data.
4137 gold_assert(i != 0);
4138 this->relocs_.push_back(Reloc(i, offset));
4139 break;
4140
4141 default:
4142 gold_unreachable();
4143 }
4144 offset += insn_size;
4145 }
4146 this->size_ = offset;
4147 }
4148
4149 // Stub methods.
4150
4151 // Template to implement do_write for a specific target endianness.
4152
4153 template<bool big_endian>
4154 void inline
4155 Stub::do_fixed_endian_write(unsigned char* view, section_size_type view_size)
4156 {
4157 const Stub_template* stub_template = this->stub_template();
4158 const Insn_template* insns = stub_template->insns();
4159
4160 // FIXME: We do not handle BE8 encoding yet.
4161 unsigned char* pov = view;
4162 for (size_t i = 0; i < stub_template->insn_count(); i++)
4163 {
4164 switch (insns[i].type())
4165 {
4166 case Insn_template::THUMB16_TYPE:
4167 elfcpp::Swap<16, big_endian>::writeval(pov, insns[i].data() & 0xffff);
4168 break;
4169 case Insn_template::THUMB16_SPECIAL_TYPE:
4170 elfcpp::Swap<16, big_endian>::writeval(
4171 pov,
4172 this->thumb16_special(i));
4173 break;
4174 case Insn_template::THUMB32_TYPE:
4175 {
4176 uint32_t hi = (insns[i].data() >> 16) & 0xffff;
4177 uint32_t lo = insns[i].data() & 0xffff;
4178 elfcpp::Swap<16, big_endian>::writeval(pov, hi);
4179 elfcpp::Swap<16, big_endian>::writeval(pov + 2, lo);
4180 }
4181 break;
4182 case Insn_template::ARM_TYPE:
4183 case Insn_template::DATA_TYPE:
4184 elfcpp::Swap<32, big_endian>::writeval(pov, insns[i].data());
4185 break;
4186 default:
4187 gold_unreachable();
4188 }
4189 pov += insns[i].size();
4190 }
4191 gold_assert(static_cast<section_size_type>(pov - view) == view_size);
4192 }
4193
4194 // Reloc_stub::Key methods.
4195
4196 // Dump a Key as a string for debugging.
4197
4198 std::string
4199 Reloc_stub::Key::name() const
4200 {
4201 if (this->r_sym_ == invalid_index)
4202 {
4203 // Global symbol key name
4204 // <stub-type>:<symbol name>:<addend>.
4205 const std::string sym_name = this->u_.symbol->name();
4206 // We need to print two hex number and two colons. So just add 100 bytes
4207 // to the symbol name size.
4208 size_t len = sym_name.size() + 100;
4209 char* buffer = new char[len];
4210 int c = snprintf(buffer, len, "%d:%s:%x", this->stub_type_,
4211 sym_name.c_str(), this->addend_);
4212 gold_assert(c > 0 && c < static_cast<int>(len));
4213 delete[] buffer;
4214 return std::string(buffer);
4215 }
4216 else
4217 {
4218 // local symbol key name
4219 // <stub-type>:<object>:<r_sym>:<addend>.
4220 const size_t len = 200;
4221 char buffer[len];
4222 int c = snprintf(buffer, len, "%d:%p:%u:%x", this->stub_type_,
4223 this->u_.relobj, this->r_sym_, this->addend_);
4224 gold_assert(c > 0 && c < static_cast<int>(len));
4225 return std::string(buffer);
4226 }
4227 }
4228
4229 // Reloc_stub methods.
4230
4231 // Determine the type of stub needed, if any, for a relocation of R_TYPE at
4232 // LOCATION to DESTINATION.
4233 // This code is based on the arm_type_of_stub function in
4234 // bfd/elf32-arm.c. We have changed the interface a liitle to keep the Stub
4235 // class simple.
4236
4237 Stub_type
4238 Reloc_stub::stub_type_for_reloc(
4239 unsigned int r_type,
4240 Arm_address location,
4241 Arm_address destination,
4242 bool target_is_thumb)
4243 {
4244 Stub_type stub_type = arm_stub_none;
4245
4246 // This is a bit ugly but we want to avoid using a templated class for
4247 // big and little endianities.
4248 bool may_use_blx;
4249 bool should_force_pic_veneer;
4250 bool thumb2;
4251 bool thumb_only;
4252 if (parameters->target().is_big_endian())
4253 {
4254 const Target_arm<true>* big_endian_target =
4255 Target_arm<true>::default_target();
4256 may_use_blx = big_endian_target->may_use_blx();
4257 should_force_pic_veneer = big_endian_target->should_force_pic_veneer();
4258 thumb2 = big_endian_target->using_thumb2();
4259 thumb_only = big_endian_target->using_thumb_only();
4260 }
4261 else
4262 {
4263 const Target_arm<false>* little_endian_target =
4264 Target_arm<false>::default_target();
4265 may_use_blx = little_endian_target->may_use_blx();
4266 should_force_pic_veneer = little_endian_target->should_force_pic_veneer();
4267 thumb2 = little_endian_target->using_thumb2();
4268 thumb_only = little_endian_target->using_thumb_only();
4269 }
4270
4271 int64_t branch_offset;
4272 if (r_type == elfcpp::R_ARM_THM_CALL || r_type == elfcpp::R_ARM_THM_JUMP24)
4273 {
4274 // For THUMB BLX instruction, bit 1 of target comes from bit 1 of the
4275 // base address (instruction address + 4).
4276 if ((r_type == elfcpp::R_ARM_THM_CALL) && may_use_blx && !target_is_thumb)
4277 destination = utils::bit_select(destination, location, 0x2);
4278 branch_offset = static_cast<int64_t>(destination) - location;
4279
4280 // Handle cases where:
4281 // - this call goes too far (different Thumb/Thumb2 max
4282 // distance)
4283 // - it's a Thumb->Arm call and blx is not available, or it's a
4284 // Thumb->Arm branch (not bl). A stub is needed in this case.
4285 if ((!thumb2
4286 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
4287 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
4288 || (thumb2
4289 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
4290 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
4291 || ((!target_is_thumb)
4292 && (((r_type == elfcpp::R_ARM_THM_CALL) && !may_use_blx)
4293 || (r_type == elfcpp::R_ARM_THM_JUMP24))))
4294 {
4295 if (target_is_thumb)
4296 {
4297 // Thumb to thumb.
4298 if (!thumb_only)
4299 {
4300 stub_type = (parameters->options().shared()
4301 || should_force_pic_veneer)
4302 // PIC stubs.
4303 ? ((may_use_blx
4304 && (r_type == elfcpp::R_ARM_THM_CALL))
4305 // V5T and above. Stub starts with ARM code, so
4306 // we must be able to switch mode before
4307 // reaching it, which is only possible for 'bl'
4308 // (ie R_ARM_THM_CALL relocation).
4309 ? arm_stub_long_branch_any_thumb_pic
4310 // On V4T, use Thumb code only.
4311 : arm_stub_long_branch_v4t_thumb_thumb_pic)
4312
4313 // non-PIC stubs.
4314 : ((may_use_blx
4315 && (r_type == elfcpp::R_ARM_THM_CALL))
4316 ? arm_stub_long_branch_any_any // V5T and above.
4317 : arm_stub_long_branch_v4t_thumb_thumb); // V4T.
4318 }
4319 else
4320 {
4321 stub_type = (parameters->options().shared()
4322 || should_force_pic_veneer)
4323 ? arm_stub_long_branch_thumb_only_pic // PIC stub.
4324 : arm_stub_long_branch_thumb_only; // non-PIC stub.
4325 }
4326 }
4327 else
4328 {
4329 // Thumb to arm.
4330
4331 // FIXME: We should check that the input section is from an
4332 // object that has interwork enabled.
4333
4334 stub_type = (parameters->options().shared()
4335 || should_force_pic_veneer)
4336 // PIC stubs.
4337 ? ((may_use_blx
4338 && (r_type == elfcpp::R_ARM_THM_CALL))
4339 ? arm_stub_long_branch_any_arm_pic // V5T and above.
4340 : arm_stub_long_branch_v4t_thumb_arm_pic) // V4T.
4341
4342 // non-PIC stubs.
4343 : ((may_use_blx
4344 && (r_type == elfcpp::R_ARM_THM_CALL))
4345 ? arm_stub_long_branch_any_any // V5T and above.
4346 : arm_stub_long_branch_v4t_thumb_arm); // V4T.
4347
4348 // Handle v4t short branches.
4349 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
4350 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
4351 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
4352 stub_type = arm_stub_short_branch_v4t_thumb_arm;
4353 }
4354 }
4355 }
4356 else if (r_type == elfcpp::R_ARM_CALL
4357 || r_type == elfcpp::R_ARM_JUMP24
4358 || r_type == elfcpp::R_ARM_PLT32)
4359 {
4360 branch_offset = static_cast<int64_t>(destination) - location;
4361 if (target_is_thumb)
4362 {
4363 // Arm to thumb.
4364
4365 // FIXME: We should check that the input section is from an
4366 // object that has interwork enabled.
4367
4368 // We have an extra 2-bytes reach because of
4369 // the mode change (bit 24 (H) of BLX encoding).
4370 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
4371 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
4372 || ((r_type == elfcpp::R_ARM_CALL) && !may_use_blx)
4373 || (r_type == elfcpp::R_ARM_JUMP24)
4374 || (r_type == elfcpp::R_ARM_PLT32))
4375 {
4376 stub_type = (parameters->options().shared()
4377 || should_force_pic_veneer)
4378 // PIC stubs.
4379 ? (may_use_blx
4380 ? arm_stub_long_branch_any_thumb_pic// V5T and above.
4381 : arm_stub_long_branch_v4t_arm_thumb_pic) // V4T stub.
4382
4383 // non-PIC stubs.
4384 : (may_use_blx
4385 ? arm_stub_long_branch_any_any // V5T and above.
4386 : arm_stub_long_branch_v4t_arm_thumb); // V4T.
4387 }
4388 }
4389 else
4390 {
4391 // Arm to arm.
4392 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
4393 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
4394 {
4395 stub_type = (parameters->options().shared()
4396 || should_force_pic_veneer)
4397 ? arm_stub_long_branch_any_arm_pic // PIC stubs.
4398 : arm_stub_long_branch_any_any; /// non-PIC.
4399 }
4400 }
4401 }
4402
4403 return stub_type;
4404 }
4405
4406 // Cortex_a8_stub methods.
4407
4408 // Return the instruction for a THUMB16_SPECIAL_TYPE instruction template.
4409 // I is the position of the instruction template in the stub template.
4410
4411 uint16_t
4412 Cortex_a8_stub::do_thumb16_special(size_t i)
4413 {
4414 // The only use of this is to copy condition code from a conditional
4415 // branch being worked around to the corresponding conditional branch in
4416 // to the stub.
4417 gold_assert(this->stub_template()->type() == arm_stub_a8_veneer_b_cond
4418 && i == 0);
4419 uint16_t data = this->stub_template()->insns()[i].data();
4420 gold_assert((data & 0xff00U) == 0xd000U);
4421 data |= ((this->original_insn_ >> 22) & 0xf) << 8;
4422 return data;
4423 }
4424
4425 // Stub_factory methods.
4426
4427 Stub_factory::Stub_factory()
4428 {
4429 // The instruction template sequences are declared as static
4430 // objects and initialized first time the constructor runs.
4431
4432 // Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
4433 // to reach the stub if necessary.
4434 static const Insn_template elf32_arm_stub_long_branch_any_any[] =
4435 {
4436 Insn_template::arm_insn(0xe51ff004), // ldr pc, [pc, #-4]
4437 Insn_template::data_word(0, elfcpp::R_ARM_ABS32, 0),
4438 // dcd R_ARM_ABS32(X)
4439 };
4440
4441 // V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
4442 // available.
4443 static const Insn_template elf32_arm_stub_long_branch_v4t_arm_thumb[] =
4444 {
4445 Insn_template::arm_insn(0xe59fc000), // ldr ip, [pc, #0]
4446 Insn_template::arm_insn(0xe12fff1c), // bx ip
4447 Insn_template::data_word(0, elfcpp::R_ARM_ABS32, 0),
4448 // dcd R_ARM_ABS32(X)
4449 };
4450
4451 // Thumb -> Thumb long branch stub. Used on M-profile architectures.
4452 static const Insn_template elf32_arm_stub_long_branch_thumb_only[] =
4453 {
4454 Insn_template::thumb16_insn(0xb401), // push {r0}
4455 Insn_template::thumb16_insn(0x4802), // ldr r0, [pc, #8]
4456 Insn_template::thumb16_insn(0x4684), // mov ip, r0
4457 Insn_template::thumb16_insn(0xbc01), // pop {r0}
4458 Insn_template::thumb16_insn(0x4760), // bx ip
4459 Insn_template::thumb16_insn(0xbf00), // nop
4460 Insn_template::data_word(0, elfcpp::R_ARM_ABS32, 0),
4461 // dcd R_ARM_ABS32(X)
4462 };
4463
4464 // V4T Thumb -> Thumb long branch stub. Using the stack is not
4465 // allowed.
4466 static const Insn_template elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
4467 {
4468 Insn_template::thumb16_insn(0x4778), // bx pc
4469 Insn_template::thumb16_insn(0x46c0), // nop
4470 Insn_template::arm_insn(0xe59fc000), // ldr ip, [pc, #0]
4471 Insn_template::arm_insn(0xe12fff1c), // bx ip
4472 Insn_template::data_word(0, elfcpp::R_ARM_ABS32, 0),
4473 // dcd R_ARM_ABS32(X)
4474 };
4475
4476 // V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
4477 // available.
4478 static const Insn_template elf32_arm_stub_long_branch_v4t_thumb_arm[] =
4479 {
4480 Insn_template::thumb16_insn(0x4778), // bx pc
4481 Insn_template::thumb16_insn(0x46c0), // nop
4482 Insn_template::arm_insn(0xe51ff004), // ldr pc, [pc, #-4]
4483 Insn_template::data_word(0, elfcpp::R_ARM_ABS32, 0),
4484 // dcd R_ARM_ABS32(X)
4485 };
4486
4487 // V4T Thumb -> ARM short branch stub. Shorter variant of the above
4488 // one, when the destination is close enough.
4489 static const Insn_template elf32_arm_stub_short_branch_v4t_thumb_arm[] =
4490 {
4491 Insn_template::thumb16_insn(0x4778), // bx pc
4492 Insn_template::thumb16_insn(0x46c0), // nop
4493 Insn_template::arm_rel_insn(0xea000000, -8), // b (X-8)
4494 };
4495
4496 // ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
4497 // blx to reach the stub if necessary.
4498 static const Insn_template elf32_arm_stub_long_branch_any_arm_pic[] =
4499 {
4500 Insn_template::arm_insn(0xe59fc000), // ldr r12, [pc]
4501 Insn_template::arm_insn(0xe08ff00c), // add pc, pc, ip
4502 Insn_template::data_word(0, elfcpp::R_ARM_REL32, -4),
4503 // dcd R_ARM_REL32(X-4)
4504 };
4505
4506 // ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
4507 // blx to reach the stub if necessary. We can not add into pc;
4508 // it is not guaranteed to mode switch (different in ARMv6 and
4509 // ARMv7).
4510 static const Insn_template elf32_arm_stub_long_branch_any_thumb_pic[] =
4511 {
4512 Insn_template::arm_insn(0xe59fc004), // ldr r12, [pc, #4]
4513 Insn_template::arm_insn(0xe08fc00c), // add ip, pc, ip
4514 Insn_template::arm_insn(0xe12fff1c), // bx ip
4515 Insn_template::data_word(0, elfcpp::R_ARM_REL32, 0),
4516 // dcd R_ARM_REL32(X)
4517 };
4518
4519 // V4T ARM -> ARM long branch stub, PIC.
4520 static const Insn_template elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
4521 {
4522 Insn_template::arm_insn(0xe59fc004), // ldr ip, [pc, #4]
4523 Insn_template::arm_insn(0xe08fc00c), // add ip, pc, ip
4524 Insn_template::arm_insn(0xe12fff1c), // bx ip
4525 Insn_template::data_word(0, elfcpp::R_ARM_REL32, 0),
4526 // dcd R_ARM_REL32(X)
4527 };
4528
4529 // V4T Thumb -> ARM long branch stub, PIC.
4530 static const Insn_template elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
4531 {
4532 Insn_template::thumb16_insn(0x4778), // bx pc
4533 Insn_template::thumb16_insn(0x46c0), // nop
4534 Insn_template::arm_insn(0xe59fc000), // ldr ip, [pc, #0]
4535 Insn_template::arm_insn(0xe08cf00f), // add pc, ip, pc
4536 Insn_template::data_word(0, elfcpp::R_ARM_REL32, -4),
4537 // dcd R_ARM_REL32(X)
4538 };
4539
4540 // Thumb -> Thumb long branch stub, PIC. Used on M-profile
4541 // architectures.
4542 static const Insn_template elf32_arm_stub_long_branch_thumb_only_pic[] =
4543 {
4544 Insn_template::thumb16_insn(0xb401), // push {r0}
4545 Insn_template::thumb16_insn(0x4802), // ldr r0, [pc, #8]
4546 Insn_template::thumb16_insn(0x46fc), // mov ip, pc
4547 Insn_template::thumb16_insn(0x4484), // add ip, r0
4548 Insn_template::thumb16_insn(0xbc01), // pop {r0}
4549 Insn_template::thumb16_insn(0x4760), // bx ip
4550 Insn_template::data_word(0, elfcpp::R_ARM_REL32, 4),
4551 // dcd R_ARM_REL32(X)
4552 };
4553
4554 // V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
4555 // allowed.
4556 static const Insn_template elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
4557 {
4558 Insn_template::thumb16_insn(0x4778), // bx pc
4559 Insn_template::thumb16_insn(0x46c0), // nop
4560 Insn_template::arm_insn(0xe59fc004), // ldr ip, [pc, #4]
4561 Insn_template::arm_insn(0xe08fc00c), // add ip, pc, ip
4562 Insn_template::arm_insn(0xe12fff1c), // bx ip
4563 Insn_template::data_word(0, elfcpp::R_ARM_REL32, 0),
4564 // dcd R_ARM_REL32(X)
4565 };
4566
4567 // Cortex-A8 erratum-workaround stubs.
4568
4569 // Stub used for conditional branches (which may be beyond +/-1MB away,
4570 // so we can't use a conditional branch to reach this stub).
4571
4572 // original code:
4573 //
4574 // b<cond> X
4575 // after:
4576 //
4577 static const Insn_template elf32_arm_stub_a8_veneer_b_cond[] =
4578 {
4579 Insn_template::thumb16_bcond_insn(0xd001), // b<cond>.n true
4580 Insn_template::thumb32_b_insn(0xf000b800, -4), // b.w after
4581 Insn_template::thumb32_b_insn(0xf000b800, -4) // true:
4582 // b.w X
4583 };
4584
4585 // Stub used for b.w and bl.w instructions.
4586
4587 static const Insn_template elf32_arm_stub_a8_veneer_b[] =
4588 {
4589 Insn_template::thumb32_b_insn(0xf000b800, -4) // b.w dest
4590 };
4591
4592 static const Insn_template elf32_arm_stub_a8_veneer_bl[] =
4593 {
4594 Insn_template::thumb32_b_insn(0xf000b800, -4) // b.w dest
4595 };
4596
4597 // Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
4598 // instruction (which switches to ARM mode) to point to this stub. Jump to
4599 // the real destination using an ARM-mode branch.
4600 static const Insn_template elf32_arm_stub_a8_veneer_blx[] =
4601 {
4602 Insn_template::arm_rel_insn(0xea000000, -8) // b dest
4603 };
4604
4605 // Stub used to provide an interworking for R_ARM_V4BX relocation
4606 // (bx r[n] instruction).
4607 static const Insn_template elf32_arm_stub_v4_veneer_bx[] =
4608 {
4609 Insn_template::arm_insn(0xe3100001), // tst r<n>, #1
4610 Insn_template::arm_insn(0x01a0f000), // moveq pc, r<n>
4611 Insn_template::arm_insn(0xe12fff10) // bx r<n>
4612 };
4613
4614 // Fill in the stub template look-up table. Stub templates are constructed
4615 // per instance of Stub_factory for fast look-up without locking
4616 // in a thread-enabled environment.
4617
4618 this->stub_templates_[arm_stub_none] =
4619 new Stub_template(arm_stub_none, NULL, 0);
4620
4621 #define DEF_STUB(x) \
4622 do \
4623 { \
4624 size_t array_size \
4625 = sizeof(elf32_arm_stub_##x) / sizeof(elf32_arm_stub_##x[0]); \
4626 Stub_type type = arm_stub_##x; \
4627 this->stub_templates_[type] = \
4628 new Stub_template(type, elf32_arm_stub_##x, array_size); \
4629 } \
4630 while (0);
4631
4632 DEF_STUBS
4633 #undef DEF_STUB
4634 }
4635
4636 // Stub_table methods.
4637
4638 // Removel all Cortex-A8 stub.
4639
4640 template<bool big_endian>
4641 void
4642 Stub_table<big_endian>::remove_all_cortex_a8_stubs()
4643 {
4644 for (Cortex_a8_stub_list::iterator p = this->cortex_a8_stubs_.begin();
4645 p != this->cortex_a8_stubs_.end();
4646 ++p)
4647 delete p->second;
4648 this->cortex_a8_stubs_.clear();
4649 }
4650
4651 // Relocate one stub. This is a helper for Stub_table::relocate_stubs().
4652
4653 template<bool big_endian>
4654 void
4655 Stub_table<big_endian>::relocate_stub(
4656 Stub* stub,
4657 const Relocate_info<32, big_endian>* relinfo,
4658 Target_arm<big_endian>* arm_target,
4659 Output_section* output_section,
4660 unsigned char* view,
4661 Arm_address address,
4662 section_size_type view_size)
4663 {
4664 const Stub_template* stub_template = stub->stub_template();
4665 if (stub_template->reloc_count() != 0)
4666 {
4667 // Adjust view to cover the stub only.
4668 section_size_type offset = stub->offset();
4669 section_size_type stub_size = stub_template->size();
4670 gold_assert(offset + stub_size <= view_size);
4671
4672 arm_target->relocate_stub(stub, relinfo, output_section, view + offset,
4673 address + offset, stub_size);
4674 }
4675 }
4676
4677 // Relocate all stubs in this stub table.
4678
4679 template<bool big_endian>
4680 void
4681 Stub_table<big_endian>::relocate_stubs(
4682 const Relocate_info<32, big_endian>* relinfo,
4683 Target_arm<big_endian>* arm_target,
4684 Output_section* output_section,
4685 unsigned char* view,
4686 Arm_address address,
4687 section_size_type view_size)
4688 {
4689 // If we are passed a view bigger than the stub table's. we need to
4690 // adjust the view.
4691 gold_assert(address == this->address()
4692 && (view_size
4693 == static_cast<section_size_type>(this->data_size())));
4694
4695 // Relocate all relocation stubs.
4696 for (typename Reloc_stub_map::const_iterator p = this->reloc_stubs_.begin();
4697 p != this->reloc_stubs_.end();
4698 ++p)
4699 this->relocate_stub(p->second, relinfo, arm_target, output_section, view,
4700 address, view_size);
4701
4702 // Relocate all Cortex-A8 stubs.
4703 for (Cortex_a8_stub_list::iterator p = this->cortex_a8_stubs_.begin();
4704 p != this->cortex_a8_stubs_.end();
4705 ++p)
4706 this->relocate_stub(p->second, relinfo, arm_target, output_section, view,
4707 address, view_size);
4708
4709 // Relocate all ARM V4BX stubs.
4710 for (Arm_v4bx_stub_list::iterator p = this->arm_v4bx_stubs_.begin();
4711 p != this->arm_v4bx_stubs_.end();
4712 ++p)
4713 {
4714 if (*p != NULL)
4715 this->relocate_stub(*p, relinfo, arm_target, output_section, view,
4716 address, view_size);
4717 }
4718 }
4719
4720 // Write out the stubs to file.
4721
4722 template<bool big_endian>
4723 void
4724 Stub_table<big_endian>::do_write(Output_file* of)
4725 {
4726 off_t offset = this->offset();
4727 const section_size_type oview_size =
4728 convert_to_section_size_type(this->data_size());
4729 unsigned char* const oview = of->get_output_view(offset, oview_size);
4730
4731 // Write relocation stubs.
4732 for (typename Reloc_stub_map::const_iterator p = this->reloc_stubs_.begin();
4733 p != this->reloc_stubs_.end();
4734 ++p)
4735 {
4736 Reloc_stub* stub = p->second;
4737 Arm_address address = this->address() + stub->offset();
4738 gold_assert(address
4739 == align_address(address,
4740 stub->stub_template()->alignment()));
4741 stub->write(oview + stub->offset(), stub->stub_template()->size(),
4742 big_endian);
4743 }
4744
4745 // Write Cortex-A8 stubs.
4746 for (Cortex_a8_stub_list::const_iterator p = this->cortex_a8_stubs_.begin();
4747 p != this->cortex_a8_stubs_.end();
4748 ++p)
4749 {
4750 Cortex_a8_stub* stub = p->second;
4751 Arm_address address = this->address() + stub->offset();
4752 gold_assert(address
4753 == align_address(address,
4754 stub->stub_template()->alignment()));
4755 stub->write(oview + stub->offset(), stub->stub_template()->size(),
4756 big_endian);
4757 }
4758
4759 // Write ARM V4BX relocation stubs.
4760 for (Arm_v4bx_stub_list::const_iterator p = this->arm_v4bx_stubs_.begin();
4761 p != this->arm_v4bx_stubs_.end();
4762 ++p)
4763 {
4764 if (*p == NULL)
4765 continue;
4766
4767 Arm_address address = this->address() + (*p)->offset();
4768 gold_assert(address
4769 == align_address(address,
4770 (*p)->stub_template()->alignment()));
4771 (*p)->write(oview + (*p)->offset(), (*p)->stub_template()->size(),
4772 big_endian);
4773 }
4774
4775 of->write_output_view(this->offset(), oview_size, oview);
4776 }
4777
4778 // Update the data size and address alignment of the stub table at the end
4779 // of a relaxation pass. Return true if either the data size or the
4780 // alignment changed in this relaxation pass.
4781
4782 template<bool big_endian>
4783 bool
4784 Stub_table<big_endian>::update_data_size_and_addralign()
4785 {
4786 // Go over all stubs in table to compute data size and address alignment.
4787 off_t size = this->reloc_stubs_size_;
4788 unsigned addralign = this->reloc_stubs_addralign_;
4789
4790 for (Cortex_a8_stub_list::const_iterator p = this->cortex_a8_stubs_.begin();
4791 p != this->cortex_a8_stubs_.end();
4792 ++p)
4793 {
4794 const Stub_template* stub_template = p->second->stub_template();
4795 addralign = std::max(addralign, stub_template->alignment());
4796 size = (align_address(size, stub_template->alignment())
4797 + stub_template->size());
4798 }
4799
4800 for (Arm_v4bx_stub_list::const_iterator p = this->arm_v4bx_stubs_.begin();
4801 p != this->arm_v4bx_stubs_.end();
4802 ++p)
4803 {
4804 if (*p == NULL)
4805 continue;
4806
4807 const Stub_template* stub_template = (*p)->stub_template();
4808 addralign = std::max(addralign, stub_template->alignment());
4809 size = (align_address(size, stub_template->alignment())
4810 + stub_template->size());
4811 }
4812
4813 // Check if either data size or alignment changed in this pass.
4814 // Update prev_data_size_ and prev_addralign_. These will be used
4815 // as the current data size and address alignment for the next pass.
4816 bool changed = size != this->prev_data_size_;
4817 this->prev_data_size_ = size;
4818
4819 if (addralign != this->prev_addralign_)
4820 changed = true;
4821 this->prev_addralign_ = addralign;
4822
4823 return changed;
4824 }
4825
4826 // Finalize the stubs. This sets the offsets of the stubs within the stub
4827 // table. It also marks all input sections needing Cortex-A8 workaround.
4828
4829 template<bool big_endian>
4830 void
4831 Stub_table<big_endian>::finalize_stubs()
4832 {
4833 off_t off = this->reloc_stubs_size_;
4834 for (Cortex_a8_stub_list::const_iterator p = this->cortex_a8_stubs_.begin();
4835 p != this->cortex_a8_stubs_.end();
4836 ++p)
4837 {
4838 Cortex_a8_stub* stub = p->second;
4839 const Stub_template* stub_template = stub->stub_template();
4840 uint64_t stub_addralign = stub_template->alignment();
4841 off = align_address(off, stub_addralign);
4842 stub->set_offset(off);
4843 off += stub_template->size();
4844
4845 // Mark input section so that we can determine later if a code section
4846 // needs the Cortex-A8 workaround quickly.
4847 Arm_relobj<big_endian>* arm_relobj =
4848 Arm_relobj<big_endian>::as_arm_relobj(stub->relobj());
4849 arm_relobj->mark_section_for_cortex_a8_workaround(stub->shndx());
4850 }
4851
4852 for (Arm_v4bx_stub_list::const_iterator p = this->arm_v4bx_stubs_.begin();
4853 p != this->arm_v4bx_stubs_.end();
4854 ++p)
4855 {
4856 if (*p == NULL)
4857 continue;
4858
4859 const Stub_template* stub_template = (*p)->stub_template();
4860 uint64_t stub_addralign = stub_template->alignment();
4861 off = align_address(off, stub_addralign);
4862 (*p)->set_offset(off);
4863 off += stub_template->size();
4864 }
4865
4866 gold_assert(off <= this->prev_data_size_);
4867 }
4868
4869 // Apply Cortex-A8 workaround to an address range between VIEW_ADDRESS
4870 // and VIEW_ADDRESS + VIEW_SIZE - 1. VIEW points to the mapped address
4871 // of the address range seen by the linker.
4872
4873 template<bool big_endian>
4874 void
4875 Stub_table<big_endian>::apply_cortex_a8_workaround_to_address_range(
4876 Target_arm<big_endian>* arm_target,
4877 unsigned char* view,
4878 Arm_address view_address,
4879 section_size_type view_size)
4880 {
4881 // Cortex-A8 stubs are sorted by addresses of branches being fixed up.
4882 for (Cortex_a8_stub_list::const_iterator p =
4883 this->cortex_a8_stubs_.lower_bound(view_address);
4884 ((p != this->cortex_a8_stubs_.end())
4885 && (p->first < (view_address + view_size)));
4886 ++p)
4887 {
4888 // We do not store the THUMB bit in the LSB of either the branch address
4889 // or the stub offset. There is no need to strip the LSB.
4890 Arm_address branch_address = p->first;
4891 const Cortex_a8_stub* stub = p->second;
4892 Arm_address stub_address = this->address() + stub->offset();
4893
4894 // Offset of the branch instruction relative to this view.
4895 section_size_type offset =
4896 convert_to_section_size_type(branch_address - view_address);
4897 gold_assert((offset + 4) <= view_size);
4898
4899 arm_target->apply_cortex_a8_workaround(stub, stub_address,
4900 view + offset, branch_address);
4901 }
4902 }
4903
4904 // Arm_input_section methods.
4905
4906 // Initialize an Arm_input_section.
4907
4908 template<bool big_endian>
4909 void
4910 Arm_input_section<big_endian>::init()
4911 {
4912 Relobj* relobj = this->relobj();
4913 unsigned int shndx = this->shndx();
4914
4915 // Cache these to speed up size and alignment queries. It is too slow
4916 // to call section_addraglin and section_size every time.
4917 this->original_addralign_ =
4918 convert_types<uint32_t, uint64_t>(relobj->section_addralign(shndx));
4919 this->original_size_ =
4920 convert_types<uint32_t, uint64_t>(relobj->section_size(shndx));
4921
4922 // We want to make this look like the original input section after
4923 // output sections are finalized.
4924 Output_section* os = relobj->output_section(shndx);
4925 off_t offset = relobj->output_section_offset(shndx);
4926 gold_assert(os != NULL && !relobj->is_output_section_offset_invalid(shndx));
4927 this->set_address(os->address() + offset);
4928 this->set_file_offset(os->offset() + offset);
4929
4930 this->set_current_data_size(this->original_size_);
4931 this->finalize_data_size();
4932 }
4933
4934 template<bool big_endian>
4935 void
4936 Arm_input_section<big_endian>::do_write(Output_file* of)
4937 {
4938 // We have to write out the original section content.
4939 section_size_type section_size;
4940 const unsigned char* section_contents =
4941 this->relobj()->section_contents(this->shndx(), &section_size, false);
4942 of->write(this->offset(), section_contents, section_size);
4943
4944 // If this owns a stub table and it is not empty, write it.
4945 if (this->is_stub_table_owner() && !this->stub_table_->empty())
4946 this->stub_table_->write(of);
4947 }
4948
4949 // Finalize data size.
4950
4951 template<bool big_endian>
4952 void
4953 Arm_input_section<big_endian>::set_final_data_size()
4954 {
4955 off_t off = convert_types<off_t, uint64_t>(this->original_size_);
4956
4957 if (this->is_stub_table_owner())
4958 {
4959 this->stub_table_->finalize_data_size();
4960 off = align_address(off, this->stub_table_->addralign());
4961 off += this->stub_table_->data_size();
4962 }
4963 this->set_data_size(off);
4964 }
4965
4966 // Reset address and file offset.
4967
4968 template<bool big_endian>
4969 void
4970 Arm_input_section<big_endian>::do_reset_address_and_file_offset()
4971 {
4972 // Size of the original input section contents.
4973 off_t off = convert_types<off_t, uint64_t>(this->original_size_);
4974
4975 // If this is a stub table owner, account for the stub table size.
4976 if (this->is_stub_table_owner())
4977 {
4978 Stub_table<big_endian>* stub_table = this->stub_table_;
4979
4980 // Reset the stub table's address and file offset. The
4981 // current data size for child will be updated after that.
4982 stub_table_->reset_address_and_file_offset();
4983 off = align_address(off, stub_table_->addralign());
4984 off += stub_table->current_data_size();
4985 }
4986
4987 this->set_current_data_size(off);
4988 }
4989
4990 // Arm_exidx_cantunwind methods.
4991
4992 // Write this to Output file OF for a fixed endianness.
4993
4994 template<bool big_endian>
4995 void
4996 Arm_exidx_cantunwind::do_fixed_endian_write(Output_file* of)
4997 {
4998 off_t offset = this->offset();
4999 const section_size_type oview_size = 8;
5000 unsigned char* const oview = of->get_output_view(offset, oview_size);
5001
5002 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
5003 Valtype* wv = reinterpret_cast<Valtype*>(oview);
5004
5005 Output_section* os = this->relobj_->output_section(this->shndx_);
5006 gold_assert(os != NULL);
5007
5008 Arm_relobj<big_endian>* arm_relobj =
5009 Arm_relobj<big_endian>::as_arm_relobj(this->relobj_);
5010 Arm_address output_offset =
5011 arm_relobj->get_output_section_offset(this->shndx_);
5012 Arm_address section_start;
5013 if (output_offset != Arm_relobj<big_endian>::invalid_address)
5014 section_start = os->address() + output_offset;
5015 else
5016 {
5017 // Currently this only happens for a relaxed section.
5018 const Output_relaxed_input_section* poris =
5019 os->find_relaxed_input_section(this->relobj_, this->shndx_);
5020 gold_assert(poris != NULL);
5021 section_start = poris->address();
5022 }
5023
5024 // We always append this to the end of an EXIDX section.
5025 Arm_address output_address =
5026 section_start + this->relobj_->section_size(this->shndx_);
5027
5028 // Write out the entry. The first word either points to the beginning
5029 // or after the end of a text section. The second word is the special
5030 // EXIDX_CANTUNWIND value.
5031 uint32_t prel31_offset = output_address - this->address();
5032 if (utils::has_overflow<31>(offset))
5033 gold_error(_("PREL31 overflow in EXIDX_CANTUNWIND entry"));
5034 elfcpp::Swap<32, big_endian>::writeval(wv, prel31_offset & 0x7fffffffU);
5035 elfcpp::Swap<32, big_endian>::writeval(wv + 1, elfcpp::EXIDX_CANTUNWIND);
5036
5037 of->write_output_view(this->offset(), oview_size, oview);
5038 }
5039
5040 // Arm_exidx_merged_section methods.
5041
5042 // Constructor for Arm_exidx_merged_section.
5043 // EXIDX_INPUT_SECTION points to the unmodified EXIDX input section.
5044 // SECTION_OFFSET_MAP points to a section offset map describing how
5045 // parts of the input section are mapped to output. DELETED_BYTES is
5046 // the number of bytes deleted from the EXIDX input section.
5047
5048 Arm_exidx_merged_section::Arm_exidx_merged_section(
5049 const Arm_exidx_input_section& exidx_input_section,
5050 const Arm_exidx_section_offset_map& section_offset_map,
5051 uint32_t deleted_bytes)
5052 : Output_relaxed_input_section(exidx_input_section.relobj(),
5053 exidx_input_section.shndx(),
5054 exidx_input_section.addralign()),
5055 exidx_input_section_(exidx_input_section),
5056 section_offset_map_(section_offset_map)
5057 {
5058 // Fix size here so that we do not need to implement set_final_data_size.
5059 this->set_data_size(exidx_input_section.size() - deleted_bytes);
5060 this->fix_data_size();
5061 }
5062
5063 // Given an input OBJECT, an input section index SHNDX within that
5064 // object, and an OFFSET relative to the start of that input
5065 // section, return whether or not the corresponding offset within
5066 // the output section is known. If this function returns true, it
5067 // sets *POUTPUT to the output offset. The value -1 indicates that
5068 // this input offset is being discarded.
5069
5070 bool
5071 Arm_exidx_merged_section::do_output_offset(
5072 const Relobj* relobj,
5073 unsigned int shndx,
5074 section_offset_type offset,
5075 section_offset_type* poutput) const
5076 {
5077 // We only handle offsets for the original EXIDX input section.
5078 if (relobj != this->exidx_input_section_.relobj()
5079 || shndx != this->exidx_input_section_.shndx())
5080 return false;
5081
5082 section_offset_type section_size =
5083 convert_types<section_offset_type>(this->exidx_input_section_.size());
5084 if (offset < 0 || offset >= section_size)
5085 // Input offset is out of valid range.
5086 *poutput = -1;
5087 else
5088 {
5089 // We need to look up the section offset map to determine the output
5090 // offset. Find the reference point in map that is first offset
5091 // bigger than or equal to this offset.
5092 Arm_exidx_section_offset_map::const_iterator p =
5093 this->section_offset_map_.lower_bound(offset);
5094
5095 // The section offset maps are build such that this should not happen if
5096 // input offset is in the valid range.
5097 gold_assert(p != this->section_offset_map_.end());
5098
5099 // We need to check if this is dropped.
5100 section_offset_type ref = p->first;
5101 section_offset_type mapped_ref = p->second;
5102
5103 if (mapped_ref != Arm_exidx_input_section::invalid_offset)
5104 // Offset is present in output.
5105 *poutput = mapped_ref + (offset - ref);
5106 else
5107 // Offset is discarded owing to EXIDX entry merging.
5108 *poutput = -1;
5109 }
5110
5111 return true;
5112 }
5113
5114 // Write this to output file OF.
5115
5116 void
5117 Arm_exidx_merged_section::do_write(Output_file* of)
5118 {
5119 // If we retain or discard the whole EXIDX input section, we would
5120 // not be here.
5121 gold_assert(this->data_size() != this->exidx_input_section_.size()
5122 && this->data_size() != 0);
5123
5124 off_t offset = this->offset();
5125 const section_size_type oview_size = this->data_size();
5126 unsigned char* const oview = of->get_output_view(offset, oview_size);
5127
5128 Output_section* os = this->relobj()->output_section(this->shndx());
5129 gold_assert(os != NULL);
5130
5131 // Get contents of EXIDX input section.
5132 section_size_type section_size;
5133 const unsigned char* section_contents =
5134 this->relobj()->section_contents(this->shndx(), &section_size, false);
5135 gold_assert(section_size == this->exidx_input_section_.size());
5136
5137 // Go over spans of input offsets and write only those that are not
5138 // discarded.
5139 section_offset_type in_start = 0;
5140 section_offset_type out_start = 0;
5141 for(Arm_exidx_section_offset_map::const_iterator p =
5142 this->section_offset_map_.begin();
5143 p != this->section_offset_map_.end();
5144 ++p)
5145 {
5146 section_offset_type in_end = p->first;
5147 gold_assert(in_end >= in_start);
5148 section_offset_type out_end = p->second;
5149 size_t in_chunk_size = convert_types<size_t>(in_end - in_start + 1);
5150 if (out_end != -1)
5151 {
5152 size_t out_chunk_size =
5153 convert_types<size_t>(out_end - out_start + 1);
5154 gold_assert(out_chunk_size == in_chunk_size);
5155 memcpy(oview + out_start, section_contents + in_start,
5156 out_chunk_size);
5157 out_start += out_chunk_size;
5158 }
5159 in_start += in_chunk_size;
5160 }
5161
5162 gold_assert(convert_to_section_size_type(out_start) == oview_size);
5163 of->write_output_view(this->offset(), oview_size, oview);
5164 }
5165
5166 // Arm_exidx_fixup methods.
5167
5168 // Append an EXIDX_CANTUNWIND in the current output section if the last entry
5169 // is not an EXIDX_CANTUNWIND entry already. The new EXIDX_CANTUNWIND entry
5170 // points to the end of the last seen EXIDX section.
5171
5172 void
5173 Arm_exidx_fixup::add_exidx_cantunwind_as_needed()
5174 {
5175 if (this->last_unwind_type_ != UT_EXIDX_CANTUNWIND
5176 && this->last_input_section_ != NULL)
5177 {
5178 Relobj* relobj = this->last_input_section_->relobj();
5179 unsigned int text_shndx = this->last_input_section_->link();
5180 Arm_exidx_cantunwind* cantunwind =
5181 new Arm_exidx_cantunwind(relobj, text_shndx);
5182 this->exidx_output_section_->add_output_section_data(cantunwind);
5183 this->last_unwind_type_ = UT_EXIDX_CANTUNWIND;
5184 }
5185 }
5186
5187 // Process an EXIDX section entry in input. Return whether this entry
5188 // can be deleted in the output. SECOND_WORD in the second word of the
5189 // EXIDX entry.
5190
5191 bool
5192 Arm_exidx_fixup::process_exidx_entry(uint32_t second_word)
5193 {
5194 bool delete_entry;
5195 if (second_word == elfcpp::EXIDX_CANTUNWIND)
5196 {
5197 // Merge if previous entry is also an EXIDX_CANTUNWIND.
5198 delete_entry = this->last_unwind_type_ == UT_EXIDX_CANTUNWIND;
5199 this->last_unwind_type_ = UT_EXIDX_CANTUNWIND;
5200 }
5201 else if ((second_word & 0x80000000) != 0)
5202 {
5203 // Inlined unwinding data. Merge if equal to previous.
5204 delete_entry = (merge_exidx_entries_
5205 && this->last_unwind_type_ == UT_INLINED_ENTRY
5206 && this->last_inlined_entry_ == second_word);
5207 this->last_unwind_type_ = UT_INLINED_ENTRY;
5208 this->last_inlined_entry_ = second_word;
5209 }
5210 else
5211 {
5212 // Normal table entry. In theory we could merge these too,
5213 // but duplicate entries are likely to be much less common.
5214 delete_entry = false;
5215 this->last_unwind_type_ = UT_NORMAL_ENTRY;
5216 }
5217 return delete_entry;
5218 }
5219
5220 // Update the current section offset map during EXIDX section fix-up.
5221 // If there is no map, create one. INPUT_OFFSET is the offset of a
5222 // reference point, DELETED_BYTES is the number of deleted by in the
5223 // section so far. If DELETE_ENTRY is true, the reference point and
5224 // all offsets after the previous reference point are discarded.
5225
5226 void
5227 Arm_exidx_fixup::update_offset_map(
5228 section_offset_type input_offset,
5229 section_size_type deleted_bytes,
5230 bool delete_entry)
5231 {
5232 if (this->section_offset_map_ == NULL)
5233 this->section_offset_map_ = new Arm_exidx_section_offset_map();
5234 section_offset_type output_offset;
5235 if (delete_entry)
5236 output_offset = Arm_exidx_input_section::invalid_offset;
5237 else
5238 output_offset = input_offset - deleted_bytes;
5239 (*this->section_offset_map_)[input_offset] = output_offset;
5240 }
5241
5242 // Process EXIDX_INPUT_SECTION for EXIDX entry merging. Return the number of
5243 // bytes deleted. If some entries are merged, also store a pointer to a newly
5244 // created Arm_exidx_section_offset_map object in *PSECTION_OFFSET_MAP. The
5245 // caller owns the map and is responsible for releasing it after use.
5246
5247 template<bool big_endian>
5248 uint32_t
5249 Arm_exidx_fixup::process_exidx_section(
5250 const Arm_exidx_input_section* exidx_input_section,
5251 Arm_exidx_section_offset_map** psection_offset_map)
5252 {
5253 Relobj* relobj = exidx_input_section->relobj();
5254 unsigned shndx = exidx_input_section->shndx();
5255 section_size_type section_size;
5256 const unsigned char* section_contents =
5257 relobj->section_contents(shndx, &section_size, false);
5258
5259 if ((section_size % 8) != 0)
5260 {
5261 // Something is wrong with this section. Better not touch it.
5262 gold_error(_("uneven .ARM.exidx section size in %s section %u"),
5263 relobj->name().c_str(), shndx);
5264 this->last_input_section_ = exidx_input_section;
5265 this->last_unwind_type_ = UT_NONE;
5266 return 0;
5267 }
5268
5269 uint32_t deleted_bytes = 0;
5270 bool prev_delete_entry = false;
5271 gold_assert(this->section_offset_map_ == NULL);
5272
5273 for (section_size_type i = 0; i < section_size; i += 8)
5274 {
5275 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
5276 const Valtype* wv =
5277 reinterpret_cast<const Valtype*>(section_contents + i + 4);
5278 uint32_t second_word = elfcpp::Swap<32, big_endian>::readval(wv);
5279
5280 bool delete_entry = this->process_exidx_entry(second_word);
5281
5282 // Entry deletion causes changes in output offsets. We use a std::map
5283 // to record these. And entry (x, y) means input offset x
5284 // is mapped to output offset y. If y is invalid_offset, then x is
5285 // dropped in the output. Because of the way std::map::lower_bound
5286 // works, we record the last offset in a region w.r.t to keeping or
5287 // dropping. If there is no entry (x0, y0) for an input offset x0,
5288 // the output offset y0 of it is determined by the output offset y1 of
5289 // the smallest input offset x1 > x0 that there is an (x1, y1) entry
5290 // in the map. If y1 is not -1, then y0 = y1 + x0 - x1. Othewise, y1
5291 // y0 is also -1.
5292 if (delete_entry != prev_delete_entry && i != 0)
5293 this->update_offset_map(i - 1, deleted_bytes, prev_delete_entry);
5294
5295 // Update total deleted bytes for this entry.
5296 if (delete_entry)
5297 deleted_bytes += 8;
5298
5299 prev_delete_entry = delete_entry;
5300 }
5301
5302 // If section offset map is not NULL, make an entry for the end of
5303 // section.
5304 if (this->section_offset_map_ != NULL)
5305 update_offset_map(section_size - 1, deleted_bytes, prev_delete_entry);
5306
5307 *psection_offset_map = this->section_offset_map_;
5308 this->section_offset_map_ = NULL;
5309 this->last_input_section_ = exidx_input_section;
5310
5311 // Set the first output text section so that we can link the EXIDX output
5312 // section to it. Ignore any EXIDX input section that is completely merged.
5313 if (this->first_output_text_section_ == NULL
5314 && deleted_bytes != section_size)
5315 {
5316 unsigned int link = exidx_input_section->link();
5317 Output_section* os = relobj->output_section(link);
5318 gold_assert(os != NULL);
5319 this->first_output_text_section_ = os;
5320 }
5321
5322 return deleted_bytes;
5323 }
5324
5325 // Arm_output_section methods.
5326
5327 // Create a stub group for input sections from BEGIN to END. OWNER
5328 // points to the input section to be the owner a new stub table.
5329
5330 template<bool big_endian>
5331 void
5332 Arm_output_section<big_endian>::create_stub_group(
5333 Input_section_list::const_iterator begin,
5334 Input_section_list::const_iterator end,
5335 Input_section_list::const_iterator owner,
5336 Target_arm<big_endian>* target,
5337 std::vector<Output_relaxed_input_section*>* new_relaxed_sections)
5338 {
5339 // We use a different kind of relaxed section in an EXIDX section.
5340 // The static casting from Output_relaxed_input_section to
5341 // Arm_input_section is invalid in an EXIDX section. We are okay
5342 // because we should not be calling this for an EXIDX section.
5343 gold_assert(this->type() != elfcpp::SHT_ARM_EXIDX);
5344
5345 // Currently we convert ordinary input sections into relaxed sections only
5346 // at this point but we may want to support creating relaxed input section
5347 // very early. So we check here to see if owner is already a relaxed
5348 // section.
5349
5350 Arm_input_section<big_endian>* arm_input_section;
5351 if (owner->is_relaxed_input_section())
5352 {
5353 arm_input_section =
5354 Arm_input_section<big_endian>::as_arm_input_section(
5355 owner->relaxed_input_section());
5356 }
5357 else
5358 {
5359 gold_assert(owner->is_input_section());
5360 // Create a new relaxed input section.
5361 arm_input_section =
5362 target->new_arm_input_section(owner->relobj(), owner->shndx());
5363 new_relaxed_sections->push_back(arm_input_section);
5364 }
5365
5366 // Create a stub table.
5367 Stub_table<big_endian>* stub_table =
5368 target->new_stub_table(arm_input_section);
5369
5370 arm_input_section->set_stub_table(stub_table);
5371
5372 Input_section_list::const_iterator p = begin;
5373 Input_section_list::const_iterator prev_p;
5374
5375 // Look for input sections or relaxed input sections in [begin ... end].
5376 do
5377 {
5378 if (p->is_input_section() || p->is_relaxed_input_section())
5379 {
5380 // The stub table information for input sections live
5381 // in their objects.
5382 Arm_relobj<big_endian>* arm_relobj =
5383 Arm_relobj<big_endian>::as_arm_relobj(p->relobj());
5384 arm_relobj->set_stub_table(p->shndx(), stub_table);
5385 }
5386 prev_p = p++;
5387 }
5388 while (prev_p != end);
5389 }
5390
5391 // Group input sections for stub generation. GROUP_SIZE is roughly the limit
5392 // of stub groups. We grow a stub group by adding input section until the
5393 // size is just below GROUP_SIZE. The last input section will be converted
5394 // into a stub table. If STUB_ALWAYS_AFTER_BRANCH is false, we also add
5395 // input section after the stub table, effectively double the group size.
5396 //
5397 // This is similar to the group_sections() function in elf32-arm.c but is
5398 // implemented differently.
5399
5400 template<bool big_endian>
5401 void
5402 Arm_output_section<big_endian>::group_sections(
5403 section_size_type group_size,
5404 bool stubs_always_after_branch,
5405 Target_arm<big_endian>* target)
5406 {
5407 // We only care about sections containing code.
5408 if ((this->flags() & elfcpp::SHF_EXECINSTR) == 0)
5409 return;
5410
5411 // States for grouping.
5412 typedef enum
5413 {
5414 // No group is being built.
5415 NO_GROUP,
5416 // A group is being built but the stub table is not found yet.
5417 // We keep group a stub group until the size is just under GROUP_SIZE.
5418 // The last input section in the group will be used as the stub table.
5419 FINDING_STUB_SECTION,
5420 // A group is being built and we have already found a stub table.
5421 // We enter this state to grow a stub group by adding input section
5422 // after the stub table. This effectively doubles the group size.
5423 HAS_STUB_SECTION
5424 } State;
5425
5426 // Any newly created relaxed sections are stored here.
5427 std::vector<Output_relaxed_input_section*> new_relaxed_sections;
5428
5429 State state = NO_GROUP;
5430 section_size_type off = 0;
5431 section_size_type group_begin_offset = 0;
5432 section_size_type group_end_offset = 0;
5433 section_size_type stub_table_end_offset = 0;
5434 Input_section_list::const_iterator group_begin =
5435 this->input_sections().end();
5436 Input_section_list::const_iterator stub_table =
5437 this->input_sections().end();
5438 Input_section_list::const_iterator group_end = this->input_sections().end();
5439 for (Input_section_list::const_iterator p = this->input_sections().begin();
5440 p != this->input_sections().end();
5441 ++p)
5442 {
5443 section_size_type section_begin_offset =
5444 align_address(off, p->addralign());
5445 section_size_type section_end_offset =
5446 section_begin_offset + p->data_size();
5447
5448 // Check to see if we should group the previously seens sections.
5449 switch (state)
5450 {
5451 case NO_GROUP:
5452 break;
5453
5454 case FINDING_STUB_SECTION:
5455 // Adding this section makes the group larger than GROUP_SIZE.
5456 if (section_end_offset - group_begin_offset >= group_size)
5457 {
5458 if (stubs_always_after_branch)
5459 {
5460 gold_assert(group_end != this->input_sections().end());
5461 this->create_stub_group(group_begin, group_end, group_end,
5462 target, &new_relaxed_sections);
5463 state = NO_GROUP;
5464 }
5465 else
5466 {
5467 // But wait, there's more! Input sections up to
5468 // stub_group_size bytes after the stub table can be
5469 // handled by it too.
5470 state = HAS_STUB_SECTION;
5471 stub_table = group_end;
5472 stub_table_end_offset = group_end_offset;
5473 }
5474 }
5475 break;
5476
5477 case HAS_STUB_SECTION:
5478 // Adding this section makes the post stub-section group larger
5479 // than GROUP_SIZE.
5480 if (section_end_offset - stub_table_end_offset >= group_size)
5481 {
5482 gold_assert(group_end != this->input_sections().end());
5483 this->create_stub_group(group_begin, group_end, stub_table,
5484 target, &new_relaxed_sections);
5485 state = NO_GROUP;
5486 }
5487 break;
5488
5489 default:
5490 gold_unreachable();
5491 }
5492
5493 // If we see an input section and currently there is no group, start
5494 // a new one. Skip any empty sections.
5495 if ((p->is_input_section() || p->is_relaxed_input_section())
5496 && (p->relobj()->section_size(p->shndx()) != 0))
5497 {
5498 if (state == NO_GROUP)
5499 {
5500 state = FINDING_STUB_SECTION;
5501 group_begin = p;
5502 group_begin_offset = section_begin_offset;
5503 }
5504
5505 // Keep track of the last input section seen.
5506 group_end = p;
5507 group_end_offset = section_end_offset;
5508 }
5509
5510 off = section_end_offset;
5511 }
5512
5513 // Create a stub group for any ungrouped sections.
5514 if (state == FINDING_STUB_SECTION || state == HAS_STUB_SECTION)
5515 {
5516 gold_assert(group_end != this->input_sections().end());
5517 this->create_stub_group(group_begin, group_end,
5518 (state == FINDING_STUB_SECTION
5519 ? group_end
5520 : stub_table),
5521 target, &new_relaxed_sections);
5522 }
5523
5524 // Convert input section into relaxed input section in a batch.
5525 if (!new_relaxed_sections.empty())
5526 this->convert_input_sections_to_relaxed_sections(new_relaxed_sections);
5527
5528 // Update the section offsets
5529 for (size_t i = 0; i < new_relaxed_sections.size(); ++i)
5530 {
5531 Arm_relobj<big_endian>* arm_relobj =
5532 Arm_relobj<big_endian>::as_arm_relobj(
5533 new_relaxed_sections[i]->relobj());
5534 unsigned int shndx = new_relaxed_sections[i]->shndx();
5535 // Tell Arm_relobj that this input section is converted.
5536 arm_relobj->convert_input_section_to_relaxed_section(shndx);
5537 }
5538 }
5539
5540 // Append non empty text sections in this to LIST in ascending
5541 // order of their position in this.
5542
5543 template<bool big_endian>
5544 void
5545 Arm_output_section<big_endian>::append_text_sections_to_list(
5546 Text_section_list* list)
5547 {
5548 // We only care about text sections.
5549 if ((this->flags() & elfcpp::SHF_EXECINSTR) == 0)
5550 return;
5551
5552 gold_assert((this->flags() & elfcpp::SHF_ALLOC) != 0);
5553
5554 for (Input_section_list::const_iterator p = this->input_sections().begin();
5555 p != this->input_sections().end();
5556 ++p)
5557 {
5558 // We only care about plain or relaxed input sections. We also
5559 // ignore any merged sections.
5560 if ((p->is_input_section() || p->is_relaxed_input_section())
5561 && p->data_size() != 0)
5562 list->push_back(Text_section_list::value_type(p->relobj(),
5563 p->shndx()));
5564 }
5565 }
5566
5567 template<bool big_endian>
5568 void
5569 Arm_output_section<big_endian>::fix_exidx_coverage(
5570 Layout* layout,
5571 const Text_section_list& sorted_text_sections,
5572 Symbol_table* symtab,
5573 bool merge_exidx_entries)
5574 {
5575 // We should only do this for the EXIDX output section.
5576 gold_assert(this->type() == elfcpp::SHT_ARM_EXIDX);
5577
5578 // We don't want the relaxation loop to undo these changes, so we discard
5579 // the current saved states and take another one after the fix-up.
5580 this->discard_states();
5581
5582 // Remove all input sections.
5583 uint64_t address = this->address();
5584 typedef std::list<Output_section::Input_section> Input_section_list;
5585 Input_section_list input_sections;
5586 this->reset_address_and_file_offset();
5587 this->get_input_sections(address, std::string(""), &input_sections);
5588
5589 if (!this->input_sections().empty())
5590 gold_error(_("Found non-EXIDX input sections in EXIDX output section"));
5591
5592 // Go through all the known input sections and record them.
5593 typedef Unordered_set<Section_id, Section_id_hash> Section_id_set;
5594 typedef Unordered_map<Section_id, const Output_section::Input_section*,
5595 Section_id_hash> Text_to_exidx_map;
5596 Text_to_exidx_map text_to_exidx_map;
5597 for (Input_section_list::const_iterator p = input_sections.begin();
5598 p != input_sections.end();
5599 ++p)
5600 {
5601 // This should never happen. At this point, we should only see
5602 // plain EXIDX input sections.
5603 gold_assert(!p->is_relaxed_input_section());
5604 text_to_exidx_map[Section_id(p->relobj(), p->shndx())] = &(*p);
5605 }
5606
5607 Arm_exidx_fixup exidx_fixup(this, merge_exidx_entries);
5608
5609 // Go over the sorted text sections.
5610 typedef Unordered_set<Section_id, Section_id_hash> Section_id_set;
5611 Section_id_set processed_input_sections;
5612 for (Text_section_list::const_iterator p = sorted_text_sections.begin();
5613 p != sorted_text_sections.end();
5614 ++p)
5615 {
5616 Relobj* relobj = p->first;
5617 unsigned int shndx = p->second;
5618
5619 Arm_relobj<big_endian>* arm_relobj =
5620 Arm_relobj<big_endian>::as_arm_relobj(relobj);
5621 const Arm_exidx_input_section* exidx_input_section =
5622 arm_relobj->exidx_input_section_by_link(shndx);
5623
5624 // If this text section has no EXIDX section, force an EXIDX_CANTUNWIND
5625 // entry pointing to the end of the last seen EXIDX section.
5626 if (exidx_input_section == NULL)
5627 {
5628 exidx_fixup.add_exidx_cantunwind_as_needed();
5629 continue;
5630 }
5631
5632 Relobj* exidx_relobj = exidx_input_section->relobj();
5633 unsigned int exidx_shndx = exidx_input_section->shndx();
5634 Section_id sid(exidx_relobj, exidx_shndx);
5635 Text_to_exidx_map::const_iterator iter = text_to_exidx_map.find(sid);
5636 if (iter == text_to_exidx_map.end())
5637 {
5638 // This is odd. We have not seen this EXIDX input section before.
5639 // We cannot do fix-up. If we saw a SECTIONS clause in a script,
5640 // issue a warning instead. We assume the user knows what he
5641 // or she is doing. Otherwise, this is an error.
5642 if (layout->script_options()->saw_sections_clause())
5643 gold_warning(_("unwinding may not work because EXIDX input section"
5644 " %u of %s is not in EXIDX output section"),
5645 exidx_shndx, exidx_relobj->name().c_str());
5646 else
5647 gold_error(_("unwinding may not work because EXIDX input section"
5648 " %u of %s is not in EXIDX output section"),
5649 exidx_shndx, exidx_relobj->name().c_str());
5650
5651 exidx_fixup.add_exidx_cantunwind_as_needed();
5652 continue;
5653 }
5654
5655 // Fix up coverage and append input section to output data list.
5656 Arm_exidx_section_offset_map* section_offset_map = NULL;
5657 uint32_t deleted_bytes =
5658 exidx_fixup.process_exidx_section<big_endian>(exidx_input_section,
5659 &section_offset_map);
5660
5661 if (deleted_bytes == exidx_input_section->size())
5662 {
5663 // The whole EXIDX section got merged. Remove it from output.
5664 gold_assert(section_offset_map == NULL);
5665 exidx_relobj->set_output_section(exidx_shndx, NULL);
5666
5667 // All local symbols defined in this input section will be dropped.
5668 // We need to adjust output local symbol count.
5669 arm_relobj->set_output_local_symbol_count_needs_update();
5670 }
5671 else if (deleted_bytes > 0)
5672 {
5673 // Some entries are merged. We need to convert this EXIDX input
5674 // section into a relaxed section.
5675 gold_assert(section_offset_map != NULL);
5676 Arm_exidx_merged_section* merged_section =
5677 new Arm_exidx_merged_section(*exidx_input_section,
5678 *section_offset_map, deleted_bytes);
5679 this->add_relaxed_input_section(merged_section);
5680 arm_relobj->convert_input_section_to_relaxed_section(exidx_shndx);
5681
5682 // All local symbols defined in discarded portions of this input
5683 // section will be dropped. We need to adjust output local symbol
5684 // count.
5685 arm_relobj->set_output_local_symbol_count_needs_update();
5686 }
5687 else
5688 {
5689 // Just add back the EXIDX input section.
5690 gold_assert(section_offset_map == NULL);
5691 const Output_section::Input_section* pis = iter->second;
5692 gold_assert(pis->is_input_section());
5693 this->add_script_input_section(*pis);
5694 }
5695
5696 processed_input_sections.insert(Section_id(exidx_relobj, exidx_shndx));
5697 }
5698
5699 // Insert an EXIDX_CANTUNWIND entry at the end of output if necessary.
5700 exidx_fixup.add_exidx_cantunwind_as_needed();
5701
5702 // Remove any known EXIDX input sections that are not processed.
5703 for (Input_section_list::const_iterator p = input_sections.begin();
5704 p != input_sections.end();
5705 ++p)
5706 {
5707 if (processed_input_sections.find(Section_id(p->relobj(), p->shndx()))
5708 == processed_input_sections.end())
5709 {
5710 // We only discard a known EXIDX section because its linked
5711 // text section has been folded by ICF.
5712 Arm_relobj<big_endian>* arm_relobj =
5713 Arm_relobj<big_endian>::as_arm_relobj(p->relobj());
5714 const Arm_exidx_input_section* exidx_input_section =
5715 arm_relobj->exidx_input_section_by_shndx(p->shndx());
5716 gold_assert(exidx_input_section != NULL);
5717 unsigned int text_shndx = exidx_input_section->link();
5718 gold_assert(symtab->is_section_folded(p->relobj(), text_shndx));
5719
5720 // Remove this from link. We also need to recount the
5721 // local symbols.
5722 p->relobj()->set_output_section(p->shndx(), NULL);
5723 arm_relobj->set_output_local_symbol_count_needs_update();
5724 }
5725 }
5726
5727 // Link exidx output section to the first seen output section and
5728 // set correct entry size.
5729 this->set_link_section(exidx_fixup.first_output_text_section());
5730 this->set_entsize(8);
5731
5732 // Make changes permanent.
5733 this->save_states();
5734 this->set_section_offsets_need_adjustment();
5735 }
5736
5737 // Arm_relobj methods.
5738
5739 // Determine if an input section is scannable for stub processing. SHDR is
5740 // the header of the section and SHNDX is the section index. OS is the output
5741 // section for the input section and SYMTAB is the global symbol table used to
5742 // look up ICF information.
5743
5744 template<bool big_endian>
5745 bool
5746 Arm_relobj<big_endian>::section_is_scannable(
5747 const elfcpp::Shdr<32, big_endian>& shdr,
5748 unsigned int shndx,
5749 const Output_section* os,
5750 const Symbol_table *symtab)
5751 {
5752 // Skip any empty sections, unallocated sections or sections whose
5753 // type are not SHT_PROGBITS.
5754 if (shdr.get_sh_size() == 0
5755 || (shdr.get_sh_flags() & elfcpp::SHF_ALLOC) == 0
5756 || shdr.get_sh_type() != elfcpp::SHT_PROGBITS)
5757 return false;
5758
5759 // Skip any discarded or ICF'ed sections.
5760 if (os == NULL || symtab->is_section_folded(this, shndx))
5761 return false;
5762
5763 // If this requires special offset handling, check to see if it is
5764 // a relaxed section. If this is not, then it is a merged section that
5765 // we cannot handle.
5766 if (this->is_output_section_offset_invalid(shndx))
5767 {
5768 const Output_relaxed_input_section* poris =
5769 os->find_relaxed_input_section(this, shndx);
5770 if (poris == NULL)
5771 return false;
5772 }
5773
5774 return true;
5775 }
5776
5777 // Determine if we want to scan the SHNDX-th section for relocation stubs.
5778 // This is a helper for Arm_relobj::scan_sections_for_stubs() below.
5779
5780 template<bool big_endian>
5781 bool
5782 Arm_relobj<big_endian>::section_needs_reloc_stub_scanning(
5783 const elfcpp::Shdr<32, big_endian>& shdr,
5784 const Relobj::Output_sections& out_sections,
5785 const Symbol_table *symtab,
5786 const unsigned char* pshdrs)
5787 {
5788 unsigned int sh_type = shdr.get_sh_type();
5789 if (sh_type != elfcpp::SHT_REL && sh_type != elfcpp::SHT_RELA)
5790 return false;
5791
5792 // Ignore empty section.
5793 off_t sh_size = shdr.get_sh_size();
5794 if (sh_size == 0)
5795 return false;
5796
5797 // Ignore reloc section with unexpected symbol table. The
5798 // error will be reported in the final link.
5799 if (this->adjust_shndx(shdr.get_sh_link()) != this->symtab_shndx())
5800 return false;
5801
5802 unsigned int reloc_size;
5803 if (sh_type == elfcpp::SHT_REL)
5804 reloc_size = elfcpp::Elf_sizes<32>::rel_size;
5805 else
5806 reloc_size = elfcpp::Elf_sizes<32>::rela_size;
5807
5808 // Ignore reloc section with unexpected entsize or uneven size.
5809 // The error will be reported in the final link.
5810 if (reloc_size != shdr.get_sh_entsize() || sh_size % reloc_size != 0)
5811 return false;
5812
5813 // Ignore reloc section with bad info. This error will be
5814 // reported in the final link.
5815 unsigned int index = this->adjust_shndx(shdr.get_sh_info());
5816 if (index >= this->shnum())
5817 return false;
5818
5819 const unsigned int shdr_size = elfcpp::Elf_sizes<32>::shdr_size;
5820 const elfcpp::Shdr<32, big_endian> text_shdr(pshdrs + index * shdr_size);
5821 return this->section_is_scannable(text_shdr, index,
5822 out_sections[index], symtab);
5823 }
5824
5825 // Return the output address of either a plain input section or a relaxed
5826 // input section. SHNDX is the section index. We define and use this
5827 // instead of calling Output_section::output_address because that is slow
5828 // for large output.
5829
5830 template<bool big_endian>
5831 Arm_address
5832 Arm_relobj<big_endian>::simple_input_section_output_address(
5833 unsigned int shndx,
5834 Output_section* os)
5835 {
5836 if (this->is_output_section_offset_invalid(shndx))
5837 {
5838 const Output_relaxed_input_section* poris =
5839 os->find_relaxed_input_section(this, shndx);
5840 // We do not handle merged sections here.
5841 gold_assert(poris != NULL);
5842 return poris->address();
5843 }
5844 else
5845 return os->address() + this->get_output_section_offset(shndx);
5846 }
5847
5848 // Determine if we want to scan the SHNDX-th section for non-relocation stubs.
5849 // This is a helper for Arm_relobj::scan_sections_for_stubs() below.
5850
5851 template<bool big_endian>
5852 bool
5853 Arm_relobj<big_endian>::section_needs_cortex_a8_stub_scanning(
5854 const elfcpp::Shdr<32, big_endian>& shdr,
5855 unsigned int shndx,
5856 Output_section* os,
5857 const Symbol_table* symtab)
5858 {
5859 if (!this->section_is_scannable(shdr, shndx, os, symtab))
5860 return false;
5861
5862 // If the section does not cross any 4K-boundaries, it does not need to
5863 // be scanned.
5864 Arm_address address = this->simple_input_section_output_address(shndx, os);
5865 if ((address & ~0xfffU) == ((address + shdr.get_sh_size() - 1) & ~0xfffU))
5866 return false;
5867
5868 return true;
5869 }
5870
5871 // Scan a section for Cortex-A8 workaround.
5872
5873 template<bool big_endian>
5874 void
5875 Arm_relobj<big_endian>::scan_section_for_cortex_a8_erratum(
5876 const elfcpp::Shdr<32, big_endian>& shdr,
5877 unsigned int shndx,
5878 Output_section* os,
5879 Target_arm<big_endian>* arm_target)
5880 {
5881 // Look for the first mapping symbol in this section. It should be
5882 // at (shndx, 0).
5883 Mapping_symbol_position section_start(shndx, 0);
5884 typename Mapping_symbols_info::const_iterator p =
5885 this->mapping_symbols_info_.lower_bound(section_start);
5886
5887 // There are no mapping symbols for this section. Treat it as a data-only
5888 // section. Issue a warning if section is marked as containing
5889 // instructions.
5890 if (p == this->mapping_symbols_info_.end() || p->first.first != shndx)
5891 {
5892 if ((this->section_flags(shndx) & elfcpp::SHF_EXECINSTR) != 0)
5893 gold_warning(_("cannot scan executable section %u of %s for Cortex-A8 "
5894 "erratum because it has no mapping symbols."),
5895 shndx, this->name().c_str());
5896 return;
5897 }
5898
5899 Arm_address output_address =
5900 this->simple_input_section_output_address(shndx, os);
5901
5902 // Get the section contents.
5903 section_size_type input_view_size = 0;
5904 const unsigned char* input_view =
5905 this->section_contents(shndx, &input_view_size, false);
5906
5907 // We need to go through the mapping symbols to determine what to
5908 // scan. There are two reasons. First, we should look at THUMB code and
5909 // THUMB code only. Second, we only want to look at the 4K-page boundary
5910 // to speed up the scanning.
5911
5912 while (p != this->mapping_symbols_info_.end()
5913 && p->first.first == shndx)
5914 {
5915 typename Mapping_symbols_info::const_iterator next =
5916 this->mapping_symbols_info_.upper_bound(p->first);
5917
5918 // Only scan part of a section with THUMB code.
5919 if (p->second == 't')
5920 {
5921 // Determine the end of this range.
5922 section_size_type span_start =
5923 convert_to_section_size_type(p->first.second);
5924 section_size_type span_end;
5925 if (next != this->mapping_symbols_info_.end()
5926 && next->first.first == shndx)
5927 span_end = convert_to_section_size_type(next->first.second);
5928 else
5929 span_end = convert_to_section_size_type(shdr.get_sh_size());
5930
5931 if (((span_start + output_address) & ~0xfffUL)
5932 != ((span_end + output_address - 1) & ~0xfffUL))
5933 {
5934 arm_target->scan_span_for_cortex_a8_erratum(this, shndx,
5935 span_start, span_end,
5936 input_view,
5937 output_address);
5938 }
5939 }
5940
5941 p = next;
5942 }
5943 }
5944
5945 // Scan relocations for stub generation.
5946
5947 template<bool big_endian>
5948 void
5949 Arm_relobj<big_endian>::scan_sections_for_stubs(
5950 Target_arm<big_endian>* arm_target,
5951 const Symbol_table* symtab,
5952 const Layout* layout)
5953 {
5954 unsigned int shnum = this->shnum();
5955 const unsigned int shdr_size = elfcpp::Elf_sizes<32>::shdr_size;
5956
5957 // Read the section headers.
5958 const unsigned char* pshdrs = this->get_view(this->elf_file()->shoff(),
5959 shnum * shdr_size,
5960 true, true);
5961
5962 // To speed up processing, we set up hash tables for fast lookup of
5963 // input offsets to output addresses.
5964 this->initialize_input_to_output_maps();
5965
5966 const Relobj::Output_sections& out_sections(this->output_sections());
5967
5968 Relocate_info<32, big_endian> relinfo;
5969 relinfo.symtab = symtab;
5970 relinfo.layout = layout;
5971 relinfo.object = this;
5972
5973 // Do relocation stubs scanning.
5974 const unsigned char* p = pshdrs + shdr_size;
5975 for (unsigned int i = 1; i < shnum; ++i, p += shdr_size)
5976 {
5977 const elfcpp::Shdr<32, big_endian> shdr(p);
5978 if (this->section_needs_reloc_stub_scanning(shdr, out_sections, symtab,
5979 pshdrs))
5980 {
5981 unsigned int index = this->adjust_shndx(shdr.get_sh_info());
5982 Arm_address output_offset = this->get_output_section_offset(index);
5983 Arm_address output_address;
5984 if (output_offset != invalid_address)
5985 output_address = out_sections[index]->address() + output_offset;
5986 else
5987 {
5988 // Currently this only happens for a relaxed section.
5989 const Output_relaxed_input_section* poris =
5990 out_sections[index]->find_relaxed_input_section(this, index);
5991 gold_assert(poris != NULL);
5992 output_address = poris->address();
5993 }
5994
5995 // Get the relocations.
5996 const unsigned char* prelocs = this->get_view(shdr.get_sh_offset(),
5997 shdr.get_sh_size(),
5998 true, false);
5999
6000 // Get the section contents. This does work for the case in which
6001 // we modify the contents of an input section. We need to pass the
6002 // output view under such circumstances.
6003 section_size_type input_view_size = 0;
6004 const unsigned char* input_view =
6005 this->section_contents(index, &input_view_size, false);
6006
6007 relinfo.reloc_shndx = i;
6008 relinfo.data_shndx = index;
6009 unsigned int sh_type = shdr.get_sh_type();
6010 unsigned int reloc_size;
6011 if (sh_type == elfcpp::SHT_REL)
6012 reloc_size = elfcpp::Elf_sizes<32>::rel_size;
6013 else
6014 reloc_size = elfcpp::Elf_sizes<32>::rela_size;
6015
6016 Output_section* os = out_sections[index];
6017 arm_target->scan_section_for_stubs(&relinfo, sh_type, prelocs,
6018 shdr.get_sh_size() / reloc_size,
6019 os,
6020 output_offset == invalid_address,
6021 input_view, output_address,
6022 input_view_size);
6023 }
6024 }
6025
6026 // Do Cortex-A8 erratum stubs scanning. This has to be done for a section
6027 // after its relocation section, if there is one, is processed for
6028 // relocation stubs. Merging this loop with the one above would have been
6029 // complicated since we would have had to make sure that relocation stub
6030 // scanning is done first.
6031 if (arm_target->fix_cortex_a8())
6032 {
6033 const unsigned char* p = pshdrs + shdr_size;
6034 for (unsigned int i = 1; i < shnum; ++i, p += shdr_size)
6035 {
6036 const elfcpp::Shdr<32, big_endian> shdr(p);
6037 if (this->section_needs_cortex_a8_stub_scanning(shdr, i,
6038 out_sections[i],
6039 symtab))
6040 this->scan_section_for_cortex_a8_erratum(shdr, i, out_sections[i],
6041 arm_target);
6042 }
6043 }
6044
6045 // After we've done the relocations, we release the hash tables,
6046 // since we no longer need them.
6047 this->free_input_to_output_maps();
6048 }
6049
6050 // Count the local symbols. The ARM backend needs to know if a symbol
6051 // is a THUMB function or not. For global symbols, it is easy because
6052 // the Symbol object keeps the ELF symbol type. For local symbol it is
6053 // harder because we cannot access this information. So we override the
6054 // do_count_local_symbol in parent and scan local symbols to mark
6055 // THUMB functions. This is not the most efficient way but I do not want to
6056 // slow down other ports by calling a per symbol targer hook inside
6057 // Sized_relobj<size, big_endian>::do_count_local_symbols.
6058
6059 template<bool big_endian>
6060 void
6061 Arm_relobj<big_endian>::do_count_local_symbols(
6062 Stringpool_template<char>* pool,
6063 Stringpool_template<char>* dynpool)
6064 {
6065 // We need to fix-up the values of any local symbols whose type are
6066 // STT_ARM_TFUNC.
6067
6068 // Ask parent to count the local symbols.
6069 Sized_relobj<32, big_endian>::do_count_local_symbols(pool, dynpool);
6070 const unsigned int loccount = this->local_symbol_count();
6071 if (loccount == 0)
6072 return;
6073
6074 // Intialize the thumb function bit-vector.
6075 std::vector<bool> empty_vector(loccount, false);
6076 this->local_symbol_is_thumb_function_.swap(empty_vector);
6077
6078 // Read the symbol table section header.
6079 const unsigned int symtab_shndx = this->symtab_shndx();
6080 elfcpp::Shdr<32, big_endian>
6081 symtabshdr(this, this->elf_file()->section_header(symtab_shndx));
6082 gold_assert(symtabshdr.get_sh_type() == elfcpp::SHT_SYMTAB);
6083
6084 // Read the local symbols.
6085 const int sym_size =elfcpp::Elf_sizes<32>::sym_size;
6086 gold_assert(loccount == symtabshdr.get_sh_info());
6087 off_t locsize = loccount * sym_size;
6088 const unsigned char* psyms = this->get_view(symtabshdr.get_sh_offset(),
6089 locsize, true, true);
6090
6091 // For mapping symbol processing, we need to read the symbol names.
6092 unsigned int strtab_shndx = this->adjust_shndx(symtabshdr.get_sh_link());
6093 if (strtab_shndx >= this->shnum())
6094 {
6095 this->error(_("invalid symbol table name index: %u"), strtab_shndx);
6096 return;
6097 }
6098
6099 elfcpp::Shdr<32, big_endian>
6100 strtabshdr(this, this->elf_file()->section_header(strtab_shndx));
6101 if (strtabshdr.get_sh_type() != elfcpp::SHT_STRTAB)
6102 {
6103 this->error(_("symbol table name section has wrong type: %u"),
6104 static_cast<unsigned int>(strtabshdr.get_sh_type()));
6105 return;
6106 }
6107 const char* pnames =
6108 reinterpret_cast<const char*>(this->get_view(strtabshdr.get_sh_offset(),
6109 strtabshdr.get_sh_size(),
6110 false, false));
6111
6112 // Loop over the local symbols and mark any local symbols pointing
6113 // to THUMB functions.
6114
6115 // Skip the first dummy symbol.
6116 psyms += sym_size;
6117 typename Sized_relobj<32, big_endian>::Local_values* plocal_values =
6118 this->local_values();
6119 for (unsigned int i = 1; i < loccount; ++i, psyms += sym_size)
6120 {
6121 elfcpp::Sym<32, big_endian> sym(psyms);
6122 elfcpp::STT st_type = sym.get_st_type();
6123 Symbol_value<32>& lv((*plocal_values)[i]);
6124 Arm_address input_value = lv.input_value();
6125
6126 // Check to see if this is a mapping symbol.
6127 const char* sym_name = pnames + sym.get_st_name();
6128 if (Target_arm<big_endian>::is_mapping_symbol_name(sym_name))
6129 {
6130 bool is_ordinary;
6131 unsigned int input_shndx =
6132 this->adjust_sym_shndx(i, sym.get_st_shndx(), &is_ordinary);
6133 gold_assert(is_ordinary);
6134
6135 // Strip of LSB in case this is a THUMB symbol.
6136 Mapping_symbol_position msp(input_shndx, input_value & ~1U);
6137 this->mapping_symbols_info_[msp] = sym_name[1];
6138 }
6139
6140 if (st_type == elfcpp::STT_ARM_TFUNC
6141 || (st_type == elfcpp::STT_FUNC && ((input_value & 1) != 0)))
6142 {
6143 // This is a THUMB function. Mark this and canonicalize the
6144 // symbol value by setting LSB.
6145 this->local_symbol_is_thumb_function_[i] = true;
6146 if ((input_value & 1) == 0)
6147 lv.set_input_value(input_value | 1);
6148 }
6149 }
6150 }
6151
6152 // Relocate sections.
6153 template<bool big_endian>
6154 void
6155 Arm_relobj<big_endian>::do_relocate_sections(
6156 const Symbol_table* symtab,
6157 const Layout* layout,
6158 const unsigned char* pshdrs,
6159 typename Sized_relobj<32, big_endian>::Views* pviews)
6160 {
6161 // Call parent to relocate sections.
6162 Sized_relobj<32, big_endian>::do_relocate_sections(symtab, layout, pshdrs,
6163 pviews);
6164
6165 // We do not generate stubs if doing a relocatable link.
6166 if (parameters->options().relocatable())
6167 return;
6168
6169 // Relocate stub tables.
6170 unsigned int shnum = this->shnum();
6171
6172 Target_arm<big_endian>* arm_target =
6173 Target_arm<big_endian>::default_target();
6174
6175 Relocate_info<32, big_endian> relinfo;
6176 relinfo.symtab = symtab;
6177 relinfo.layout = layout;
6178 relinfo.object = this;
6179
6180 for (unsigned int i = 1; i < shnum; ++i)
6181 {
6182 Arm_input_section<big_endian>* arm_input_section =
6183 arm_target->find_arm_input_section(this, i);
6184
6185 if (arm_input_section != NULL
6186 && arm_input_section->is_stub_table_owner()
6187 && !arm_input_section->stub_table()->empty())
6188 {
6189 // We cannot discard a section if it owns a stub table.
6190 Output_section* os = this->output_section(i);
6191 gold_assert(os != NULL);
6192
6193 relinfo.reloc_shndx = elfcpp::SHN_UNDEF;
6194 relinfo.reloc_shdr = NULL;
6195 relinfo.data_shndx = i;
6196 relinfo.data_shdr = pshdrs + i * elfcpp::Elf_sizes<32>::shdr_size;
6197
6198 gold_assert((*pviews)[i].view != NULL);
6199
6200 // We are passed the output section view. Adjust it to cover the
6201 // stub table only.
6202 Stub_table<big_endian>* stub_table = arm_input_section->stub_table();
6203 gold_assert((stub_table->address() >= (*pviews)[i].address)
6204 && ((stub_table->address() + stub_table->data_size())
6205 <= (*pviews)[i].address + (*pviews)[i].view_size));
6206
6207 off_t offset = stub_table->address() - (*pviews)[i].address;
6208 unsigned char* view = (*pviews)[i].view + offset;
6209 Arm_address address = stub_table->address();
6210 section_size_type view_size = stub_table->data_size();
6211
6212 stub_table->relocate_stubs(&relinfo, arm_target, os, view, address,
6213 view_size);
6214 }
6215
6216 // Apply Cortex A8 workaround if applicable.
6217 if (this->section_has_cortex_a8_workaround(i))
6218 {
6219 unsigned char* view = (*pviews)[i].view;
6220 Arm_address view_address = (*pviews)[i].address;
6221 section_size_type view_size = (*pviews)[i].view_size;
6222 Stub_table<big_endian>* stub_table = this->stub_tables_[i];
6223
6224 // Adjust view to cover section.
6225 Output_section* os = this->output_section(i);
6226 gold_assert(os != NULL);
6227 Arm_address section_address =
6228 this->simple_input_section_output_address(i, os);
6229 uint64_t section_size = this->section_size(i);
6230
6231 gold_assert(section_address >= view_address
6232 && ((section_address + section_size)
6233 <= (view_address + view_size)));
6234
6235 unsigned char* section_view = view + (section_address - view_address);
6236
6237 // Apply the Cortex-A8 workaround to the output address range
6238 // corresponding to this input section.
6239 stub_table->apply_cortex_a8_workaround_to_address_range(
6240 arm_target,
6241 section_view,
6242 section_address,
6243 section_size);
6244 }
6245 }
6246 }
6247
6248 // Find the linked text section of an EXIDX section by looking the the first
6249 // relocation. 4.4.1 of the EHABI specifications says that an EXIDX section
6250 // must be linked to to its associated code section via the sh_link field of
6251 // its section header. However, some tools are broken and the link is not
6252 // always set. LD just drops such an EXIDX section silently, causing the
6253 // associated code not unwindabled. Here we try a little bit harder to
6254 // discover the linked code section.
6255 //
6256 // PSHDR points to the section header of a relocation section of an EXIDX
6257 // section. If we can find a linked text section, return true and
6258 // store the text section index in the location PSHNDX. Otherwise
6259 // return false.
6260
6261 template<bool big_endian>
6262 bool
6263 Arm_relobj<big_endian>::find_linked_text_section(
6264 const unsigned char* pshdr,
6265 const unsigned char* psyms,
6266 unsigned int* pshndx)
6267 {
6268 elfcpp::Shdr<32, big_endian> shdr(pshdr);
6269
6270 // If there is no relocation, we cannot find the linked text section.
6271 size_t reloc_size;
6272 if (shdr.get_sh_type() == elfcpp::SHT_REL)
6273 reloc_size = elfcpp::Elf_sizes<32>::rel_size;
6274 else
6275 reloc_size = elfcpp::Elf_sizes<32>::rela_size;
6276 size_t reloc_count = shdr.get_sh_size() / reloc_size;
6277
6278 // Get the relocations.
6279 const unsigned char* prelocs =
6280 this->get_view(shdr.get_sh_offset(), shdr.get_sh_size(), true, false);
6281
6282 // Find the REL31 relocation for the first word of the first EXIDX entry.
6283 for (size_t i = 0; i < reloc_count; ++i, prelocs += reloc_size)
6284 {
6285 Arm_address r_offset;
6286 typename elfcpp::Elf_types<32>::Elf_WXword r_info;
6287 if (shdr.get_sh_type() == elfcpp::SHT_REL)
6288 {
6289 typename elfcpp::Rel<32, big_endian> reloc(prelocs);
6290 r_info = reloc.get_r_info();
6291 r_offset = reloc.get_r_offset();
6292 }
6293 else
6294 {
6295 typename elfcpp::Rela<32, big_endian> reloc(prelocs);
6296 r_info = reloc.get_r_info();
6297 r_offset = reloc.get_r_offset();
6298 }
6299
6300 unsigned int r_type = elfcpp::elf_r_type<32>(r_info);
6301 if (r_type != elfcpp::R_ARM_PREL31 && r_type != elfcpp::R_ARM_SBREL31)
6302 continue;
6303
6304 unsigned int r_sym = elfcpp::elf_r_sym<32>(r_info);
6305 if (r_sym == 0
6306 || r_sym >= this->local_symbol_count()
6307 || r_offset != 0)
6308 continue;
6309
6310 // This is the relocation for the first word of the first EXIDX entry.
6311 // We expect to see a local section symbol.
6312 const int sym_size = elfcpp::Elf_sizes<32>::sym_size;
6313 elfcpp::Sym<32, big_endian> sym(psyms + r_sym * sym_size);
6314 if (sym.get_st_type() == elfcpp::STT_SECTION)
6315 {
6316 bool is_ordinary;
6317 *pshndx =
6318 this->adjust_sym_shndx(r_sym, sym.get_st_shndx(), &is_ordinary);
6319 gold_assert(is_ordinary);
6320 return true;
6321 }
6322 else
6323 return false;
6324 }
6325
6326 return false;
6327 }
6328
6329 // Make an EXIDX input section object for an EXIDX section whose index is
6330 // SHNDX. SHDR is the section header of the EXIDX section and TEXT_SHNDX
6331 // is the section index of the linked text section.
6332
6333 template<bool big_endian>
6334 void
6335 Arm_relobj<big_endian>::make_exidx_input_section(
6336 unsigned int shndx,
6337 const elfcpp::Shdr<32, big_endian>& shdr,
6338 unsigned int text_shndx)
6339 {
6340 // Issue an error and ignore this EXIDX section if it points to a text
6341 // section already has an EXIDX section.
6342 if (this->exidx_section_map_[text_shndx] != NULL)
6343 {
6344 gold_error(_("EXIDX sections %u and %u both link to text section %u "
6345 "in %s"),
6346 shndx, this->exidx_section_map_[text_shndx]->shndx(),
6347 text_shndx, this->name().c_str());
6348 return;
6349 }
6350
6351 // Create an Arm_exidx_input_section object for this EXIDX section.
6352 Arm_exidx_input_section* exidx_input_section =
6353 new Arm_exidx_input_section(this, shndx, text_shndx, shdr.get_sh_size(),
6354 shdr.get_sh_addralign());
6355 this->exidx_section_map_[text_shndx] = exidx_input_section;
6356
6357 // Also map the EXIDX section index to this.
6358 gold_assert(this->exidx_section_map_[shndx] == NULL);
6359 this->exidx_section_map_[shndx] = exidx_input_section;
6360 }
6361
6362 // Read the symbol information.
6363
6364 template<bool big_endian>
6365 void
6366 Arm_relobj<big_endian>::do_read_symbols(Read_symbols_data* sd)
6367 {
6368 // Call parent class to read symbol information.
6369 Sized_relobj<32, big_endian>::do_read_symbols(sd);
6370
6371 // If this input file is a binary file, it has no processor
6372 // specific flags and attributes section.
6373 Input_file::Format format = this->input_file()->format();
6374 if (format != Input_file::FORMAT_ELF)
6375 {
6376 gold_assert(format == Input_file::FORMAT_BINARY);
6377 this->merge_flags_and_attributes_ = false;
6378 return;
6379 }
6380
6381 // Read processor-specific flags in ELF file header.
6382 const unsigned char* pehdr = this->get_view(elfcpp::file_header_offset,
6383 elfcpp::Elf_sizes<32>::ehdr_size,
6384 true, false);
6385 elfcpp::Ehdr<32, big_endian> ehdr(pehdr);
6386 this->processor_specific_flags_ = ehdr.get_e_flags();
6387
6388 // Go over the section headers and look for .ARM.attributes and .ARM.exidx
6389 // sections.
6390 std::vector<unsigned int> deferred_exidx_sections;
6391 const size_t shdr_size = elfcpp::Elf_sizes<32>::shdr_size;
6392 const unsigned char* pshdrs = sd->section_headers->data();
6393 const unsigned char *ps = pshdrs + shdr_size;
6394 bool must_merge_flags_and_attributes = false;
6395 for (unsigned int i = 1; i < this->shnum(); ++i, ps += shdr_size)
6396 {
6397 elfcpp::Shdr<32, big_endian> shdr(ps);
6398
6399 // Sometimes an object has no contents except the section name string
6400 // table and an empty symbol table with the undefined symbol. We
6401 // don't want to merge processor-specific flags from such an object.
6402 if (shdr.get_sh_type() == elfcpp::SHT_SYMTAB)
6403 {
6404 // Symbol table is not empty.
6405 const elfcpp::Elf_types<32>::Elf_WXword sym_size =
6406 elfcpp::Elf_sizes<32>::sym_size;
6407 if (shdr.get_sh_size() > sym_size)
6408 must_merge_flags_and_attributes = true;
6409 }
6410 else if (shdr.get_sh_type() != elfcpp::SHT_STRTAB)
6411 // If this is neither an empty symbol table nor a string table,
6412 // be conservative.
6413 must_merge_flags_and_attributes = true;
6414
6415 if (shdr.get_sh_type() == elfcpp::SHT_ARM_ATTRIBUTES)
6416 {
6417 gold_assert(this->attributes_section_data_ == NULL);
6418 section_offset_type section_offset = shdr.get_sh_offset();
6419 section_size_type section_size =
6420 convert_to_section_size_type(shdr.get_sh_size());
6421 File_view* view = this->get_lasting_view(section_offset,
6422 section_size, true, false);
6423 this->attributes_section_data_ =
6424 new Attributes_section_data(view->data(), section_size);
6425 }
6426 else if (shdr.get_sh_type() == elfcpp::SHT_ARM_EXIDX)
6427 {
6428 unsigned int text_shndx = this->adjust_shndx(shdr.get_sh_link());
6429 if (text_shndx >= this->shnum())
6430 gold_error(_("EXIDX section %u linked to invalid section %u"),
6431 i, text_shndx);
6432 else if (text_shndx == elfcpp::SHN_UNDEF)
6433 deferred_exidx_sections.push_back(i);
6434 else
6435 this->make_exidx_input_section(i, shdr, text_shndx);
6436 }
6437 }
6438
6439 // This is rare.
6440 if (!must_merge_flags_and_attributes)
6441 {
6442 this->merge_flags_and_attributes_ = false;
6443 return;
6444 }
6445
6446 // Some tools are broken and they do not set the link of EXIDX sections.
6447 // We look at the first relocation to figure out the linked sections.
6448 if (!deferred_exidx_sections.empty())
6449 {
6450 // We need to go over the section headers again to find the mapping
6451 // from sections being relocated to their relocation sections. This is
6452 // a bit inefficient as we could do that in the loop above. However,
6453 // we do not expect any deferred EXIDX sections normally. So we do not
6454 // want to slow down the most common path.
6455 typedef Unordered_map<unsigned int, unsigned int> Reloc_map;
6456 Reloc_map reloc_map;
6457 ps = pshdrs + shdr_size;
6458 for (unsigned int i = 1; i < this->shnum(); ++i, ps += shdr_size)
6459 {
6460 elfcpp::Shdr<32, big_endian> shdr(ps);
6461 elfcpp::Elf_Word sh_type = shdr.get_sh_type();
6462 if (sh_type == elfcpp::SHT_REL || sh_type == elfcpp::SHT_RELA)
6463 {
6464 unsigned int info_shndx = this->adjust_shndx(shdr.get_sh_info());
6465 if (info_shndx >= this->shnum())
6466 gold_error(_("relocation section %u has invalid info %u"),
6467 i, info_shndx);
6468 Reloc_map::value_type value(info_shndx, i);
6469 std::pair<Reloc_map::iterator, bool> result =
6470 reloc_map.insert(value);
6471 if (!result.second)
6472 gold_error(_("section %u has multiple relocation sections "
6473 "%u and %u"),
6474 info_shndx, i, reloc_map[info_shndx]);
6475 }
6476 }
6477
6478 // Read the symbol table section header.
6479 const unsigned int symtab_shndx = this->symtab_shndx();
6480 elfcpp::Shdr<32, big_endian>
6481 symtabshdr(this, this->elf_file()->section_header(symtab_shndx));
6482 gold_assert(symtabshdr.get_sh_type() == elfcpp::SHT_SYMTAB);
6483
6484 // Read the local symbols.
6485 const int sym_size =elfcpp::Elf_sizes<32>::sym_size;
6486 const unsigned int loccount = this->local_symbol_count();
6487 gold_assert(loccount == symtabshdr.get_sh_info());
6488 off_t locsize = loccount * sym_size;
6489 const unsigned char* psyms = this->get_view(symtabshdr.get_sh_offset(),
6490 locsize, true, true);
6491
6492 // Process the deferred EXIDX sections.
6493 for(unsigned int i = 0; i < deferred_exidx_sections.size(); ++i)
6494 {
6495 unsigned int shndx = deferred_exidx_sections[i];
6496 elfcpp::Shdr<32, big_endian> shdr(pshdrs + shndx * shdr_size);
6497 unsigned int text_shndx;
6498 Reloc_map::const_iterator it = reloc_map.find(shndx);
6499 if (it != reloc_map.end()
6500 && find_linked_text_section(pshdrs + it->second * shdr_size,
6501 psyms, &text_shndx))
6502 this->make_exidx_input_section(shndx, shdr, text_shndx);
6503 else
6504 gold_error(_("EXIDX section %u has no linked text section."),
6505 shndx);
6506 }
6507 }
6508 }
6509
6510 // Process relocations for garbage collection. The ARM target uses .ARM.exidx
6511 // sections for unwinding. These sections are referenced implicitly by
6512 // text sections linked in the section headers. If we ignore these implict
6513 // references, the .ARM.exidx sections and any .ARM.extab sections they use
6514 // will be garbage-collected incorrectly. Hence we override the same function
6515 // in the base class to handle these implicit references.
6516
6517 template<bool big_endian>
6518 void
6519 Arm_relobj<big_endian>::do_gc_process_relocs(Symbol_table* symtab,
6520 Layout* layout,
6521 Read_relocs_data* rd)
6522 {
6523 // First, call base class method to process relocations in this object.
6524 Sized_relobj<32, big_endian>::do_gc_process_relocs(symtab, layout, rd);
6525
6526 // If --gc-sections is not specified, there is nothing more to do.
6527 // This happens when --icf is used but --gc-sections is not.
6528 if (!parameters->options().gc_sections())
6529 return;
6530
6531 unsigned int shnum = this->shnum();
6532 const unsigned int shdr_size = elfcpp::Elf_sizes<32>::shdr_size;
6533 const unsigned char* pshdrs = this->get_view(this->elf_file()->shoff(),
6534 shnum * shdr_size,
6535 true, true);
6536
6537 // Scan section headers for sections of type SHT_ARM_EXIDX. Add references
6538 // to these from the linked text sections.
6539 const unsigned char* ps = pshdrs + shdr_size;
6540 for (unsigned int i = 1; i < shnum; ++i, ps += shdr_size)
6541 {
6542 elfcpp::Shdr<32, big_endian> shdr(ps);
6543 if (shdr.get_sh_type() == elfcpp::SHT_ARM_EXIDX)
6544 {
6545 // Found an .ARM.exidx section, add it to the set of reachable
6546 // sections from its linked text section.
6547 unsigned int text_shndx = this->adjust_shndx(shdr.get_sh_link());
6548 symtab->gc()->add_reference(this, text_shndx, this, i);
6549 }
6550 }
6551 }
6552
6553 // Update output local symbol count. Owing to EXIDX entry merging, some local
6554 // symbols will be removed in output. Adjust output local symbol count
6555 // accordingly. We can only changed the static output local symbol count. It
6556 // is too late to change the dynamic symbols.
6557
6558 template<bool big_endian>
6559 void
6560 Arm_relobj<big_endian>::update_output_local_symbol_count()
6561 {
6562 // Caller should check that this needs updating. We want caller checking
6563 // because output_local_symbol_count_needs_update() is most likely inlined.
6564 gold_assert(this->output_local_symbol_count_needs_update_);
6565
6566 gold_assert(this->symtab_shndx() != -1U);
6567 if (this->symtab_shndx() == 0)
6568 {
6569 // This object has no symbols. Weird but legal.
6570 return;
6571 }
6572
6573 // Read the symbol table section header.
6574 const unsigned int symtab_shndx = this->symtab_shndx();
6575 elfcpp::Shdr<32, big_endian>
6576 symtabshdr(this, this->elf_file()->section_header(symtab_shndx));
6577 gold_assert(symtabshdr.get_sh_type() == elfcpp::SHT_SYMTAB);
6578
6579 // Read the local symbols.
6580 const int sym_size = elfcpp::Elf_sizes<32>::sym_size;
6581 const unsigned int loccount = this->local_symbol_count();
6582 gold_assert(loccount == symtabshdr.get_sh_info());
6583 off_t locsize = loccount * sym_size;
6584 const unsigned char* psyms = this->get_view(symtabshdr.get_sh_offset(),
6585 locsize, true, true);
6586
6587 // Loop over the local symbols.
6588
6589 typedef typename Sized_relobj<32, big_endian>::Output_sections
6590 Output_sections;
6591 const Output_sections& out_sections(this->output_sections());
6592 unsigned int shnum = this->shnum();
6593 unsigned int count = 0;
6594 // Skip the first, dummy, symbol.
6595 psyms += sym_size;
6596 for (unsigned int i = 1; i < loccount; ++i, psyms += sym_size)
6597 {
6598 elfcpp::Sym<32, big_endian> sym(psyms);
6599
6600 Symbol_value<32>& lv((*this->local_values())[i]);
6601
6602 // This local symbol was already discarded by do_count_local_symbols.
6603 if (lv.is_output_symtab_index_set() && !lv.has_output_symtab_entry())
6604 continue;
6605
6606 bool is_ordinary;
6607 unsigned int shndx = this->adjust_sym_shndx(i, sym.get_st_shndx(),
6608 &is_ordinary);
6609
6610 if (shndx < shnum)
6611 {
6612 Output_section* os = out_sections[shndx];
6613
6614 // This local symbol no longer has an output section. Discard it.
6615 if (os == NULL)
6616 {
6617 lv.set_no_output_symtab_entry();
6618 continue;
6619 }
6620
6621 // Currently we only discard parts of EXIDX input sections.
6622 // We explicitly check for a merged EXIDX input section to avoid
6623 // calling Output_section_data::output_offset unless necessary.
6624 if ((this->get_output_section_offset(shndx) == invalid_address)
6625 && (this->exidx_input_section_by_shndx(shndx) != NULL))
6626 {
6627 section_offset_type output_offset =
6628 os->output_offset(this, shndx, lv.input_value());
6629 if (output_offset == -1)
6630 {
6631 // This symbol is defined in a part of an EXIDX input section
6632 // that is discarded due to entry merging.
6633 lv.set_no_output_symtab_entry();
6634 continue;
6635 }
6636 }
6637 }
6638
6639 ++count;
6640 }
6641
6642 this->set_output_local_symbol_count(count);
6643 this->output_local_symbol_count_needs_update_ = false;
6644 }
6645
6646 // Arm_dynobj methods.
6647
6648 // Read the symbol information.
6649
6650 template<bool big_endian>
6651 void
6652 Arm_dynobj<big_endian>::do_read_symbols(Read_symbols_data* sd)
6653 {
6654 // Call parent class to read symbol information.
6655 Sized_dynobj<32, big_endian>::do_read_symbols(sd);
6656
6657 // Read processor-specific flags in ELF file header.
6658 const unsigned char* pehdr = this->get_view(elfcpp::file_header_offset,
6659 elfcpp::Elf_sizes<32>::ehdr_size,
6660 true, false);
6661 elfcpp::Ehdr<32, big_endian> ehdr(pehdr);
6662 this->processor_specific_flags_ = ehdr.get_e_flags();
6663
6664 // Read the attributes section if there is one.
6665 // We read from the end because gas seems to put it near the end of
6666 // the section headers.
6667 const size_t shdr_size = elfcpp::Elf_sizes<32>::shdr_size;
6668 const unsigned char *ps =
6669 sd->section_headers->data() + shdr_size * (this->shnum() - 1);
6670 for (unsigned int i = this->shnum(); i > 0; --i, ps -= shdr_size)
6671 {
6672 elfcpp::Shdr<32, big_endian> shdr(ps);
6673 if (shdr.get_sh_type() == elfcpp::SHT_ARM_ATTRIBUTES)
6674 {
6675 section_offset_type section_offset = shdr.get_sh_offset();
6676 section_size_type section_size =
6677 convert_to_section_size_type(shdr.get_sh_size());
6678 File_view* view = this->get_lasting_view(section_offset,
6679 section_size, true, false);
6680 this->attributes_section_data_ =
6681 new Attributes_section_data(view->data(), section_size);
6682 break;
6683 }
6684 }
6685 }
6686
6687 // Stub_addend_reader methods.
6688
6689 // Read the addend of a REL relocation of type R_TYPE at VIEW.
6690
6691 template<bool big_endian>
6692 elfcpp::Elf_types<32>::Elf_Swxword
6693 Stub_addend_reader<elfcpp::SHT_REL, big_endian>::operator()(
6694 unsigned int r_type,
6695 const unsigned char* view,
6696 const typename Reloc_types<elfcpp::SHT_REL, 32, big_endian>::Reloc&) const
6697 {
6698 typedef struct Arm_relocate_functions<big_endian> RelocFuncs;
6699
6700 switch (r_type)
6701 {
6702 case elfcpp::R_ARM_CALL:
6703 case elfcpp::R_ARM_JUMP24:
6704 case elfcpp::R_ARM_PLT32:
6705 {
6706 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
6707 const Valtype* wv = reinterpret_cast<const Valtype*>(view);
6708 Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
6709 return utils::sign_extend<26>(val << 2);
6710 }
6711
6712 case elfcpp::R_ARM_THM_CALL:
6713 case elfcpp::R_ARM_THM_JUMP24:
6714 case elfcpp::R_ARM_THM_XPC22:
6715 {
6716 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
6717 const Valtype* wv = reinterpret_cast<const Valtype*>(view);
6718 Valtype upper_insn = elfcpp::Swap<16, big_endian>::readval(wv);
6719 Valtype lower_insn = elfcpp::Swap<16, big_endian>::readval(wv + 1);
6720 return RelocFuncs::thumb32_branch_offset(upper_insn, lower_insn);
6721 }
6722
6723 case elfcpp::R_ARM_THM_JUMP19:
6724 {
6725 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
6726 const Valtype* wv = reinterpret_cast<const Valtype*>(view);
6727 Valtype upper_insn = elfcpp::Swap<16, big_endian>::readval(wv);
6728 Valtype lower_insn = elfcpp::Swap<16, big_endian>::readval(wv + 1);
6729 return RelocFuncs::thumb32_cond_branch_offset(upper_insn, lower_insn);
6730 }
6731
6732 default:
6733 gold_unreachable();
6734 }
6735 }
6736
6737 // Arm_output_data_got methods.
6738
6739 // Add a GOT pair for R_ARM_TLS_GD32. The creates a pair of GOT entries.
6740 // The first one is initialized to be 1, which is the module index for
6741 // the main executable and the second one 0. A reloc of the type
6742 // R_ARM_TLS_DTPOFF32 will be created for the second GOT entry and will
6743 // be applied by gold. GSYM is a global symbol.
6744 //
6745 template<bool big_endian>
6746 void
6747 Arm_output_data_got<big_endian>::add_tls_gd32_with_static_reloc(
6748 unsigned int got_type,
6749 Symbol* gsym)
6750 {
6751 if (gsym->has_got_offset(got_type))
6752 return;
6753
6754 // We are doing a static link. Just mark it as belong to module 1,
6755 // the executable.
6756 unsigned int got_offset = this->add_constant(1);
6757 gsym->set_got_offset(got_type, got_offset);
6758 got_offset = this->add_constant(0);
6759 this->static_relocs_.push_back(Static_reloc(got_offset,
6760 elfcpp::R_ARM_TLS_DTPOFF32,
6761 gsym));
6762 }
6763
6764 // Same as the above but for a local symbol.
6765
6766 template<bool big_endian>
6767 void
6768 Arm_output_data_got<big_endian>::add_tls_gd32_with_static_reloc(
6769 unsigned int got_type,
6770 Sized_relobj<32, big_endian>* object,
6771 unsigned int index)
6772 {
6773 if (object->local_has_got_offset(index, got_type))
6774 return;
6775
6776 // We are doing a static link. Just mark it as belong to module 1,
6777 // the executable.
6778 unsigned int got_offset = this->add_constant(1);
6779 object->set_local_got_offset(index, got_type, got_offset);
6780 got_offset = this->add_constant(0);
6781 this->static_relocs_.push_back(Static_reloc(got_offset,
6782 elfcpp::R_ARM_TLS_DTPOFF32,
6783 object, index));
6784 }
6785
6786 template<bool big_endian>
6787 void
6788 Arm_output_data_got<big_endian>::do_write(Output_file* of)
6789 {
6790 // Call parent to write out GOT.
6791 Output_data_got<32, big_endian>::do_write(of);
6792
6793 // We are done if there is no fix up.
6794 if (this->static_relocs_.empty())
6795 return;
6796
6797 gold_assert(parameters->doing_static_link());
6798
6799 const off_t offset = this->offset();
6800 const section_size_type oview_size =
6801 convert_to_section_size_type(this->data_size());
6802 unsigned char* const oview = of->get_output_view(offset, oview_size);
6803
6804 Output_segment* tls_segment = this->layout_->tls_segment();
6805 gold_assert(tls_segment != NULL);
6806
6807 // The thread pointer $tp points to the TCB, which is followed by the
6808 // TLS. So we need to adjust $tp relative addressing by this amount.
6809 Arm_address aligned_tcb_size =
6810 align_address(ARM_TCB_SIZE, tls_segment->maximum_alignment());
6811
6812 for (size_t i = 0; i < this->static_relocs_.size(); ++i)
6813 {
6814 Static_reloc& reloc(this->static_relocs_[i]);
6815
6816 Arm_address value;
6817 if (!reloc.symbol_is_global())
6818 {
6819 Sized_relobj<32, big_endian>* object = reloc.relobj();
6820 const Symbol_value<32>* psymval =
6821 reloc.relobj()->local_symbol(reloc.index());
6822
6823 // We are doing static linking. Issue an error and skip this
6824 // relocation if the symbol is undefined or in a discarded_section.
6825 bool is_ordinary;
6826 unsigned int shndx = psymval->input_shndx(&is_ordinary);
6827 if ((shndx == elfcpp::SHN_UNDEF)
6828 || (is_ordinary
6829 && shndx != elfcpp::SHN_UNDEF
6830 && !object->is_section_included(shndx)
6831 && !this->symbol_table_->is_section_folded(object, shndx)))
6832 {
6833 gold_error(_("undefined or discarded local symbol %u from "
6834 " object %s in GOT"),
6835 reloc.index(), reloc.relobj()->name().c_str());
6836 continue;
6837 }
6838
6839 value = psymval->value(object, 0);
6840 }
6841 else
6842 {
6843 const Symbol* gsym = reloc.symbol();
6844 gold_assert(gsym != NULL);
6845 if (gsym->is_forwarder())
6846 gsym = this->symbol_table_->resolve_forwards(gsym);
6847
6848 // We are doing static linking. Issue an error and skip this
6849 // relocation if the symbol is undefined or in a discarded_section
6850 // unless it is a weakly_undefined symbol.
6851 if ((gsym->is_defined_in_discarded_section()
6852 || gsym->is_undefined())
6853 && !gsym->is_weak_undefined())
6854 {
6855 gold_error(_("undefined or discarded symbol %s in GOT"),
6856 gsym->name());
6857 continue;
6858 }
6859
6860 if (!gsym->is_weak_undefined())
6861 {
6862 const Sized_symbol<32>* sym =
6863 static_cast<const Sized_symbol<32>*>(gsym);
6864 value = sym->value();
6865 }
6866 else
6867 value = 0;
6868 }
6869
6870 unsigned got_offset = reloc.got_offset();
6871 gold_assert(got_offset < oview_size);
6872
6873 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
6874 Valtype* wv = reinterpret_cast<Valtype*>(oview + got_offset);
6875 Valtype x;
6876 switch (reloc.r_type())
6877 {
6878 case elfcpp::R_ARM_TLS_DTPOFF32:
6879 x = value;
6880 break;
6881 case elfcpp::R_ARM_TLS_TPOFF32:
6882 x = value + aligned_tcb_size;
6883 break;
6884 default:
6885 gold_unreachable();
6886 }
6887 elfcpp::Swap<32, big_endian>::writeval(wv, x);
6888 }
6889
6890 of->write_output_view(offset, oview_size, oview);
6891 }
6892
6893 // A class to handle the PLT data.
6894
6895 template<bool big_endian>
6896 class Output_data_plt_arm : public Output_section_data
6897 {
6898 public:
6899 typedef Output_data_reloc<elfcpp::SHT_REL, true, 32, big_endian>
6900 Reloc_section;
6901
6902 Output_data_plt_arm(Layout*, Output_data_space*);
6903
6904 // Add an entry to the PLT.
6905 void
6906 add_entry(Symbol* gsym);
6907
6908 // Return the .rel.plt section data.
6909 const Reloc_section*
6910 rel_plt() const
6911 { return this->rel_; }
6912
6913 protected:
6914 void
6915 do_adjust_output_section(Output_section* os);
6916
6917 // Write to a map file.
6918 void
6919 do_print_to_mapfile(Mapfile* mapfile) const
6920 { mapfile->print_output_data(this, _("** PLT")); }
6921
6922 private:
6923 // Template for the first PLT entry.
6924 static const uint32_t first_plt_entry[5];
6925
6926 // Template for subsequent PLT entries.
6927 static const uint32_t plt_entry[3];
6928
6929 // Set the final size.
6930 void
6931 set_final_data_size()
6932 {
6933 this->set_data_size(sizeof(first_plt_entry)
6934 + this->count_ * sizeof(plt_entry));
6935 }
6936
6937 // Write out the PLT data.
6938 void
6939 do_write(Output_file*);
6940
6941 // The reloc section.
6942 Reloc_section* rel_;
6943 // The .got.plt section.
6944 Output_data_space* got_plt_;
6945 // The number of PLT entries.
6946 unsigned int count_;
6947 };
6948
6949 // Create the PLT section. The ordinary .got section is an argument,
6950 // since we need to refer to the start. We also create our own .got
6951 // section just for PLT entries.
6952
6953 template<bool big_endian>
6954 Output_data_plt_arm<big_endian>::Output_data_plt_arm(Layout* layout,
6955 Output_data_space* got_plt)
6956 : Output_section_data(4), got_plt_(got_plt), count_(0)
6957 {
6958 this->rel_ = new Reloc_section(false);
6959 layout->add_output_section_data(".rel.plt", elfcpp::SHT_REL,
6960 elfcpp::SHF_ALLOC, this->rel_, true, false,
6961 false, false);
6962 }
6963
6964 template<bool big_endian>
6965 void
6966 Output_data_plt_arm<big_endian>::do_adjust_output_section(Output_section* os)
6967 {
6968 os->set_entsize(0);
6969 }
6970
6971 // Add an entry to the PLT.
6972
6973 template<bool big_endian>
6974 void
6975 Output_data_plt_arm<big_endian>::add_entry(Symbol* gsym)
6976 {
6977 gold_assert(!gsym->has_plt_offset());
6978
6979 // Note that when setting the PLT offset we skip the initial
6980 // reserved PLT entry.
6981 gsym->set_plt_offset((this->count_) * sizeof(plt_entry)
6982 + sizeof(first_plt_entry));
6983
6984 ++this->count_;
6985
6986 section_offset_type got_offset = this->got_plt_->current_data_size();
6987
6988 // Every PLT entry needs a GOT entry which points back to the PLT
6989 // entry (this will be changed by the dynamic linker, normally
6990 // lazily when the function is called).
6991 this->got_plt_->set_current_data_size(got_offset + 4);
6992
6993 // Every PLT entry needs a reloc.
6994 gsym->set_needs_dynsym_entry();
6995 this->rel_->add_global(gsym, elfcpp::R_ARM_JUMP_SLOT, this->got_plt_,
6996 got_offset);
6997
6998 // Note that we don't need to save the symbol. The contents of the
6999 // PLT are independent of which symbols are used. The symbols only
7000 // appear in the relocations.
7001 }
7002
7003 // ARM PLTs.
7004 // FIXME: This is not very flexible. Right now this has only been tested
7005 // on armv5te. If we are to support additional architecture features like
7006 // Thumb-2 or BE8, we need to make this more flexible like GNU ld.
7007
7008 // The first entry in the PLT.
7009 template<bool big_endian>
7010 const uint32_t Output_data_plt_arm<big_endian>::first_plt_entry[5] =
7011 {
7012 0xe52de004, // str lr, [sp, #-4]!
7013 0xe59fe004, // ldr lr, [pc, #4]
7014 0xe08fe00e, // add lr, pc, lr
7015 0xe5bef008, // ldr pc, [lr, #8]!
7016 0x00000000, // &GOT[0] - .
7017 };
7018
7019 // Subsequent entries in the PLT.
7020
7021 template<bool big_endian>
7022 const uint32_t Output_data_plt_arm<big_endian>::plt_entry[3] =
7023 {
7024 0xe28fc600, // add ip, pc, #0xNN00000
7025 0xe28cca00, // add ip, ip, #0xNN000
7026 0xe5bcf000, // ldr pc, [ip, #0xNNN]!
7027 };
7028
7029 // Write out the PLT. This uses the hand-coded instructions above,
7030 // and adjusts them as needed. This is all specified by the arm ELF
7031 // Processor Supplement.
7032
7033 template<bool big_endian>
7034 void
7035 Output_data_plt_arm<big_endian>::do_write(Output_file* of)
7036 {
7037 const off_t offset = this->offset();
7038 const section_size_type oview_size =
7039 convert_to_section_size_type(this->data_size());
7040 unsigned char* const oview = of->get_output_view(offset, oview_size);
7041
7042 const off_t got_file_offset = this->got_plt_->offset();
7043 const section_size_type got_size =
7044 convert_to_section_size_type(this->got_plt_->data_size());
7045 unsigned char* const got_view = of->get_output_view(got_file_offset,
7046 got_size);
7047 unsigned char* pov = oview;
7048
7049 Arm_address plt_address = this->address();
7050 Arm_address got_address = this->got_plt_->address();
7051
7052 // Write first PLT entry. All but the last word are constants.
7053 const size_t num_first_plt_words = (sizeof(first_plt_entry)
7054 / sizeof(plt_entry[0]));
7055 for (size_t i = 0; i < num_first_plt_words - 1; i++)
7056 elfcpp::Swap<32, big_endian>::writeval(pov + i * 4, first_plt_entry[i]);
7057 // Last word in first PLT entry is &GOT[0] - .
7058 elfcpp::Swap<32, big_endian>::writeval(pov + 16,
7059 got_address - (plt_address + 16));
7060 pov += sizeof(first_plt_entry);
7061
7062 unsigned char* got_pov = got_view;
7063
7064 memset(got_pov, 0, 12);
7065 got_pov += 12;
7066
7067 const int rel_size = elfcpp::Elf_sizes<32>::rel_size;
7068 unsigned int plt_offset = sizeof(first_plt_entry);
7069 unsigned int plt_rel_offset = 0;
7070 unsigned int got_offset = 12;
7071 const unsigned int count = this->count_;
7072 for (unsigned int i = 0;
7073 i < count;
7074 ++i,
7075 pov += sizeof(plt_entry),
7076 got_pov += 4,
7077 plt_offset += sizeof(plt_entry),
7078 plt_rel_offset += rel_size,
7079 got_offset += 4)
7080 {
7081 // Set and adjust the PLT entry itself.
7082 int32_t offset = ((got_address + got_offset)
7083 - (plt_address + plt_offset + 8));
7084
7085 gold_assert(offset >= 0 && offset < 0x0fffffff);
7086 uint32_t plt_insn0 = plt_entry[0] | ((offset >> 20) & 0xff);
7087 elfcpp::Swap<32, big_endian>::writeval(pov, plt_insn0);
7088 uint32_t plt_insn1 = plt_entry[1] | ((offset >> 12) & 0xff);
7089 elfcpp::Swap<32, big_endian>::writeval(pov + 4, plt_insn1);
7090 uint32_t plt_insn2 = plt_entry[2] | (offset & 0xfff);
7091 elfcpp::Swap<32, big_endian>::writeval(pov + 8, plt_insn2);
7092
7093 // Set the entry in the GOT.
7094 elfcpp::Swap<32, big_endian>::writeval(got_pov, plt_address);
7095 }
7096
7097 gold_assert(static_cast<section_size_type>(pov - oview) == oview_size);
7098 gold_assert(static_cast<section_size_type>(got_pov - got_view) == got_size);
7099
7100 of->write_output_view(offset, oview_size, oview);
7101 of->write_output_view(got_file_offset, got_size, got_view);
7102 }
7103
7104 // Create a PLT entry for a global symbol.
7105
7106 template<bool big_endian>
7107 void
7108 Target_arm<big_endian>::make_plt_entry(Symbol_table* symtab, Layout* layout,
7109 Symbol* gsym)
7110 {
7111 if (gsym->has_plt_offset())
7112 return;
7113
7114 if (this->plt_ == NULL)
7115 {
7116 // Create the GOT sections first.
7117 this->got_section(symtab, layout);
7118
7119 this->plt_ = new Output_data_plt_arm<big_endian>(layout, this->got_plt_);
7120 layout->add_output_section_data(".plt", elfcpp::SHT_PROGBITS,
7121 (elfcpp::SHF_ALLOC
7122 | elfcpp::SHF_EXECINSTR),
7123 this->plt_, false, false, false, false);
7124 }
7125 this->plt_->add_entry(gsym);
7126 }
7127
7128 // Get the section to use for TLS_DESC relocations.
7129
7130 template<bool big_endian>
7131 typename Target_arm<big_endian>::Reloc_section*
7132 Target_arm<big_endian>::rel_tls_desc_section(Layout* layout) const
7133 {
7134 return this->plt_section()->rel_tls_desc(layout);
7135 }
7136
7137 // Define the _TLS_MODULE_BASE_ symbol in the TLS segment.
7138
7139 template<bool big_endian>
7140 void
7141 Target_arm<big_endian>::define_tls_base_symbol(
7142 Symbol_table* symtab,
7143 Layout* layout)
7144 {
7145 if (this->tls_base_symbol_defined_)
7146 return;
7147
7148 Output_segment* tls_segment = layout->tls_segment();
7149 if (tls_segment != NULL)
7150 {
7151 bool is_exec = parameters->options().output_is_executable();
7152 symtab->define_in_output_segment("_TLS_MODULE_BASE_", NULL,
7153 Symbol_table::PREDEFINED,
7154 tls_segment, 0, 0,
7155 elfcpp::STT_TLS,
7156 elfcpp::STB_LOCAL,
7157 elfcpp::STV_HIDDEN, 0,
7158 (is_exec
7159 ? Symbol::SEGMENT_END
7160 : Symbol::SEGMENT_START),
7161 true);
7162 }
7163 this->tls_base_symbol_defined_ = true;
7164 }
7165
7166 // Create a GOT entry for the TLS module index.
7167
7168 template<bool big_endian>
7169 unsigned int
7170 Target_arm<big_endian>::got_mod_index_entry(
7171 Symbol_table* symtab,
7172 Layout* layout,
7173 Sized_relobj<32, big_endian>* object)
7174 {
7175 if (this->got_mod_index_offset_ == -1U)
7176 {
7177 gold_assert(symtab != NULL && layout != NULL && object != NULL);
7178 Arm_output_data_got<big_endian>* got = this->got_section(symtab, layout);
7179 unsigned int got_offset;
7180 if (!parameters->doing_static_link())
7181 {
7182 got_offset = got->add_constant(0);
7183 Reloc_section* rel_dyn = this->rel_dyn_section(layout);
7184 rel_dyn->add_local(object, 0, elfcpp::R_ARM_TLS_DTPMOD32, got,
7185 got_offset);
7186 }
7187 else
7188 {
7189 // We are doing a static link. Just mark it as belong to module 1,
7190 // the executable.
7191 got_offset = got->add_constant(1);
7192 }
7193
7194 got->add_constant(0);
7195 this->got_mod_index_offset_ = got_offset;
7196 }
7197 return this->got_mod_index_offset_;
7198 }
7199
7200 // Optimize the TLS relocation type based on what we know about the
7201 // symbol. IS_FINAL is true if the final address of this symbol is
7202 // known at link time.
7203
7204 template<bool big_endian>
7205 tls::Tls_optimization
7206 Target_arm<big_endian>::optimize_tls_reloc(bool, int)
7207 {
7208 // FIXME: Currently we do not do any TLS optimization.
7209 return tls::TLSOPT_NONE;
7210 }
7211
7212 // Report an unsupported relocation against a local symbol.
7213
7214 template<bool big_endian>
7215 void
7216 Target_arm<big_endian>::Scan::unsupported_reloc_local(
7217 Sized_relobj<32, big_endian>* object,
7218 unsigned int r_type)
7219 {
7220 gold_error(_("%s: unsupported reloc %u against local symbol"),
7221 object->name().c_str(), r_type);
7222 }
7223
7224 // We are about to emit a dynamic relocation of type R_TYPE. If the
7225 // dynamic linker does not support it, issue an error. The GNU linker
7226 // only issues a non-PIC error for an allocated read-only section.
7227 // Here we know the section is allocated, but we don't know that it is
7228 // read-only. But we check for all the relocation types which the
7229 // glibc dynamic linker supports, so it seems appropriate to issue an
7230 // error even if the section is not read-only.
7231
7232 template<bool big_endian>
7233 void
7234 Target_arm<big_endian>::Scan::check_non_pic(Relobj* object,
7235 unsigned int r_type)
7236 {
7237 switch (r_type)
7238 {
7239 // These are the relocation types supported by glibc for ARM.
7240 case elfcpp::R_ARM_RELATIVE:
7241 case elfcpp::R_ARM_COPY:
7242 case elfcpp::R_ARM_GLOB_DAT:
7243 case elfcpp::R_ARM_JUMP_SLOT:
7244 case elfcpp::R_ARM_ABS32:
7245 case elfcpp::R_ARM_ABS32_NOI:
7246 case elfcpp::R_ARM_PC24:
7247 // FIXME: The following 3 types are not supported by Android's dynamic
7248 // linker.
7249 case elfcpp::R_ARM_TLS_DTPMOD32:
7250 case elfcpp::R_ARM_TLS_DTPOFF32:
7251 case elfcpp::R_ARM_TLS_TPOFF32:
7252 return;
7253
7254 default:
7255 {
7256 // This prevents us from issuing more than one error per reloc
7257 // section. But we can still wind up issuing more than one
7258 // error per object file.
7259 if (this->issued_non_pic_error_)
7260 return;
7261 const Arm_reloc_property* reloc_property =
7262 arm_reloc_property_table->get_reloc_property(r_type);
7263 gold_assert(reloc_property != NULL);
7264 object->error(_("requires unsupported dynamic reloc %s; "
7265 "recompile with -fPIC"),
7266 reloc_property->name().c_str());
7267 this->issued_non_pic_error_ = true;
7268 return;
7269 }
7270
7271 case elfcpp::R_ARM_NONE:
7272 gold_unreachable();
7273 }
7274 }
7275
7276 // Scan a relocation for a local symbol.
7277 // FIXME: This only handles a subset of relocation types used by Android
7278 // on ARM v5te devices.
7279
7280 template<bool big_endian>
7281 inline void
7282 Target_arm<big_endian>::Scan::local(Symbol_table* symtab,
7283 Layout* layout,
7284 Target_arm* target,
7285 Sized_relobj<32, big_endian>* object,
7286 unsigned int data_shndx,
7287 Output_section* output_section,
7288 const elfcpp::Rel<32, big_endian>& reloc,
7289 unsigned int r_type,
7290 const elfcpp::Sym<32, big_endian>& lsym)
7291 {
7292 r_type = get_real_reloc_type(r_type);
7293 switch (r_type)
7294 {
7295 case elfcpp::R_ARM_NONE:
7296 case elfcpp::R_ARM_V4BX:
7297 case elfcpp::R_ARM_GNU_VTENTRY:
7298 case elfcpp::R_ARM_GNU_VTINHERIT:
7299 break;
7300
7301 case elfcpp::R_ARM_ABS32:
7302 case elfcpp::R_ARM_ABS32_NOI:
7303 // If building a shared library (or a position-independent
7304 // executable), we need to create a dynamic relocation for
7305 // this location. The relocation applied at link time will
7306 // apply the link-time value, so we flag the location with
7307 // an R_ARM_RELATIVE relocation so the dynamic loader can
7308 // relocate it easily.
7309 if (parameters->options().output_is_position_independent())
7310 {
7311 Reloc_section* rel_dyn = target->rel_dyn_section(layout);
7312 unsigned int r_sym = elfcpp::elf_r_sym<32>(reloc.get_r_info());
7313 // If we are to add more other reloc types than R_ARM_ABS32,
7314 // we need to add check_non_pic(object, r_type) here.
7315 rel_dyn->add_local_relative(object, r_sym, elfcpp::R_ARM_RELATIVE,
7316 output_section, data_shndx,
7317 reloc.get_r_offset());
7318 }
7319 break;
7320
7321 case elfcpp::R_ARM_ABS16:
7322 case elfcpp::R_ARM_ABS12:
7323 case elfcpp::R_ARM_THM_ABS5:
7324 case elfcpp::R_ARM_ABS8:
7325 case elfcpp::R_ARM_BASE_ABS:
7326 case elfcpp::R_ARM_MOVW_ABS_NC:
7327 case elfcpp::R_ARM_MOVT_ABS:
7328 case elfcpp::R_ARM_THM_MOVW_ABS_NC:
7329 case elfcpp::R_ARM_THM_MOVT_ABS:
7330 // If building a shared library (or a position-independent
7331 // executable), we need to create a dynamic relocation for
7332 // this location. Because the addend needs to remain in the
7333 // data section, we need to be careful not to apply this
7334 // relocation statically.
7335 if (parameters->options().output_is_position_independent())
7336 {
7337 check_non_pic(object, r_type);
7338 Reloc_section* rel_dyn = target->rel_dyn_section(layout);
7339 unsigned int r_sym = elfcpp::elf_r_sym<32>(reloc.get_r_info());
7340 if (lsym.get_st_type() != elfcpp::STT_SECTION)
7341 rel_dyn->add_local(object, r_sym, r_type, output_section,
7342 data_shndx, reloc.get_r_offset());
7343 else
7344 {
7345 gold_assert(lsym.get_st_value() == 0);
7346 unsigned int shndx = lsym.get_st_shndx();
7347 bool is_ordinary;
7348 shndx = object->adjust_sym_shndx(r_sym, shndx,
7349 &is_ordinary);
7350 if (!is_ordinary)
7351 object->error(_("section symbol %u has bad shndx %u"),
7352 r_sym, shndx);
7353 else
7354 rel_dyn->add_local_section(object, shndx,
7355 r_type, output_section,
7356 data_shndx, reloc.get_r_offset());
7357 }
7358 }
7359 break;
7360
7361 case elfcpp::R_ARM_PC24:
7362 case elfcpp::R_ARM_REL32:
7363 case elfcpp::R_ARM_LDR_PC_G0:
7364 case elfcpp::R_ARM_SBREL32:
7365 case elfcpp::R_ARM_THM_CALL:
7366 case elfcpp::R_ARM_THM_PC8:
7367 case elfcpp::R_ARM_BASE_PREL:
7368 case elfcpp::R_ARM_PLT32:
7369 case elfcpp::R_ARM_CALL:
7370 case elfcpp::R_ARM_JUMP24:
7371 case elfcpp::R_ARM_THM_JUMP24:
7372 case elfcpp::R_ARM_LDR_SBREL_11_0_NC:
7373 case elfcpp::R_ARM_ALU_SBREL_19_12_NC:
7374 case elfcpp::R_ARM_ALU_SBREL_27_20_CK:
7375 case elfcpp::R_ARM_SBREL31:
7376 case elfcpp::R_ARM_PREL31:
7377 case elfcpp::R_ARM_MOVW_PREL_NC:
7378 case elfcpp::R_ARM_MOVT_PREL:
7379 case elfcpp::R_ARM_THM_MOVW_PREL_NC:
7380 case elfcpp::R_ARM_THM_MOVT_PREL:
7381 case elfcpp::R_ARM_THM_JUMP19:
7382 case elfcpp::R_ARM_THM_JUMP6:
7383 case elfcpp::R_ARM_THM_ALU_PREL_11_0:
7384 case elfcpp::R_ARM_THM_PC12:
7385 case elfcpp::R_ARM_REL32_NOI:
7386 case elfcpp::R_ARM_ALU_PC_G0_NC:
7387 case elfcpp::R_ARM_ALU_PC_G0:
7388 case elfcpp::R_ARM_ALU_PC_G1_NC:
7389 case elfcpp::R_ARM_ALU_PC_G1:
7390 case elfcpp::R_ARM_ALU_PC_G2:
7391 case elfcpp::R_ARM_LDR_PC_G1:
7392 case elfcpp::R_ARM_LDR_PC_G2:
7393 case elfcpp::R_ARM_LDRS_PC_G0:
7394 case elfcpp::R_ARM_LDRS_PC_G1:
7395 case elfcpp::R_ARM_LDRS_PC_G2:
7396 case elfcpp::R_ARM_LDC_PC_G0:
7397 case elfcpp::R_ARM_LDC_PC_G1:
7398 case elfcpp::R_ARM_LDC_PC_G2:
7399 case elfcpp::R_ARM_ALU_SB_G0_NC:
7400 case elfcpp::R_ARM_ALU_SB_G0:
7401 case elfcpp::R_ARM_ALU_SB_G1_NC:
7402 case elfcpp::R_ARM_ALU_SB_G1:
7403 case elfcpp::R_ARM_ALU_SB_G2:
7404 case elfcpp::R_ARM_LDR_SB_G0:
7405 case elfcpp::R_ARM_LDR_SB_G1:
7406 case elfcpp::R_ARM_LDR_SB_G2:
7407 case elfcpp::R_ARM_LDRS_SB_G0:
7408 case elfcpp::R_ARM_LDRS_SB_G1:
7409 case elfcpp::R_ARM_LDRS_SB_G2:
7410 case elfcpp::R_ARM_LDC_SB_G0:
7411 case elfcpp::R_ARM_LDC_SB_G1:
7412 case elfcpp::R_ARM_LDC_SB_G2:
7413 case elfcpp::R_ARM_MOVW_BREL_NC:
7414 case elfcpp::R_ARM_MOVT_BREL:
7415 case elfcpp::R_ARM_MOVW_BREL:
7416 case elfcpp::R_ARM_THM_MOVW_BREL_NC:
7417 case elfcpp::R_ARM_THM_MOVT_BREL:
7418 case elfcpp::R_ARM_THM_MOVW_BREL:
7419 case elfcpp::R_ARM_THM_JUMP11:
7420 case elfcpp::R_ARM_THM_JUMP8:
7421 // We don't need to do anything for a relative addressing relocation
7422 // against a local symbol if it does not reference the GOT.
7423 break;
7424
7425 case elfcpp::R_ARM_GOTOFF32:
7426 case elfcpp::R_ARM_GOTOFF12:
7427 // We need a GOT section:
7428 target->got_section(symtab, layout);
7429 break;
7430
7431 case elfcpp::R_ARM_GOT_BREL:
7432 case elfcpp::R_ARM_GOT_PREL:
7433 {
7434 // The symbol requires a GOT entry.
7435 Arm_output_data_got<big_endian>* got =
7436 target->got_section(symtab, layout);
7437 unsigned int r_sym = elfcpp::elf_r_sym<32>(reloc.get_r_info());
7438 if (got->add_local(object, r_sym, GOT_TYPE_STANDARD))
7439 {
7440 // If we are generating a shared object, we need to add a
7441 // dynamic RELATIVE relocation for this symbol's GOT entry.
7442 if (parameters->options().output_is_position_independent())
7443 {
7444 Reloc_section* rel_dyn = target->rel_dyn_section(layout);
7445 unsigned int r_sym = elfcpp::elf_r_sym<32>(reloc.get_r_info());
7446 rel_dyn->add_local_relative(
7447 object, r_sym, elfcpp::R_ARM_RELATIVE, got,
7448 object->local_got_offset(r_sym, GOT_TYPE_STANDARD));
7449 }
7450 }
7451 }
7452 break;
7453
7454 case elfcpp::R_ARM_TARGET1:
7455 case elfcpp::R_ARM_TARGET2:
7456 // This should have been mapped to another type already.
7457 // Fall through.
7458 case elfcpp::R_ARM_COPY:
7459 case elfcpp::R_ARM_GLOB_DAT:
7460 case elfcpp::R_ARM_JUMP_SLOT:
7461 case elfcpp::R_ARM_RELATIVE:
7462 // These are relocations which should only be seen by the
7463 // dynamic linker, and should never be seen here.
7464 gold_error(_("%s: unexpected reloc %u in object file"),
7465 object->name().c_str(), r_type);
7466 break;
7467
7468
7469 // These are initial TLS relocs, which are expected when
7470 // linking.
7471 case elfcpp::R_ARM_TLS_GD32: // Global-dynamic
7472 case elfcpp::R_ARM_TLS_LDM32: // Local-dynamic
7473 case elfcpp::R_ARM_TLS_LDO32: // Alternate local-dynamic
7474 case elfcpp::R_ARM_TLS_IE32: // Initial-exec
7475 case elfcpp::R_ARM_TLS_LE32: // Local-exec
7476 {
7477 bool output_is_shared = parameters->options().shared();
7478 const tls::Tls_optimization optimized_type
7479 = Target_arm<big_endian>::optimize_tls_reloc(!output_is_shared,
7480 r_type);
7481 switch (r_type)
7482 {
7483 case elfcpp::R_ARM_TLS_GD32: // Global-dynamic
7484 if (optimized_type == tls::TLSOPT_NONE)
7485 {
7486 // Create a pair of GOT entries for the module index and
7487 // dtv-relative offset.
7488 Arm_output_data_got<big_endian>* got
7489 = target->got_section(symtab, layout);
7490 unsigned int r_sym = elfcpp::elf_r_sym<32>(reloc.get_r_info());
7491 unsigned int shndx = lsym.get_st_shndx();
7492 bool is_ordinary;
7493 shndx = object->adjust_sym_shndx(r_sym, shndx, &is_ordinary);
7494 if (!is_ordinary)
7495 {
7496 object->error(_("local symbol %u has bad shndx %u"),
7497 r_sym, shndx);
7498 break;
7499 }
7500
7501 if (!parameters->doing_static_link())
7502 got->add_local_pair_with_rel(object, r_sym, shndx,
7503 GOT_TYPE_TLS_PAIR,
7504 target->rel_dyn_section(layout),
7505 elfcpp::R_ARM_TLS_DTPMOD32, 0);
7506 else
7507 got->add_tls_gd32_with_static_reloc(GOT_TYPE_TLS_PAIR,
7508 object, r_sym);
7509 }
7510 else
7511 // FIXME: TLS optimization not supported yet.
7512 gold_unreachable();
7513 break;
7514
7515 case elfcpp::R_ARM_TLS_LDM32: // Local-dynamic
7516 if (optimized_type == tls::TLSOPT_NONE)
7517 {
7518 // Create a GOT entry for the module index.
7519 target->got_mod_index_entry(symtab, layout, object);
7520 }
7521 else
7522 // FIXME: TLS optimization not supported yet.
7523 gold_unreachable();
7524 break;
7525
7526 case elfcpp::R_ARM_TLS_LDO32: // Alternate local-dynamic
7527 break;
7528
7529 case elfcpp::R_ARM_TLS_IE32: // Initial-exec
7530 layout->set_has_static_tls();
7531 if (optimized_type == tls::TLSOPT_NONE)
7532 {
7533 // Create a GOT entry for the tp-relative offset.
7534 Arm_output_data_got<big_endian>* got
7535 = target->got_section(symtab, layout);
7536 unsigned int r_sym =
7537 elfcpp::elf_r_sym<32>(reloc.get_r_info());
7538 if (!parameters->doing_static_link())
7539 got->add_local_with_rel(object, r_sym, GOT_TYPE_TLS_OFFSET,
7540 target->rel_dyn_section(layout),
7541 elfcpp::R_ARM_TLS_TPOFF32);
7542 else if (!object->local_has_got_offset(r_sym,
7543 GOT_TYPE_TLS_OFFSET))
7544 {
7545 got->add_local(object, r_sym, GOT_TYPE_TLS_OFFSET);
7546 unsigned int got_offset =
7547 object->local_got_offset(r_sym, GOT_TYPE_TLS_OFFSET);
7548 got->add_static_reloc(got_offset,
7549 elfcpp::R_ARM_TLS_TPOFF32, object,
7550 r_sym);
7551 }
7552 }
7553 else
7554 // FIXME: TLS optimization not supported yet.
7555 gold_unreachable();
7556 break;
7557
7558 case elfcpp::R_ARM_TLS_LE32: // Local-exec
7559 layout->set_has_static_tls();
7560 if (output_is_shared)
7561 {
7562 // We need to create a dynamic relocation.
7563 gold_assert(lsym.get_st_type() != elfcpp::STT_SECTION);
7564 unsigned int r_sym = elfcpp::elf_r_sym<32>(reloc.get_r_info());
7565 Reloc_section* rel_dyn = target->rel_dyn_section(layout);
7566 rel_dyn->add_local(object, r_sym, elfcpp::R_ARM_TLS_TPOFF32,
7567 output_section, data_shndx,
7568 reloc.get_r_offset());
7569 }
7570 break;
7571
7572 default:
7573 gold_unreachable();
7574 }
7575 }
7576 break;
7577
7578 default:
7579 unsupported_reloc_local(object, r_type);
7580 break;
7581 }
7582 }
7583
7584 // Report an unsupported relocation against a global symbol.
7585
7586 template<bool big_endian>
7587 void
7588 Target_arm<big_endian>::Scan::unsupported_reloc_global(
7589 Sized_relobj<32, big_endian>* object,
7590 unsigned int r_type,
7591 Symbol* gsym)
7592 {
7593 gold_error(_("%s: unsupported reloc %u against global symbol %s"),
7594 object->name().c_str(), r_type, gsym->demangled_name().c_str());
7595 }
7596
7597 // Scan a relocation for a global symbol.
7598
7599 template<bool big_endian>
7600 inline void
7601 Target_arm<big_endian>::Scan::global(Symbol_table* symtab,
7602 Layout* layout,
7603 Target_arm* target,
7604 Sized_relobj<32, big_endian>* object,
7605 unsigned int data_shndx,
7606 Output_section* output_section,
7607 const elfcpp::Rel<32, big_endian>& reloc,
7608 unsigned int r_type,
7609 Symbol* gsym)
7610 {
7611 // A reference to _GLOBAL_OFFSET_TABLE_ implies that we need a got
7612 // section. We check here to avoid creating a dynamic reloc against
7613 // _GLOBAL_OFFSET_TABLE_.
7614 if (!target->has_got_section()
7615 && strcmp(gsym->name(), "_GLOBAL_OFFSET_TABLE_") == 0)
7616 target->got_section(symtab, layout);
7617
7618 r_type = get_real_reloc_type(r_type);
7619 switch (r_type)
7620 {
7621 case elfcpp::R_ARM_NONE:
7622 case elfcpp::R_ARM_V4BX:
7623 case elfcpp::R_ARM_GNU_VTENTRY:
7624 case elfcpp::R_ARM_GNU_VTINHERIT:
7625 break;
7626
7627 case elfcpp::R_ARM_ABS32:
7628 case elfcpp::R_ARM_ABS16:
7629 case elfcpp::R_ARM_ABS12:
7630 case elfcpp::R_ARM_THM_ABS5:
7631 case elfcpp::R_ARM_ABS8:
7632 case elfcpp::R_ARM_BASE_ABS:
7633 case elfcpp::R_ARM_MOVW_ABS_NC:
7634 case elfcpp::R_ARM_MOVT_ABS:
7635 case elfcpp::R_ARM_THM_MOVW_ABS_NC:
7636 case elfcpp::R_ARM_THM_MOVT_ABS:
7637 case elfcpp::R_ARM_ABS32_NOI:
7638 // Absolute addressing relocations.
7639 {
7640 // Make a PLT entry if necessary.
7641 if (this->symbol_needs_plt_entry(gsym))
7642 {
7643 target->make_plt_entry(symtab, layout, gsym);
7644 // Since this is not a PC-relative relocation, we may be
7645 // taking the address of a function. In that case we need to
7646 // set the entry in the dynamic symbol table to the address of
7647 // the PLT entry.
7648 if (gsym->is_from_dynobj() && !parameters->options().shared())
7649 gsym->set_needs_dynsym_value();
7650 }
7651 // Make a dynamic relocation if necessary.
7652 if (gsym->needs_dynamic_reloc(Symbol::ABSOLUTE_REF))
7653 {
7654 if (gsym->may_need_copy_reloc())
7655 {
7656 target->copy_reloc(symtab, layout, object,
7657 data_shndx, output_section, gsym, reloc);
7658 }
7659 else if ((r_type == elfcpp::R_ARM_ABS32
7660 || r_type == elfcpp::R_ARM_ABS32_NOI)
7661 && gsym->can_use_relative_reloc(false))
7662 {
7663 Reloc_section* rel_dyn = target->rel_dyn_section(layout);
7664 rel_dyn->add_global_relative(gsym, elfcpp::R_ARM_RELATIVE,
7665 output_section, object,
7666 data_shndx, reloc.get_r_offset());
7667 }
7668 else
7669 {
7670 check_non_pic(object, r_type);
7671 Reloc_section* rel_dyn = target->rel_dyn_section(layout);
7672 rel_dyn->add_global(gsym, r_type, output_section, object,
7673 data_shndx, reloc.get_r_offset());
7674 }
7675 }
7676 }
7677 break;
7678
7679 case elfcpp::R_ARM_GOTOFF32:
7680 case elfcpp::R_ARM_GOTOFF12:
7681 // We need a GOT section.
7682 target->got_section(symtab, layout);
7683 break;
7684
7685 case elfcpp::R_ARM_REL32:
7686 case elfcpp::R_ARM_LDR_PC_G0:
7687 case elfcpp::R_ARM_SBREL32:
7688 case elfcpp::R_ARM_THM_PC8:
7689 case elfcpp::R_ARM_BASE_PREL:
7690 case elfcpp::R_ARM_LDR_SBREL_11_0_NC:
7691 case elfcpp::R_ARM_ALU_SBREL_19_12_NC:
7692 case elfcpp::R_ARM_ALU_SBREL_27_20_CK:
7693 case elfcpp::R_ARM_MOVW_PREL_NC:
7694 case elfcpp::R_ARM_MOVT_PREL:
7695 case elfcpp::R_ARM_THM_MOVW_PREL_NC:
7696 case elfcpp::R_ARM_THM_MOVT_PREL:
7697 case elfcpp::R_ARM_THM_ALU_PREL_11_0:
7698 case elfcpp::R_ARM_THM_PC12:
7699 case elfcpp::R_ARM_REL32_NOI:
7700 case elfcpp::R_ARM_ALU_PC_G0_NC:
7701 case elfcpp::R_ARM_ALU_PC_G0:
7702 case elfcpp::R_ARM_ALU_PC_G1_NC:
7703 case elfcpp::R_ARM_ALU_PC_G1:
7704 case elfcpp::R_ARM_ALU_PC_G2:
7705 case elfcpp::R_ARM_LDR_PC_G1:
7706 case elfcpp::R_ARM_LDR_PC_G2:
7707 case elfcpp::R_ARM_LDRS_PC_G0:
7708 case elfcpp::R_ARM_LDRS_PC_G1:
7709 case elfcpp::R_ARM_LDRS_PC_G2:
7710 case elfcpp::R_ARM_LDC_PC_G0:
7711 case elfcpp::R_ARM_LDC_PC_G1:
7712 case elfcpp::R_ARM_LDC_PC_G2:
7713 case elfcpp::R_ARM_ALU_SB_G0_NC:
7714 case elfcpp::R_ARM_ALU_SB_G0:
7715 case elfcpp::R_ARM_ALU_SB_G1_NC:
7716 case elfcpp::R_ARM_ALU_SB_G1:
7717 case elfcpp::R_ARM_ALU_SB_G2:
7718 case elfcpp::R_ARM_LDR_SB_G0:
7719 case elfcpp::R_ARM_LDR_SB_G1:
7720 case elfcpp::R_ARM_LDR_SB_G2:
7721 case elfcpp::R_ARM_LDRS_SB_G0:
7722 case elfcpp::R_ARM_LDRS_SB_G1:
7723 case elfcpp::R_ARM_LDRS_SB_G2:
7724 case elfcpp::R_ARM_LDC_SB_G0:
7725 case elfcpp::R_ARM_LDC_SB_G1:
7726 case elfcpp::R_ARM_LDC_SB_G2:
7727 case elfcpp::R_ARM_MOVW_BREL_NC:
7728 case elfcpp::R_ARM_MOVT_BREL:
7729 case elfcpp::R_ARM_MOVW_BREL:
7730 case elfcpp::R_ARM_THM_MOVW_BREL_NC:
7731 case elfcpp::R_ARM_THM_MOVT_BREL:
7732 case elfcpp::R_ARM_THM_MOVW_BREL:
7733 // Relative addressing relocations.
7734 {
7735 // Make a dynamic relocation if necessary.
7736 int flags = Symbol::NON_PIC_REF;
7737 if (gsym->needs_dynamic_reloc(flags))
7738 {
7739 if (target->may_need_copy_reloc(gsym))
7740 {
7741 target->copy_reloc(symtab, layout, object,
7742 data_shndx, output_section, gsym, reloc);
7743 }
7744 else
7745 {
7746 check_non_pic(object, r_type);
7747 Reloc_section* rel_dyn = target->rel_dyn_section(layout);
7748 rel_dyn->add_global(gsym, r_type, output_section, object,
7749 data_shndx, reloc.get_r_offset());
7750 }
7751 }
7752 }
7753 break;
7754
7755 case elfcpp::R_ARM_PC24:
7756 case elfcpp::R_ARM_THM_CALL:
7757 case elfcpp::R_ARM_PLT32:
7758 case elfcpp::R_ARM_CALL:
7759 case elfcpp::R_ARM_JUMP24:
7760 case elfcpp::R_ARM_THM_JUMP24:
7761 case elfcpp::R_ARM_SBREL31:
7762 case elfcpp::R_ARM_PREL31:
7763 case elfcpp::R_ARM_THM_JUMP19:
7764 case elfcpp::R_ARM_THM_JUMP6:
7765 case elfcpp::R_ARM_THM_JUMP11:
7766 case elfcpp::R_ARM_THM_JUMP8:
7767 // All the relocation above are branches except for the PREL31 ones.
7768 // A PREL31 relocation can point to a personality function in a shared
7769 // library. In that case we want to use a PLT because we want to
7770 // call the personality routine and the dyanmic linkers we care about
7771 // do not support dynamic PREL31 relocations. An REL31 relocation may
7772 // point to a function whose unwinding behaviour is being described but
7773 // we will not mistakenly generate a PLT for that because we should use
7774 // a local section symbol.
7775
7776 // If the symbol is fully resolved, this is just a relative
7777 // local reloc. Otherwise we need a PLT entry.
7778 if (gsym->final_value_is_known())
7779 break;
7780 // If building a shared library, we can also skip the PLT entry
7781 // if the symbol is defined in the output file and is protected
7782 // or hidden.
7783 if (gsym->is_defined()
7784 && !gsym->is_from_dynobj()
7785 && !gsym->is_preemptible())
7786 break;
7787 target->make_plt_entry(symtab, layout, gsym);
7788 break;
7789
7790 case elfcpp::R_ARM_GOT_BREL:
7791 case elfcpp::R_ARM_GOT_ABS:
7792 case elfcpp::R_ARM_GOT_PREL:
7793 {
7794 // The symbol requires a GOT entry.
7795 Arm_output_data_got<big_endian>* got =
7796 target->got_section(symtab, layout);
7797 if (gsym->final_value_is_known())
7798 got->add_global(gsym, GOT_TYPE_STANDARD);
7799 else
7800 {
7801 // If this symbol is not fully resolved, we need to add a
7802 // GOT entry with a dynamic relocation.
7803 Reloc_section* rel_dyn = target->rel_dyn_section(layout);
7804 if (gsym->is_from_dynobj()
7805 || gsym->is_undefined()
7806 || gsym->is_preemptible())
7807 got->add_global_with_rel(gsym, GOT_TYPE_STANDARD,
7808 rel_dyn, elfcpp::R_ARM_GLOB_DAT);
7809 else
7810 {
7811 if (got->add_global(gsym, GOT_TYPE_STANDARD))
7812 rel_dyn->add_global_relative(
7813 gsym, elfcpp::R_ARM_RELATIVE, got,
7814 gsym->got_offset(GOT_TYPE_STANDARD));
7815 }
7816 }
7817 }
7818 break;
7819
7820 case elfcpp::R_ARM_TARGET1:
7821 case elfcpp::R_ARM_TARGET2:
7822 // These should have been mapped to other types already.
7823 // Fall through.
7824 case elfcpp::R_ARM_COPY:
7825 case elfcpp::R_ARM_GLOB_DAT:
7826 case elfcpp::R_ARM_JUMP_SLOT:
7827 case elfcpp::R_ARM_RELATIVE:
7828 // These are relocations which should only be seen by the
7829 // dynamic linker, and should never be seen here.
7830 gold_error(_("%s: unexpected reloc %u in object file"),
7831 object->name().c_str(), r_type);
7832 break;
7833
7834 // These are initial tls relocs, which are expected when
7835 // linking.
7836 case elfcpp::R_ARM_TLS_GD32: // Global-dynamic
7837 case elfcpp::R_ARM_TLS_LDM32: // Local-dynamic
7838 case elfcpp::R_ARM_TLS_LDO32: // Alternate local-dynamic
7839 case elfcpp::R_ARM_TLS_IE32: // Initial-exec
7840 case elfcpp::R_ARM_TLS_LE32: // Local-exec
7841 {
7842 const bool is_final = gsym->final_value_is_known();
7843 const tls::Tls_optimization optimized_type
7844 = Target_arm<big_endian>::optimize_tls_reloc(is_final, r_type);
7845 switch (r_type)
7846 {
7847 case elfcpp::R_ARM_TLS_GD32: // Global-dynamic
7848 if (optimized_type == tls::TLSOPT_NONE)
7849 {
7850 // Create a pair of GOT entries for the module index and
7851 // dtv-relative offset.
7852 Arm_output_data_got<big_endian>* got
7853 = target->got_section(symtab, layout);
7854 if (!parameters->doing_static_link())
7855 got->add_global_pair_with_rel(gsym, GOT_TYPE_TLS_PAIR,
7856 target->rel_dyn_section(layout),
7857 elfcpp::R_ARM_TLS_DTPMOD32,
7858 elfcpp::R_ARM_TLS_DTPOFF32);
7859 else
7860 got->add_tls_gd32_with_static_reloc(GOT_TYPE_TLS_PAIR, gsym);
7861 }
7862 else
7863 // FIXME: TLS optimization not supported yet.
7864 gold_unreachable();
7865 break;
7866
7867 case elfcpp::R_ARM_TLS_LDM32: // Local-dynamic
7868 if (optimized_type == tls::TLSOPT_NONE)
7869 {
7870 // Create a GOT entry for the module index.
7871 target->got_mod_index_entry(symtab, layout, object);
7872 }
7873 else
7874 // FIXME: TLS optimization not supported yet.
7875 gold_unreachable();
7876 break;
7877
7878 case elfcpp::R_ARM_TLS_LDO32: // Alternate local-dynamic
7879 break;
7880
7881 case elfcpp::R_ARM_TLS_IE32: // Initial-exec
7882 layout->set_has_static_tls();
7883 if (optimized_type == tls::TLSOPT_NONE)
7884 {
7885 // Create a GOT entry for the tp-relative offset.
7886 Arm_output_data_got<big_endian>* got
7887 = target->got_section(symtab, layout);
7888 if (!parameters->doing_static_link())
7889 got->add_global_with_rel(gsym, GOT_TYPE_TLS_OFFSET,
7890 target->rel_dyn_section(layout),
7891 elfcpp::R_ARM_TLS_TPOFF32);
7892 else if (!gsym->has_got_offset(GOT_TYPE_TLS_OFFSET))
7893 {
7894 got->add_global(gsym, GOT_TYPE_TLS_OFFSET);
7895 unsigned int got_offset =
7896 gsym->got_offset(GOT_TYPE_TLS_OFFSET);
7897 got->add_static_reloc(got_offset,
7898 elfcpp::R_ARM_TLS_TPOFF32, gsym);
7899 }
7900 }
7901 else
7902 // FIXME: TLS optimization not supported yet.
7903 gold_unreachable();
7904 break;
7905
7906 case elfcpp::R_ARM_TLS_LE32: // Local-exec
7907 layout->set_has_static_tls();
7908 if (parameters->options().shared())
7909 {
7910 // We need to create a dynamic relocation.
7911 Reloc_section* rel_dyn = target->rel_dyn_section(layout);
7912 rel_dyn->add_global(gsym, elfcpp::R_ARM_TLS_TPOFF32,
7913 output_section, object,
7914 data_shndx, reloc.get_r_offset());
7915 }
7916 break;
7917
7918 default:
7919 gold_unreachable();
7920 }
7921 }
7922 break;
7923
7924 default:
7925 unsupported_reloc_global(object, r_type, gsym);
7926 break;
7927 }
7928 }
7929
7930 // Process relocations for gc.
7931
7932 template<bool big_endian>
7933 void
7934 Target_arm<big_endian>::gc_process_relocs(Symbol_table* symtab,
7935 Layout* layout,
7936 Sized_relobj<32, big_endian>* object,
7937 unsigned int data_shndx,
7938 unsigned int,
7939 const unsigned char* prelocs,
7940 size_t reloc_count,
7941 Output_section* output_section,
7942 bool needs_special_offset_handling,
7943 size_t local_symbol_count,
7944 const unsigned char* plocal_symbols)
7945 {
7946 typedef Target_arm<big_endian> Arm;
7947 typedef typename Target_arm<big_endian>::Scan Scan;
7948
7949 gold::gc_process_relocs<32, big_endian, Arm, elfcpp::SHT_REL, Scan>(
7950 symtab,
7951 layout,
7952 this,
7953 object,
7954 data_shndx,
7955 prelocs,
7956 reloc_count,
7957 output_section,
7958 needs_special_offset_handling,
7959 local_symbol_count,
7960 plocal_symbols);
7961 }
7962
7963 // Scan relocations for a section.
7964
7965 template<bool big_endian>
7966 void
7967 Target_arm<big_endian>::scan_relocs(Symbol_table* symtab,
7968 Layout* layout,
7969 Sized_relobj<32, big_endian>* object,
7970 unsigned int data_shndx,
7971 unsigned int sh_type,
7972 const unsigned char* prelocs,
7973 size_t reloc_count,
7974 Output_section* output_section,
7975 bool needs_special_offset_handling,
7976 size_t local_symbol_count,
7977 const unsigned char* plocal_symbols)
7978 {
7979 typedef typename Target_arm<big_endian>::Scan Scan;
7980 if (sh_type == elfcpp::SHT_RELA)
7981 {
7982 gold_error(_("%s: unsupported RELA reloc section"),
7983 object->name().c_str());
7984 return;
7985 }
7986
7987 gold::scan_relocs<32, big_endian, Target_arm, elfcpp::SHT_REL, Scan>(
7988 symtab,
7989 layout,
7990 this,
7991 object,
7992 data_shndx,
7993 prelocs,
7994 reloc_count,
7995 output_section,
7996 needs_special_offset_handling,
7997 local_symbol_count,
7998 plocal_symbols);
7999 }
8000
8001 // Finalize the sections.
8002
8003 template<bool big_endian>
8004 void
8005 Target_arm<big_endian>::do_finalize_sections(
8006 Layout* layout,
8007 const Input_objects* input_objects,
8008 Symbol_table* symtab)
8009 {
8010 // Merge processor-specific flags.
8011 for (Input_objects::Relobj_iterator p = input_objects->relobj_begin();
8012 p != input_objects->relobj_end();
8013 ++p)
8014 {
8015 Arm_relobj<big_endian>* arm_relobj =
8016 Arm_relobj<big_endian>::as_arm_relobj(*p);
8017 if (arm_relobj->merge_flags_and_attributes())
8018 {
8019 this->merge_processor_specific_flags(
8020 arm_relobj->name(),
8021 arm_relobj->processor_specific_flags());
8022 this->merge_object_attributes(arm_relobj->name().c_str(),
8023 arm_relobj->attributes_section_data());
8024 }
8025 }
8026
8027 for (Input_objects::Dynobj_iterator p = input_objects->dynobj_begin();
8028 p != input_objects->dynobj_end();
8029 ++p)
8030 {
8031 Arm_dynobj<big_endian>* arm_dynobj =
8032 Arm_dynobj<big_endian>::as_arm_dynobj(*p);
8033 this->merge_processor_specific_flags(
8034 arm_dynobj->name(),
8035 arm_dynobj->processor_specific_flags());
8036 this->merge_object_attributes(arm_dynobj->name().c_str(),
8037 arm_dynobj->attributes_section_data());
8038 }
8039
8040 // Create an empty uninitialized attribute section if we still don't have it
8041 // at this moment. This happens if there is no attributes sections in all
8042 // inputs.
8043 if (this->attributes_section_data_ == NULL)
8044 this->attributes_section_data_ = new Attributes_section_data(NULL, 0);
8045
8046 // Check BLX use.
8047 const Object_attribute* cpu_arch_attr =
8048 this->get_aeabi_object_attribute(elfcpp::Tag_CPU_arch);
8049 if (cpu_arch_attr->int_value() > elfcpp::TAG_CPU_ARCH_V4)
8050 this->set_may_use_blx(true);
8051
8052 // Check if we need to use Cortex-A8 workaround.
8053 if (parameters->options().user_set_fix_cortex_a8())
8054 this->fix_cortex_a8_ = parameters->options().fix_cortex_a8();
8055 else
8056 {
8057 // If neither --fix-cortex-a8 nor --no-fix-cortex-a8 is used, turn on
8058 // Cortex-A8 erratum workaround for ARMv7-A or ARMv7 with unknown
8059 // profile.
8060 const Object_attribute* cpu_arch_profile_attr =
8061 this->get_aeabi_object_attribute(elfcpp::Tag_CPU_arch_profile);
8062 this->fix_cortex_a8_ =
8063 (cpu_arch_attr->int_value() == elfcpp::TAG_CPU_ARCH_V7
8064 && (cpu_arch_profile_attr->int_value() == 'A'
8065 || cpu_arch_profile_attr->int_value() == 0));
8066 }
8067
8068 // Check if we can use V4BX interworking.
8069 // The V4BX interworking stub contains BX instruction,
8070 // which is not specified for some profiles.
8071 if (this->fix_v4bx() == General_options::FIX_V4BX_INTERWORKING
8072 && !this->may_use_blx())
8073 gold_error(_("unable to provide V4BX reloc interworking fix up; "
8074 "the target profile does not support BX instruction"));
8075
8076 // Fill in some more dynamic tags.
8077 const Reloc_section* rel_plt = (this->plt_ == NULL
8078 ? NULL
8079 : this->plt_->rel_plt());
8080 layout->add_target_dynamic_tags(true, this->got_plt_, rel_plt,
8081 this->rel_dyn_, true, false);
8082
8083 // Emit any relocs we saved in an attempt to avoid generating COPY
8084 // relocs.
8085 if (this->copy_relocs_.any_saved_relocs())
8086 this->copy_relocs_.emit(this->rel_dyn_section(layout));
8087
8088 // Handle the .ARM.exidx section.
8089 Output_section* exidx_section = layout->find_output_section(".ARM.exidx");
8090 if (exidx_section != NULL
8091 && exidx_section->type() == elfcpp::SHT_ARM_EXIDX
8092 && !parameters->options().relocatable())
8093 {
8094 // Create __exidx_start and __exdix_end symbols.
8095 symtab->define_in_output_data("__exidx_start", NULL,
8096 Symbol_table::PREDEFINED,
8097 exidx_section, 0, 0, elfcpp::STT_OBJECT,
8098 elfcpp::STB_GLOBAL, elfcpp::STV_HIDDEN, 0,
8099 false, true);
8100 symtab->define_in_output_data("__exidx_end", NULL,
8101 Symbol_table::PREDEFINED,
8102 exidx_section, 0, 0, elfcpp::STT_OBJECT,
8103 elfcpp::STB_GLOBAL, elfcpp::STV_HIDDEN, 0,
8104 true, true);
8105
8106 // For the ARM target, we need to add a PT_ARM_EXIDX segment for
8107 // the .ARM.exidx section.
8108 if (!layout->script_options()->saw_phdrs_clause())
8109 {
8110 gold_assert(layout->find_output_segment(elfcpp::PT_ARM_EXIDX, 0, 0)
8111 == NULL);
8112 Output_segment* exidx_segment =
8113 layout->make_output_segment(elfcpp::PT_ARM_EXIDX, elfcpp::PF_R);
8114 exidx_segment->add_output_section(exidx_section, elfcpp::PF_R,
8115 false);
8116 }
8117 }
8118
8119 // Create an .ARM.attributes section unless we have no regular input
8120 // object. In that case the output will be empty.
8121 if (input_objects->number_of_relobjs() != 0)
8122 {
8123 Output_attributes_section_data* attributes_section =
8124 new Output_attributes_section_data(*this->attributes_section_data_);
8125 layout->add_output_section_data(".ARM.attributes",
8126 elfcpp::SHT_ARM_ATTRIBUTES, 0,
8127 attributes_section, false, false, false,
8128 false);
8129 }
8130 }
8131
8132 // Return whether a direct absolute static relocation needs to be applied.
8133 // In cases where Scan::local() or Scan::global() has created
8134 // a dynamic relocation other than R_ARM_RELATIVE, the addend
8135 // of the relocation is carried in the data, and we must not
8136 // apply the static relocation.
8137
8138 template<bool big_endian>
8139 inline bool
8140 Target_arm<big_endian>::Relocate::should_apply_static_reloc(
8141 const Sized_symbol<32>* gsym,
8142 int ref_flags,
8143 bool is_32bit,
8144 Output_section* output_section)
8145 {
8146 // If the output section is not allocated, then we didn't call
8147 // scan_relocs, we didn't create a dynamic reloc, and we must apply
8148 // the reloc here.
8149 if ((output_section->flags() & elfcpp::SHF_ALLOC) == 0)
8150 return true;
8151
8152 // For local symbols, we will have created a non-RELATIVE dynamic
8153 // relocation only if (a) the output is position independent,
8154 // (b) the relocation is absolute (not pc- or segment-relative), and
8155 // (c) the relocation is not 32 bits wide.
8156 if (gsym == NULL)
8157 return !(parameters->options().output_is_position_independent()
8158 && (ref_flags & Symbol::ABSOLUTE_REF)
8159 && !is_32bit);
8160
8161 // For global symbols, we use the same helper routines used in the
8162 // scan pass. If we did not create a dynamic relocation, or if we
8163 // created a RELATIVE dynamic relocation, we should apply the static
8164 // relocation.
8165 bool has_dyn = gsym->needs_dynamic_reloc(ref_flags);
8166 bool is_rel = (ref_flags & Symbol::ABSOLUTE_REF)
8167 && gsym->can_use_relative_reloc(ref_flags
8168 & Symbol::FUNCTION_CALL);
8169 return !has_dyn || is_rel;
8170 }
8171
8172 // Perform a relocation.
8173
8174 template<bool big_endian>
8175 inline bool
8176 Target_arm<big_endian>::Relocate::relocate(
8177 const Relocate_info<32, big_endian>* relinfo,
8178 Target_arm* target,
8179 Output_section *output_section,
8180 size_t relnum,
8181 const elfcpp::Rel<32, big_endian>& rel,
8182 unsigned int r_type,
8183 const Sized_symbol<32>* gsym,
8184 const Symbol_value<32>* psymval,
8185 unsigned char* view,
8186 Arm_address address,
8187 section_size_type view_size)
8188 {
8189 typedef Arm_relocate_functions<big_endian> Arm_relocate_functions;
8190
8191 r_type = get_real_reloc_type(r_type);
8192 const Arm_reloc_property* reloc_property =
8193 arm_reloc_property_table->get_implemented_static_reloc_property(r_type);
8194 if (reloc_property == NULL)
8195 {
8196 std::string reloc_name =
8197 arm_reloc_property_table->reloc_name_in_error_message(r_type);
8198 gold_error_at_location(relinfo, relnum, rel.get_r_offset(),
8199 _("cannot relocate %s in object file"),
8200 reloc_name.c_str());
8201 return true;
8202 }
8203
8204 const Arm_relobj<big_endian>* object =
8205 Arm_relobj<big_endian>::as_arm_relobj(relinfo->object);
8206
8207 // If the final branch target of a relocation is THUMB instruction, this
8208 // is 1. Otherwise it is 0.
8209 Arm_address thumb_bit = 0;
8210 Symbol_value<32> symval;
8211 bool is_weakly_undefined_without_plt = false;
8212 if (relnum != Target_arm<big_endian>::fake_relnum_for_stubs)
8213 {
8214 if (gsym != NULL)
8215 {
8216 // This is a global symbol. Determine if we use PLT and if the
8217 // final target is THUMB.
8218 if (gsym->use_plt_offset(reloc_is_non_pic(r_type)))
8219 {
8220 // This uses a PLT, change the symbol value.
8221 symval.set_output_value(target->plt_section()->address()
8222 + gsym->plt_offset());
8223 psymval = &symval;
8224 }
8225 else if (gsym->is_weak_undefined())
8226 {
8227 // This is a weakly undefined symbol and we do not use PLT
8228 // for this relocation. A branch targeting this symbol will
8229 // be converted into an NOP.
8230 is_weakly_undefined_without_plt = true;
8231 }
8232 else
8233 {
8234 // Set thumb bit if symbol:
8235 // -Has type STT_ARM_TFUNC or
8236 // -Has type STT_FUNC, is defined and with LSB in value set.
8237 thumb_bit =
8238 (((gsym->type() == elfcpp::STT_ARM_TFUNC)
8239 || (gsym->type() == elfcpp::STT_FUNC
8240 && !gsym->is_undefined()
8241 && ((psymval->value(object, 0) & 1) != 0)))
8242 ? 1
8243 : 0);
8244 }
8245 }
8246 else
8247 {
8248 // This is a local symbol. Determine if the final target is THUMB.
8249 // We saved this information when all the local symbols were read.
8250 elfcpp::Elf_types<32>::Elf_WXword r_info = rel.get_r_info();
8251 unsigned int r_sym = elfcpp::elf_r_sym<32>(r_info);
8252 thumb_bit = object->local_symbol_is_thumb_function(r_sym) ? 1 : 0;
8253 }
8254 }
8255 else
8256 {
8257 // This is a fake relocation synthesized for a stub. It does not have
8258 // a real symbol. We just look at the LSB of the symbol value to
8259 // determine if the target is THUMB or not.
8260 thumb_bit = ((psymval->value(object, 0) & 1) != 0);
8261 }
8262
8263 // Strip LSB if this points to a THUMB target.
8264 if (thumb_bit != 0
8265 && reloc_property->uses_thumb_bit()
8266 && ((psymval->value(object, 0) & 1) != 0))
8267 {
8268 Arm_address stripped_value =
8269 psymval->value(object, 0) & ~static_cast<Arm_address>(1);
8270 symval.set_output_value(stripped_value);
8271 psymval = &symval;
8272 }
8273
8274 // Get the GOT offset if needed.
8275 // The GOT pointer points to the end of the GOT section.
8276 // We need to subtract the size of the GOT section to get
8277 // the actual offset to use in the relocation.
8278 bool have_got_offset = false;
8279 unsigned int got_offset = 0;
8280 switch (r_type)
8281 {
8282 case elfcpp::R_ARM_GOT_BREL:
8283 case elfcpp::R_ARM_GOT_PREL:
8284 if (gsym != NULL)
8285 {
8286 gold_assert(gsym->has_got_offset(GOT_TYPE_STANDARD));
8287 got_offset = (gsym->got_offset(GOT_TYPE_STANDARD)
8288 - target->got_size());
8289 }
8290 else
8291 {
8292 unsigned int r_sym = elfcpp::elf_r_sym<32>(rel.get_r_info());
8293 gold_assert(object->local_has_got_offset(r_sym, GOT_TYPE_STANDARD));
8294 got_offset = (object->local_got_offset(r_sym, GOT_TYPE_STANDARD)
8295 - target->got_size());
8296 }
8297 have_got_offset = true;
8298 break;
8299
8300 default:
8301 break;
8302 }
8303
8304 // To look up relocation stubs, we need to pass the symbol table index of
8305 // a local symbol.
8306 unsigned int r_sym = elfcpp::elf_r_sym<32>(rel.get_r_info());
8307
8308 // Get the addressing origin of the output segment defining the
8309 // symbol gsym if needed (AAELF 4.6.1.2 Relocation types).
8310 Arm_address sym_origin = 0;
8311 if (reloc_property->uses_symbol_base())
8312 {
8313 if (r_type == elfcpp::R_ARM_BASE_ABS && gsym == NULL)
8314 // R_ARM_BASE_ABS with the NULL symbol will give the
8315 // absolute address of the GOT origin (GOT_ORG) (see ARM IHI
8316 // 0044C (AAELF): 4.6.1.8 Proxy generating relocations).
8317 sym_origin = target->got_plt_section()->address();
8318 else if (gsym == NULL)
8319 sym_origin = 0;
8320 else if (gsym->source() == Symbol::IN_OUTPUT_SEGMENT)
8321 sym_origin = gsym->output_segment()->vaddr();
8322 else if (gsym->source() == Symbol::IN_OUTPUT_DATA)
8323 sym_origin = gsym->output_data()->address();
8324
8325 // TODO: Assumes the segment base to be zero for the global symbols
8326 // till the proper support for the segment-base-relative addressing
8327 // will be implemented. This is consistent with GNU ld.
8328 }
8329
8330 // For relative addressing relocation, find out the relative address base.
8331 Arm_address relative_address_base = 0;
8332 switch(reloc_property->relative_address_base())
8333 {
8334 case Arm_reloc_property::RAB_NONE:
8335 // Relocations with relative address bases RAB_TLS and RAB_tp are
8336 // handled by relocate_tls. So we do not need to do anything here.
8337 case Arm_reloc_property::RAB_TLS:
8338 case Arm_reloc_property::RAB_tp:
8339 break;
8340 case Arm_reloc_property::RAB_B_S:
8341 relative_address_base = sym_origin;
8342 break;
8343 case Arm_reloc_property::RAB_GOT_ORG:
8344 relative_address_base = target->got_plt_section()->address();
8345 break;
8346 case Arm_reloc_property::RAB_P:
8347 relative_address_base = address;
8348 break;
8349 case Arm_reloc_property::RAB_Pa:
8350 relative_address_base = address & 0xfffffffcU;
8351 break;
8352 default:
8353 gold_unreachable();
8354 }
8355
8356 typename Arm_relocate_functions::Status reloc_status =
8357 Arm_relocate_functions::STATUS_OKAY;
8358 bool check_overflow = reloc_property->checks_overflow();
8359 switch (r_type)
8360 {
8361 case elfcpp::R_ARM_NONE:
8362 break;
8363
8364 case elfcpp::R_ARM_ABS8:
8365 if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, false,
8366 output_section))
8367 reloc_status = Arm_relocate_functions::abs8(view, object, psymval);
8368 break;
8369
8370 case elfcpp::R_ARM_ABS12:
8371 if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, false,
8372 output_section))
8373 reloc_status = Arm_relocate_functions::abs12(view, object, psymval);
8374 break;
8375
8376 case elfcpp::R_ARM_ABS16:
8377 if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, false,
8378 output_section))
8379 reloc_status = Arm_relocate_functions::abs16(view, object, psymval);
8380 break;
8381
8382 case elfcpp::R_ARM_ABS32:
8383 if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, true,
8384 output_section))
8385 reloc_status = Arm_relocate_functions::abs32(view, object, psymval,
8386 thumb_bit);
8387 break;
8388
8389 case elfcpp::R_ARM_ABS32_NOI:
8390 if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, true,
8391 output_section))
8392 // No thumb bit for this relocation: (S + A)
8393 reloc_status = Arm_relocate_functions::abs32(view, object, psymval,
8394 0);
8395 break;
8396
8397 case elfcpp::R_ARM_MOVW_ABS_NC:
8398 if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, false,
8399 output_section))
8400 reloc_status = Arm_relocate_functions::movw(view, object, psymval,
8401 0, thumb_bit,
8402 check_overflow);
8403 break;
8404
8405 case elfcpp::R_ARM_MOVT_ABS:
8406 if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, false,
8407 output_section))
8408 reloc_status = Arm_relocate_functions::movt(view, object, psymval, 0);
8409 break;
8410
8411 case elfcpp::R_ARM_THM_MOVW_ABS_NC:
8412 if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, false,
8413 output_section))
8414 reloc_status = Arm_relocate_functions::thm_movw(view, object, psymval,
8415 0, thumb_bit, false);
8416 break;
8417
8418 case elfcpp::R_ARM_THM_MOVT_ABS:
8419 if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, false,
8420 output_section))
8421 reloc_status = Arm_relocate_functions::thm_movt(view, object,
8422 psymval, 0);
8423 break;
8424
8425 case elfcpp::R_ARM_MOVW_PREL_NC:
8426 case elfcpp::R_ARM_MOVW_BREL_NC:
8427 case elfcpp::R_ARM_MOVW_BREL:
8428 reloc_status =
8429 Arm_relocate_functions::movw(view, object, psymval,
8430 relative_address_base, thumb_bit,
8431 check_overflow);
8432 break;
8433
8434 case elfcpp::R_ARM_MOVT_PREL:
8435 case elfcpp::R_ARM_MOVT_BREL:
8436 reloc_status =
8437 Arm_relocate_functions::movt(view, object, psymval,
8438 relative_address_base);
8439 break;
8440
8441 case elfcpp::R_ARM_THM_MOVW_PREL_NC:
8442 case elfcpp::R_ARM_THM_MOVW_BREL_NC:
8443 case elfcpp::R_ARM_THM_MOVW_BREL:
8444 reloc_status =
8445 Arm_relocate_functions::thm_movw(view, object, psymval,
8446 relative_address_base,
8447 thumb_bit, check_overflow);
8448 break;
8449
8450 case elfcpp::R_ARM_THM_MOVT_PREL:
8451 case elfcpp::R_ARM_THM_MOVT_BREL:
8452 reloc_status =
8453 Arm_relocate_functions::thm_movt(view, object, psymval,
8454 relative_address_base);
8455 break;
8456
8457 case elfcpp::R_ARM_REL32:
8458 reloc_status = Arm_relocate_functions::rel32(view, object, psymval,
8459 address, thumb_bit);
8460 break;
8461
8462 case elfcpp::R_ARM_THM_ABS5:
8463 if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, false,
8464 output_section))
8465 reloc_status = Arm_relocate_functions::thm_abs5(view, object, psymval);
8466 break;
8467
8468 // Thumb long branches.
8469 case elfcpp::R_ARM_THM_CALL:
8470 case elfcpp::R_ARM_THM_XPC22:
8471 case elfcpp::R_ARM_THM_JUMP24:
8472 reloc_status =
8473 Arm_relocate_functions::thumb_branch_common(
8474 r_type, relinfo, view, gsym, object, r_sym, psymval, address,
8475 thumb_bit, is_weakly_undefined_without_plt);
8476 break;
8477
8478 case elfcpp::R_ARM_GOTOFF32:
8479 {
8480 Arm_address got_origin;
8481 got_origin = target->got_plt_section()->address();
8482 reloc_status = Arm_relocate_functions::rel32(view, object, psymval,
8483 got_origin, thumb_bit);
8484 }
8485 break;
8486
8487 case elfcpp::R_ARM_BASE_PREL:
8488 gold_assert(gsym != NULL);
8489 reloc_status =
8490 Arm_relocate_functions::base_prel(view, sym_origin, address);
8491 break;
8492
8493 case elfcpp::R_ARM_BASE_ABS:
8494 {
8495 if (!should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, false,
8496 output_section))
8497 break;
8498
8499 reloc_status = Arm_relocate_functions::base_abs(view, sym_origin);
8500 }
8501 break;
8502
8503 case elfcpp::R_ARM_GOT_BREL:
8504 gold_assert(have_got_offset);
8505 reloc_status = Arm_relocate_functions::got_brel(view, got_offset);
8506 break;
8507
8508 case elfcpp::R_ARM_GOT_PREL:
8509 gold_assert(have_got_offset);
8510 // Get the address origin for GOT PLT, which is allocated right
8511 // after the GOT section, to calculate an absolute address of
8512 // the symbol GOT entry (got_origin + got_offset).
8513 Arm_address got_origin;
8514 got_origin = target->got_plt_section()->address();
8515 reloc_status = Arm_relocate_functions::got_prel(view,
8516 got_origin + got_offset,
8517 address);
8518 break;
8519
8520 case elfcpp::R_ARM_PLT32:
8521 case elfcpp::R_ARM_CALL:
8522 case elfcpp::R_ARM_JUMP24:
8523 case elfcpp::R_ARM_XPC25:
8524 gold_assert(gsym == NULL
8525 || gsym->has_plt_offset()
8526 || gsym->final_value_is_known()
8527 || (gsym->is_defined()
8528 && !gsym->is_from_dynobj()
8529 && !gsym->is_preemptible()));
8530 reloc_status =
8531 Arm_relocate_functions::arm_branch_common(
8532 r_type, relinfo, view, gsym, object, r_sym, psymval, address,
8533 thumb_bit, is_weakly_undefined_without_plt);
8534 break;
8535
8536 case elfcpp::R_ARM_THM_JUMP19:
8537 reloc_status =
8538 Arm_relocate_functions::thm_jump19(view, object, psymval, address,
8539 thumb_bit);
8540 break;
8541
8542 case elfcpp::R_ARM_THM_JUMP6:
8543 reloc_status =
8544 Arm_relocate_functions::thm_jump6(view, object, psymval, address);
8545 break;
8546
8547 case elfcpp::R_ARM_THM_JUMP8:
8548 reloc_status =
8549 Arm_relocate_functions::thm_jump8(view, object, psymval, address);
8550 break;
8551
8552 case elfcpp::R_ARM_THM_JUMP11:
8553 reloc_status =
8554 Arm_relocate_functions::thm_jump11(view, object, psymval, address);
8555 break;
8556
8557 case elfcpp::R_ARM_PREL31:
8558 reloc_status = Arm_relocate_functions::prel31(view, object, psymval,
8559 address, thumb_bit);
8560 break;
8561
8562 case elfcpp::R_ARM_V4BX:
8563 if (target->fix_v4bx() > General_options::FIX_V4BX_NONE)
8564 {
8565 const bool is_v4bx_interworking =
8566 (target->fix_v4bx() == General_options::FIX_V4BX_INTERWORKING);
8567 reloc_status =
8568 Arm_relocate_functions::v4bx(relinfo, view, object, address,
8569 is_v4bx_interworking);
8570 }
8571 break;
8572
8573 case elfcpp::R_ARM_THM_PC8:
8574 reloc_status =
8575 Arm_relocate_functions::thm_pc8(view, object, psymval, address);
8576 break;
8577
8578 case elfcpp::R_ARM_THM_PC12:
8579 reloc_status =
8580 Arm_relocate_functions::thm_pc12(view, object, psymval, address);
8581 break;
8582
8583 case elfcpp::R_ARM_THM_ALU_PREL_11_0:
8584 reloc_status =
8585 Arm_relocate_functions::thm_alu11(view, object, psymval, address,
8586 thumb_bit);
8587 break;
8588
8589 case elfcpp::R_ARM_ALU_PC_G0_NC:
8590 case elfcpp::R_ARM_ALU_PC_G0:
8591 case elfcpp::R_ARM_ALU_PC_G1_NC:
8592 case elfcpp::R_ARM_ALU_PC_G1:
8593 case elfcpp::R_ARM_ALU_PC_G2:
8594 case elfcpp::R_ARM_ALU_SB_G0_NC:
8595 case elfcpp::R_ARM_ALU_SB_G0:
8596 case elfcpp::R_ARM_ALU_SB_G1_NC:
8597 case elfcpp::R_ARM_ALU_SB_G1:
8598 case elfcpp::R_ARM_ALU_SB_G2:
8599 reloc_status =
8600 Arm_relocate_functions::arm_grp_alu(view, object, psymval,
8601 reloc_property->group_index(),
8602 relative_address_base,
8603 thumb_bit, check_overflow);
8604 break;
8605
8606 case elfcpp::R_ARM_LDR_PC_G0:
8607 case elfcpp::R_ARM_LDR_PC_G1:
8608 case elfcpp::R_ARM_LDR_PC_G2:
8609 case elfcpp::R_ARM_LDR_SB_G0:
8610 case elfcpp::R_ARM_LDR_SB_G1:
8611 case elfcpp::R_ARM_LDR_SB_G2:
8612 reloc_status =
8613 Arm_relocate_functions::arm_grp_ldr(view, object, psymval,
8614 reloc_property->group_index(),
8615 relative_address_base);
8616 break;
8617
8618 case elfcpp::R_ARM_LDRS_PC_G0:
8619 case elfcpp::R_ARM_LDRS_PC_G1:
8620 case elfcpp::R_ARM_LDRS_PC_G2:
8621 case elfcpp::R_ARM_LDRS_SB_G0:
8622 case elfcpp::R_ARM_LDRS_SB_G1:
8623 case elfcpp::R_ARM_LDRS_SB_G2:
8624 reloc_status =
8625 Arm_relocate_functions::arm_grp_ldrs(view, object, psymval,
8626 reloc_property->group_index(),
8627 relative_address_base);
8628 break;
8629
8630 case elfcpp::R_ARM_LDC_PC_G0:
8631 case elfcpp::R_ARM_LDC_PC_G1:
8632 case elfcpp::R_ARM_LDC_PC_G2:
8633 case elfcpp::R_ARM_LDC_SB_G0:
8634 case elfcpp::R_ARM_LDC_SB_G1:
8635 case elfcpp::R_ARM_LDC_SB_G2:
8636 reloc_status =
8637 Arm_relocate_functions::arm_grp_ldc(view, object, psymval,
8638 reloc_property->group_index(),
8639 relative_address_base);
8640 break;
8641
8642 // These are initial tls relocs, which are expected when
8643 // linking.
8644 case elfcpp::R_ARM_TLS_GD32: // Global-dynamic
8645 case elfcpp::R_ARM_TLS_LDM32: // Local-dynamic
8646 case elfcpp::R_ARM_TLS_LDO32: // Alternate local-dynamic
8647 case elfcpp::R_ARM_TLS_IE32: // Initial-exec
8648 case elfcpp::R_ARM_TLS_LE32: // Local-exec
8649 reloc_status =
8650 this->relocate_tls(relinfo, target, relnum, rel, r_type, gsym, psymval,
8651 view, address, view_size);
8652 break;
8653
8654 default:
8655 gold_unreachable();
8656 }
8657
8658 // Report any errors.
8659 switch (reloc_status)
8660 {
8661 case Arm_relocate_functions::STATUS_OKAY:
8662 break;
8663 case Arm_relocate_functions::STATUS_OVERFLOW:
8664 gold_error_at_location(relinfo, relnum, rel.get_r_offset(),
8665 _("relocation overflow in %s"),
8666 reloc_property->name().c_str());
8667 break;
8668 case Arm_relocate_functions::STATUS_BAD_RELOC:
8669 gold_error_at_location(
8670 relinfo,
8671 relnum,
8672 rel.get_r_offset(),
8673 _("unexpected opcode while processing relocation %s"),
8674 reloc_property->name().c_str());
8675 break;
8676 default:
8677 gold_unreachable();
8678 }
8679
8680 return true;
8681 }
8682
8683 // Perform a TLS relocation.
8684
8685 template<bool big_endian>
8686 inline typename Arm_relocate_functions<big_endian>::Status
8687 Target_arm<big_endian>::Relocate::relocate_tls(
8688 const Relocate_info<32, big_endian>* relinfo,
8689 Target_arm<big_endian>* target,
8690 size_t relnum,
8691 const elfcpp::Rel<32, big_endian>& rel,
8692 unsigned int r_type,
8693 const Sized_symbol<32>* gsym,
8694 const Symbol_value<32>* psymval,
8695 unsigned char* view,
8696 elfcpp::Elf_types<32>::Elf_Addr address,
8697 section_size_type /*view_size*/ )
8698 {
8699 typedef Arm_relocate_functions<big_endian> ArmRelocFuncs;
8700 typedef Relocate_functions<32, big_endian> RelocFuncs;
8701 Output_segment* tls_segment = relinfo->layout->tls_segment();
8702
8703 const Sized_relobj<32, big_endian>* object = relinfo->object;
8704
8705 elfcpp::Elf_types<32>::Elf_Addr value = psymval->value(object, 0);
8706
8707 const bool is_final = (gsym == NULL
8708 ? !parameters->options().shared()
8709 : gsym->final_value_is_known());
8710 const tls::Tls_optimization optimized_type
8711 = Target_arm<big_endian>::optimize_tls_reloc(is_final, r_type);
8712 switch (r_type)
8713 {
8714 case elfcpp::R_ARM_TLS_GD32: // Global-dynamic
8715 {
8716 unsigned int got_type = GOT_TYPE_TLS_PAIR;
8717 unsigned int got_offset;
8718 if (gsym != NULL)
8719 {
8720 gold_assert(gsym->has_got_offset(got_type));
8721 got_offset = gsym->got_offset(got_type) - target->got_size();
8722 }
8723 else
8724 {
8725 unsigned int r_sym = elfcpp::elf_r_sym<32>(rel.get_r_info());
8726 gold_assert(object->local_has_got_offset(r_sym, got_type));
8727 got_offset = (object->local_got_offset(r_sym, got_type)
8728 - target->got_size());
8729 }
8730 if (optimized_type == tls::TLSOPT_NONE)
8731 {
8732 Arm_address got_entry =
8733 target->got_plt_section()->address() + got_offset;
8734
8735 // Relocate the field with the PC relative offset of the pair of
8736 // GOT entries.
8737 RelocFuncs::pcrel32(view, got_entry, address);
8738 return ArmRelocFuncs::STATUS_OKAY;
8739 }
8740 }
8741 break;
8742
8743 case elfcpp::R_ARM_TLS_LDM32: // Local-dynamic
8744 if (optimized_type == tls::TLSOPT_NONE)
8745 {
8746 // Relocate the field with the offset of the GOT entry for
8747 // the module index.
8748 unsigned int got_offset;
8749 got_offset = (target->got_mod_index_entry(NULL, NULL, NULL)
8750 - target->got_size());
8751 Arm_address got_entry =
8752 target->got_plt_section()->address() + got_offset;
8753
8754 // Relocate the field with the PC relative offset of the pair of
8755 // GOT entries.
8756 RelocFuncs::pcrel32(view, got_entry, address);
8757 return ArmRelocFuncs::STATUS_OKAY;
8758 }
8759 break;
8760
8761 case elfcpp::R_ARM_TLS_LDO32: // Alternate local-dynamic
8762 RelocFuncs::rel32(view, value);
8763 return ArmRelocFuncs::STATUS_OKAY;
8764
8765 case elfcpp::R_ARM_TLS_IE32: // Initial-exec
8766 if (optimized_type == tls::TLSOPT_NONE)
8767 {
8768 // Relocate the field with the offset of the GOT entry for
8769 // the tp-relative offset of the symbol.
8770 unsigned int got_type = GOT_TYPE_TLS_OFFSET;
8771 unsigned int got_offset;
8772 if (gsym != NULL)
8773 {
8774 gold_assert(gsym->has_got_offset(got_type));
8775 got_offset = gsym->got_offset(got_type);
8776 }
8777 else
8778 {
8779 unsigned int r_sym = elfcpp::elf_r_sym<32>(rel.get_r_info());
8780 gold_assert(object->local_has_got_offset(r_sym, got_type));
8781 got_offset = object->local_got_offset(r_sym, got_type);
8782 }
8783
8784 // All GOT offsets are relative to the end of the GOT.
8785 got_offset -= target->got_size();
8786
8787 Arm_address got_entry =
8788 target->got_plt_section()->address() + got_offset;
8789
8790 // Relocate the field with the PC relative offset of the GOT entry.
8791 RelocFuncs::pcrel32(view, got_entry, address);
8792 return ArmRelocFuncs::STATUS_OKAY;
8793 }
8794 break;
8795
8796 case elfcpp::R_ARM_TLS_LE32: // Local-exec
8797 // If we're creating a shared library, a dynamic relocation will
8798 // have been created for this location, so do not apply it now.
8799 if (!parameters->options().shared())
8800 {
8801 gold_assert(tls_segment != NULL);
8802
8803 // $tp points to the TCB, which is followed by the TLS, so we
8804 // need to add TCB size to the offset.
8805 Arm_address aligned_tcb_size =
8806 align_address(ARM_TCB_SIZE, tls_segment->maximum_alignment());
8807 RelocFuncs::rel32(view, value + aligned_tcb_size);
8808
8809 }
8810 return ArmRelocFuncs::STATUS_OKAY;
8811
8812 default:
8813 gold_unreachable();
8814 }
8815
8816 gold_error_at_location(relinfo, relnum, rel.get_r_offset(),
8817 _("unsupported reloc %u"),
8818 r_type);
8819 return ArmRelocFuncs::STATUS_BAD_RELOC;
8820 }
8821
8822 // Relocate section data.
8823
8824 template<bool big_endian>
8825 void
8826 Target_arm<big_endian>::relocate_section(
8827 const Relocate_info<32, big_endian>* relinfo,
8828 unsigned int sh_type,
8829 const unsigned char* prelocs,
8830 size_t reloc_count,
8831 Output_section* output_section,
8832 bool needs_special_offset_handling,
8833 unsigned char* view,
8834 Arm_address address,
8835 section_size_type view_size,
8836 const Reloc_symbol_changes* reloc_symbol_changes)
8837 {
8838 typedef typename Target_arm<big_endian>::Relocate Arm_relocate;
8839 gold_assert(sh_type == elfcpp::SHT_REL);
8840
8841 // See if we are relocating a relaxed input section. If so, the view
8842 // covers the whole output section and we need to adjust accordingly.
8843 if (needs_special_offset_handling)
8844 {
8845 const Output_relaxed_input_section* poris =
8846 output_section->find_relaxed_input_section(relinfo->object,
8847 relinfo->data_shndx);
8848 if (poris != NULL)
8849 {
8850 Arm_address section_address = poris->address();
8851 section_size_type section_size = poris->data_size();
8852
8853 gold_assert((section_address >= address)
8854 && ((section_address + section_size)
8855 <= (address + view_size)));
8856
8857 off_t offset = section_address - address;
8858 view += offset;
8859 address += offset;
8860 view_size = section_size;
8861 }
8862 }
8863
8864 gold::relocate_section<32, big_endian, Target_arm, elfcpp::SHT_REL,
8865 Arm_relocate>(
8866 relinfo,
8867 this,
8868 prelocs,
8869 reloc_count,
8870 output_section,
8871 needs_special_offset_handling,
8872 view,
8873 address,
8874 view_size,
8875 reloc_symbol_changes);
8876 }
8877
8878 // Return the size of a relocation while scanning during a relocatable
8879 // link.
8880
8881 template<bool big_endian>
8882 unsigned int
8883 Target_arm<big_endian>::Relocatable_size_for_reloc::get_size_for_reloc(
8884 unsigned int r_type,
8885 Relobj* object)
8886 {
8887 r_type = get_real_reloc_type(r_type);
8888 const Arm_reloc_property* arp =
8889 arm_reloc_property_table->get_implemented_static_reloc_property(r_type);
8890 if (arp != NULL)
8891 return arp->size();
8892 else
8893 {
8894 std::string reloc_name =
8895 arm_reloc_property_table->reloc_name_in_error_message(r_type);
8896 gold_error(_("%s: unexpected %s in object file"),
8897 object->name().c_str(), reloc_name.c_str());
8898 return 0;
8899 }
8900 }
8901
8902 // Scan the relocs during a relocatable link.
8903
8904 template<bool big_endian>
8905 void
8906 Target_arm<big_endian>::scan_relocatable_relocs(
8907 Symbol_table* symtab,
8908 Layout* layout,
8909 Sized_relobj<32, big_endian>* object,
8910 unsigned int data_shndx,
8911 unsigned int sh_type,
8912 const unsigned char* prelocs,
8913 size_t reloc_count,
8914 Output_section* output_section,
8915 bool needs_special_offset_handling,
8916 size_t local_symbol_count,
8917 const unsigned char* plocal_symbols,
8918 Relocatable_relocs* rr)
8919 {
8920 gold_assert(sh_type == elfcpp::SHT_REL);
8921
8922 typedef gold::Default_scan_relocatable_relocs<elfcpp::SHT_REL,
8923 Relocatable_size_for_reloc> Scan_relocatable_relocs;
8924
8925 gold::scan_relocatable_relocs<32, big_endian, elfcpp::SHT_REL,
8926 Scan_relocatable_relocs>(
8927 symtab,
8928 layout,
8929 object,
8930 data_shndx,
8931 prelocs,
8932 reloc_count,
8933 output_section,
8934 needs_special_offset_handling,
8935 local_symbol_count,
8936 plocal_symbols,
8937 rr);
8938 }
8939
8940 // Relocate a section during a relocatable link.
8941
8942 template<bool big_endian>
8943 void
8944 Target_arm<big_endian>::relocate_for_relocatable(
8945 const Relocate_info<32, big_endian>* relinfo,
8946 unsigned int sh_type,
8947 const unsigned char* prelocs,
8948 size_t reloc_count,
8949 Output_section* output_section,
8950 off_t offset_in_output_section,
8951 const Relocatable_relocs* rr,
8952 unsigned char* view,
8953 Arm_address view_address,
8954 section_size_type view_size,
8955 unsigned char* reloc_view,
8956 section_size_type reloc_view_size)
8957 {
8958 gold_assert(sh_type == elfcpp::SHT_REL);
8959
8960 gold::relocate_for_relocatable<32, big_endian, elfcpp::SHT_REL>(
8961 relinfo,
8962 prelocs,
8963 reloc_count,
8964 output_section,
8965 offset_in_output_section,
8966 rr,
8967 view,
8968 view_address,
8969 view_size,
8970 reloc_view,
8971 reloc_view_size);
8972 }
8973
8974 // Return the value to use for a dynamic symbol which requires special
8975 // treatment. This is how we support equality comparisons of function
8976 // pointers across shared library boundaries, as described in the
8977 // processor specific ABI supplement.
8978
8979 template<bool big_endian>
8980 uint64_t
8981 Target_arm<big_endian>::do_dynsym_value(const Symbol* gsym) const
8982 {
8983 gold_assert(gsym->is_from_dynobj() && gsym->has_plt_offset());
8984 return this->plt_section()->address() + gsym->plt_offset();
8985 }
8986
8987 // Map platform-specific relocs to real relocs
8988 //
8989 template<bool big_endian>
8990 unsigned int
8991 Target_arm<big_endian>::get_real_reloc_type (unsigned int r_type)
8992 {
8993 switch (r_type)
8994 {
8995 case elfcpp::R_ARM_TARGET1:
8996 // This is either R_ARM_ABS32 or R_ARM_REL32;
8997 return elfcpp::R_ARM_ABS32;
8998
8999 case elfcpp::R_ARM_TARGET2:
9000 // This can be any reloc type but ususally is R_ARM_GOT_PREL
9001 return elfcpp::R_ARM_GOT_PREL;
9002
9003 default:
9004 return r_type;
9005 }
9006 }
9007
9008 // Whether if two EABI versions V1 and V2 are compatible.
9009
9010 template<bool big_endian>
9011 bool
9012 Target_arm<big_endian>::are_eabi_versions_compatible(
9013 elfcpp::Elf_Word v1,
9014 elfcpp::Elf_Word v2)
9015 {
9016 // v4 and v5 are the same spec before and after it was released,
9017 // so allow mixing them.
9018 if ((v1 == elfcpp::EF_ARM_EABI_VER4 && v2 == elfcpp::EF_ARM_EABI_VER5)
9019 || (v1 == elfcpp::EF_ARM_EABI_VER5 && v2 == elfcpp::EF_ARM_EABI_VER4))
9020 return true;
9021
9022 return v1 == v2;
9023 }
9024
9025 // Combine FLAGS from an input object called NAME and the processor-specific
9026 // flags in the ELF header of the output. Much of this is adapted from the
9027 // processor-specific flags merging code in elf32_arm_merge_private_bfd_data
9028 // in bfd/elf32-arm.c.
9029
9030 template<bool big_endian>
9031 void
9032 Target_arm<big_endian>::merge_processor_specific_flags(
9033 const std::string& name,
9034 elfcpp::Elf_Word flags)
9035 {
9036 if (this->are_processor_specific_flags_set())
9037 {
9038 elfcpp::Elf_Word out_flags = this->processor_specific_flags();
9039
9040 // Nothing to merge if flags equal to those in output.
9041 if (flags == out_flags)
9042 return;
9043
9044 // Complain about various flag mismatches.
9045 elfcpp::Elf_Word version1 = elfcpp::arm_eabi_version(flags);
9046 elfcpp::Elf_Word version2 = elfcpp::arm_eabi_version(out_flags);
9047 if (!this->are_eabi_versions_compatible(version1, version2)
9048 && parameters->options().warn_mismatch())
9049 gold_error(_("Source object %s has EABI version %d but output has "
9050 "EABI version %d."),
9051 name.c_str(),
9052 (flags & elfcpp::EF_ARM_EABIMASK) >> 24,
9053 (out_flags & elfcpp::EF_ARM_EABIMASK) >> 24);
9054 }
9055 else
9056 {
9057 // If the input is the default architecture and had the default
9058 // flags then do not bother setting the flags for the output
9059 // architecture, instead allow future merges to do this. If no
9060 // future merges ever set these flags then they will retain their
9061 // uninitialised values, which surprise surprise, correspond
9062 // to the default values.
9063 if (flags == 0)
9064 return;
9065
9066 // This is the first time, just copy the flags.
9067 // We only copy the EABI version for now.
9068 this->set_processor_specific_flags(flags & elfcpp::EF_ARM_EABIMASK);
9069 }
9070 }
9071
9072 // Adjust ELF file header.
9073 template<bool big_endian>
9074 void
9075 Target_arm<big_endian>::do_adjust_elf_header(
9076 unsigned char* view,
9077 int len) const
9078 {
9079 gold_assert(len == elfcpp::Elf_sizes<32>::ehdr_size);
9080
9081 elfcpp::Ehdr<32, big_endian> ehdr(view);
9082 unsigned char e_ident[elfcpp::EI_NIDENT];
9083 memcpy(e_ident, ehdr.get_e_ident(), elfcpp::EI_NIDENT);
9084
9085 if (elfcpp::arm_eabi_version(this->processor_specific_flags())
9086 == elfcpp::EF_ARM_EABI_UNKNOWN)
9087 e_ident[elfcpp::EI_OSABI] = elfcpp::ELFOSABI_ARM;
9088 else
9089 e_ident[elfcpp::EI_OSABI] = 0;
9090 e_ident[elfcpp::EI_ABIVERSION] = 0;
9091
9092 // FIXME: Do EF_ARM_BE8 adjustment.
9093
9094 elfcpp::Ehdr_write<32, big_endian> oehdr(view);
9095 oehdr.put_e_ident(e_ident);
9096 }
9097
9098 // do_make_elf_object to override the same function in the base class.
9099 // We need to use a target-specific sub-class of Sized_relobj<32, big_endian>
9100 // to store ARM specific information. Hence we need to have our own
9101 // ELF object creation.
9102
9103 template<bool big_endian>
9104 Object*
9105 Target_arm<big_endian>::do_make_elf_object(
9106 const std::string& name,
9107 Input_file* input_file,
9108 off_t offset, const elfcpp::Ehdr<32, big_endian>& ehdr)
9109 {
9110 int et = ehdr.get_e_type();
9111 if (et == elfcpp::ET_REL)
9112 {
9113 Arm_relobj<big_endian>* obj =
9114 new Arm_relobj<big_endian>(name, input_file, offset, ehdr);
9115 obj->setup();
9116 return obj;
9117 }
9118 else if (et == elfcpp::ET_DYN)
9119 {
9120 Sized_dynobj<32, big_endian>* obj =
9121 new Arm_dynobj<big_endian>(name, input_file, offset, ehdr);
9122 obj->setup();
9123 return obj;
9124 }
9125 else
9126 {
9127 gold_error(_("%s: unsupported ELF file type %d"),
9128 name.c_str(), et);
9129 return NULL;
9130 }
9131 }
9132
9133 // Read the architecture from the Tag_also_compatible_with attribute, if any.
9134 // Returns -1 if no architecture could be read.
9135 // This is adapted from get_secondary_compatible_arch() in bfd/elf32-arm.c.
9136
9137 template<bool big_endian>
9138 int
9139 Target_arm<big_endian>::get_secondary_compatible_arch(
9140 const Attributes_section_data* pasd)
9141 {
9142 const Object_attribute *known_attributes =
9143 pasd->known_attributes(Object_attribute::OBJ_ATTR_PROC);
9144
9145 // Note: the tag and its argument below are uleb128 values, though
9146 // currently-defined values fit in one byte for each.
9147 const std::string& sv =
9148 known_attributes[elfcpp::Tag_also_compatible_with].string_value();
9149 if (sv.size() == 2
9150 && sv.data()[0] == elfcpp::Tag_CPU_arch
9151 && (sv.data()[1] & 128) != 128)
9152 return sv.data()[1];
9153
9154 // This tag is "safely ignorable", so don't complain if it looks funny.
9155 return -1;
9156 }
9157
9158 // Set, or unset, the architecture of the Tag_also_compatible_with attribute.
9159 // The tag is removed if ARCH is -1.
9160 // This is adapted from set_secondary_compatible_arch() in bfd/elf32-arm.c.
9161
9162 template<bool big_endian>
9163 void
9164 Target_arm<big_endian>::set_secondary_compatible_arch(
9165 Attributes_section_data* pasd,
9166 int arch)
9167 {
9168 Object_attribute *known_attributes =
9169 pasd->known_attributes(Object_attribute::OBJ_ATTR_PROC);
9170
9171 if (arch == -1)
9172 {
9173 known_attributes[elfcpp::Tag_also_compatible_with].set_string_value("");
9174 return;
9175 }
9176
9177 // Note: the tag and its argument below are uleb128 values, though
9178 // currently-defined values fit in one byte for each.
9179 char sv[3];
9180 sv[0] = elfcpp::Tag_CPU_arch;
9181 gold_assert(arch != 0);
9182 sv[1] = arch;
9183 sv[2] = '\0';
9184
9185 known_attributes[elfcpp::Tag_also_compatible_with].set_string_value(sv);
9186 }
9187
9188 // Combine two values for Tag_CPU_arch, taking secondary compatibility tags
9189 // into account.
9190 // This is adapted from tag_cpu_arch_combine() in bfd/elf32-arm.c.
9191
9192 template<bool big_endian>
9193 int
9194 Target_arm<big_endian>::tag_cpu_arch_combine(
9195 const char* name,
9196 int oldtag,
9197 int* secondary_compat_out,
9198 int newtag,
9199 int secondary_compat)
9200 {
9201 #define T(X) elfcpp::TAG_CPU_ARCH_##X
9202 static const int v6t2[] =
9203 {
9204 T(V6T2), // PRE_V4.
9205 T(V6T2), // V4.
9206 T(V6T2), // V4T.
9207 T(V6T2), // V5T.
9208 T(V6T2), // V5TE.
9209 T(V6T2), // V5TEJ.
9210 T(V6T2), // V6.
9211 T(V7), // V6KZ.
9212 T(V6T2) // V6T2.
9213 };
9214 static const int v6k[] =
9215 {
9216 T(V6K), // PRE_V4.
9217 T(V6K), // V4.
9218 T(V6K), // V4T.
9219 T(V6K), // V5T.
9220 T(V6K), // V5TE.
9221 T(V6K), // V5TEJ.
9222 T(V6K), // V6.
9223 T(V6KZ), // V6KZ.
9224 T(V7), // V6T2.
9225 T(V6K) // V6K.
9226 };
9227 static const int v7[] =
9228 {
9229 T(V7), // PRE_V4.
9230 T(V7), // V4.
9231 T(V7), // V4T.
9232 T(V7), // V5T.
9233 T(V7), // V5TE.
9234 T(V7), // V5TEJ.
9235 T(V7), // V6.
9236 T(V7), // V6KZ.
9237 T(V7), // V6T2.
9238 T(V7), // V6K.
9239 T(V7) // V7.
9240 };
9241 static const int v6_m[] =
9242 {
9243 -1, // PRE_V4.
9244 -1, // V4.
9245 T(V6K), // V4T.
9246 T(V6K), // V5T.
9247 T(V6K), // V5TE.
9248 T(V6K), // V5TEJ.
9249 T(V6K), // V6.
9250 T(V6KZ), // V6KZ.
9251 T(V7), // V6T2.
9252 T(V6K), // V6K.
9253 T(V7), // V7.
9254 T(V6_M) // V6_M.
9255 };
9256 static const int v6s_m[] =
9257 {
9258 -1, // PRE_V4.
9259 -1, // V4.
9260 T(V6K), // V4T.
9261 T(V6K), // V5T.
9262 T(V6K), // V5TE.
9263 T(V6K), // V5TEJ.
9264 T(V6K), // V6.
9265 T(V6KZ), // V6KZ.
9266 T(V7), // V6T2.
9267 T(V6K), // V6K.
9268 T(V7), // V7.
9269 T(V6S_M), // V6_M.
9270 T(V6S_M) // V6S_M.
9271 };
9272 static const int v7e_m[] =
9273 {
9274 -1, // PRE_V4.
9275 -1, // V4.
9276 T(V7E_M), // V4T.
9277 T(V7E_M), // V5T.
9278 T(V7E_M), // V5TE.
9279 T(V7E_M), // V5TEJ.
9280 T(V7E_M), // V6.
9281 T(V7E_M), // V6KZ.
9282 T(V7E_M), // V6T2.
9283 T(V7E_M), // V6K.
9284 T(V7E_M), // V7.
9285 T(V7E_M), // V6_M.
9286 T(V7E_M), // V6S_M.
9287 T(V7E_M) // V7E_M.
9288 };
9289 static const int v4t_plus_v6_m[] =
9290 {
9291 -1, // PRE_V4.
9292 -1, // V4.
9293 T(V4T), // V4T.
9294 T(V5T), // V5T.
9295 T(V5TE), // V5TE.
9296 T(V5TEJ), // V5TEJ.
9297 T(V6), // V6.
9298 T(V6KZ), // V6KZ.
9299 T(V6T2), // V6T2.
9300 T(V6K), // V6K.
9301 T(V7), // V7.
9302 T(V6_M), // V6_M.
9303 T(V6S_M), // V6S_M.
9304 T(V7E_M), // V7E_M.
9305 T(V4T_PLUS_V6_M) // V4T plus V6_M.
9306 };
9307 static const int *comb[] =
9308 {
9309 v6t2,
9310 v6k,
9311 v7,
9312 v6_m,
9313 v6s_m,
9314 v7e_m,
9315 // Pseudo-architecture.
9316 v4t_plus_v6_m
9317 };
9318
9319 // Check we've not got a higher architecture than we know about.
9320
9321 if (oldtag >= elfcpp::MAX_TAG_CPU_ARCH || newtag >= elfcpp::MAX_TAG_CPU_ARCH)
9322 {
9323 gold_error(_("%s: unknown CPU architecture"), name);
9324 return -1;
9325 }
9326
9327 // Override old tag if we have a Tag_also_compatible_with on the output.
9328
9329 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
9330 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
9331 oldtag = T(V4T_PLUS_V6_M);
9332
9333 // And override the new tag if we have a Tag_also_compatible_with on the
9334 // input.
9335
9336 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
9337 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
9338 newtag = T(V4T_PLUS_V6_M);
9339
9340 // Architectures before V6KZ add features monotonically.
9341 int tagh = std::max(oldtag, newtag);
9342 if (tagh <= elfcpp::TAG_CPU_ARCH_V6KZ)
9343 return tagh;
9344
9345 int tagl = std::min(oldtag, newtag);
9346 int result = comb[tagh - T(V6T2)][tagl];
9347
9348 // Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
9349 // as the canonical version.
9350 if (result == T(V4T_PLUS_V6_M))
9351 {
9352 result = T(V4T);
9353 *secondary_compat_out = T(V6_M);
9354 }
9355 else
9356 *secondary_compat_out = -1;
9357
9358 if (result == -1)
9359 {
9360 gold_error(_("%s: conflicting CPU architectures %d/%d"),
9361 name, oldtag, newtag);
9362 return -1;
9363 }
9364
9365 return result;
9366 #undef T
9367 }
9368
9369 // Helper to print AEABI enum tag value.
9370
9371 template<bool big_endian>
9372 std::string
9373 Target_arm<big_endian>::aeabi_enum_name(unsigned int value)
9374 {
9375 static const char *aeabi_enum_names[] =
9376 { "", "variable-size", "32-bit", "" };
9377 const size_t aeabi_enum_names_size =
9378 sizeof(aeabi_enum_names) / sizeof(aeabi_enum_names[0]);
9379
9380 if (value < aeabi_enum_names_size)
9381 return std::string(aeabi_enum_names[value]);
9382 else
9383 {
9384 char buffer[100];
9385 sprintf(buffer, "<unknown value %u>", value);
9386 return std::string(buffer);
9387 }
9388 }
9389
9390 // Return the string value to store in TAG_CPU_name.
9391
9392 template<bool big_endian>
9393 std::string
9394 Target_arm<big_endian>::tag_cpu_name_value(unsigned int value)
9395 {
9396 static const char *name_table[] = {
9397 // These aren't real CPU names, but we can't guess
9398 // that from the architecture version alone.
9399 "Pre v4",
9400 "ARM v4",
9401 "ARM v4T",
9402 "ARM v5T",
9403 "ARM v5TE",
9404 "ARM v5TEJ",
9405 "ARM v6",
9406 "ARM v6KZ",
9407 "ARM v6T2",
9408 "ARM v6K",
9409 "ARM v7",
9410 "ARM v6-M",
9411 "ARM v6S-M",
9412 "ARM v7E-M"
9413 };
9414 const size_t name_table_size = sizeof(name_table) / sizeof(name_table[0]);
9415
9416 if (value < name_table_size)
9417 return std::string(name_table[value]);
9418 else
9419 {
9420 char buffer[100];
9421 sprintf(buffer, "<unknown CPU value %u>", value);
9422 return std::string(buffer);
9423 }
9424 }
9425
9426 // Merge object attributes from input file called NAME with those of the
9427 // output. The input object attributes are in the object pointed by PASD.
9428
9429 template<bool big_endian>
9430 void
9431 Target_arm<big_endian>::merge_object_attributes(
9432 const char* name,
9433 const Attributes_section_data* pasd)
9434 {
9435 // Return if there is no attributes section data.
9436 if (pasd == NULL)
9437 return;
9438
9439 // If output has no object attributes, just copy.
9440 const int vendor = Object_attribute::OBJ_ATTR_PROC;
9441 if (this->attributes_section_data_ == NULL)
9442 {
9443 this->attributes_section_data_ = new Attributes_section_data(*pasd);
9444 Object_attribute* out_attr =
9445 this->attributes_section_data_->known_attributes(vendor);
9446
9447 // We do not output objects with Tag_MPextension_use_legacy - we move
9448 // the attribute's value to Tag_MPextension_use. */
9449 if (out_attr[elfcpp::Tag_MPextension_use_legacy].int_value() != 0)
9450 {
9451 if (out_attr[elfcpp::Tag_MPextension_use].int_value() != 0
9452 && out_attr[elfcpp::Tag_MPextension_use_legacy].int_value()
9453 != out_attr[elfcpp::Tag_MPextension_use].int_value())
9454 {
9455 gold_error(_("%s has both the current and legacy "
9456 "Tag_MPextension_use attributes"),
9457 name);
9458 }
9459
9460 out_attr[elfcpp::Tag_MPextension_use] =
9461 out_attr[elfcpp::Tag_MPextension_use_legacy];
9462 out_attr[elfcpp::Tag_MPextension_use_legacy].set_type(0);
9463 out_attr[elfcpp::Tag_MPextension_use_legacy].set_int_value(0);
9464 }
9465
9466 return;
9467 }
9468
9469 const Object_attribute* in_attr = pasd->known_attributes(vendor);
9470 Object_attribute* out_attr =
9471 this->attributes_section_data_->known_attributes(vendor);
9472
9473 // This needs to happen before Tag_ABI_FP_number_model is merged. */
9474 if (in_attr[elfcpp::Tag_ABI_VFP_args].int_value()
9475 != out_attr[elfcpp::Tag_ABI_VFP_args].int_value())
9476 {
9477 // Ignore mismatches if the object doesn't use floating point. */
9478 if (out_attr[elfcpp::Tag_ABI_FP_number_model].int_value() == 0)
9479 out_attr[elfcpp::Tag_ABI_VFP_args].set_int_value(
9480 in_attr[elfcpp::Tag_ABI_VFP_args].int_value());
9481 else if (in_attr[elfcpp::Tag_ABI_FP_number_model].int_value() != 0
9482 && parameters->options().warn_mismatch())
9483 gold_error(_("%s uses VFP register arguments, output does not"),
9484 name);
9485 }
9486
9487 for (int i = 4; i < Vendor_object_attributes::NUM_KNOWN_ATTRIBUTES; ++i)
9488 {
9489 // Merge this attribute with existing attributes.
9490 switch (i)
9491 {
9492 case elfcpp::Tag_CPU_raw_name:
9493 case elfcpp::Tag_CPU_name:
9494 // These are merged after Tag_CPU_arch.
9495 break;
9496
9497 case elfcpp::Tag_ABI_optimization_goals:
9498 case elfcpp::Tag_ABI_FP_optimization_goals:
9499 // Use the first value seen.
9500 break;
9501
9502 case elfcpp::Tag_CPU_arch:
9503 {
9504 unsigned int saved_out_attr = out_attr->int_value();
9505 // Merge Tag_CPU_arch and Tag_also_compatible_with.
9506 int secondary_compat =
9507 this->get_secondary_compatible_arch(pasd);
9508 int secondary_compat_out =
9509 this->get_secondary_compatible_arch(
9510 this->attributes_section_data_);
9511 out_attr[i].set_int_value(
9512 tag_cpu_arch_combine(name, out_attr[i].int_value(),
9513 &secondary_compat_out,
9514 in_attr[i].int_value(),
9515 secondary_compat));
9516 this->set_secondary_compatible_arch(this->attributes_section_data_,
9517 secondary_compat_out);
9518
9519 // Merge Tag_CPU_name and Tag_CPU_raw_name.
9520 if (out_attr[i].int_value() == saved_out_attr)
9521 ; // Leave the names alone.
9522 else if (out_attr[i].int_value() == in_attr[i].int_value())
9523 {
9524 // The output architecture has been changed to match the
9525 // input architecture. Use the input names.
9526 out_attr[elfcpp::Tag_CPU_name].set_string_value(
9527 in_attr[elfcpp::Tag_CPU_name].string_value());
9528 out_attr[elfcpp::Tag_CPU_raw_name].set_string_value(
9529 in_attr[elfcpp::Tag_CPU_raw_name].string_value());
9530 }
9531 else
9532 {
9533 out_attr[elfcpp::Tag_CPU_name].set_string_value("");
9534 out_attr[elfcpp::Tag_CPU_raw_name].set_string_value("");
9535 }
9536
9537 // If we still don't have a value for Tag_CPU_name,
9538 // make one up now. Tag_CPU_raw_name remains blank.
9539 if (out_attr[elfcpp::Tag_CPU_name].string_value() == "")
9540 {
9541 const std::string cpu_name =
9542 this->tag_cpu_name_value(out_attr[i].int_value());
9543 // FIXME: If we see an unknown CPU, this will be set
9544 // to "<unknown CPU n>", where n is the attribute value.
9545 // This is different from BFD, which leaves the name alone.
9546 out_attr[elfcpp::Tag_CPU_name].set_string_value(cpu_name);
9547 }
9548 }
9549 break;
9550
9551 case elfcpp::Tag_ARM_ISA_use:
9552 case elfcpp::Tag_THUMB_ISA_use:
9553 case elfcpp::Tag_WMMX_arch:
9554 case elfcpp::Tag_Advanced_SIMD_arch:
9555 // ??? Do Advanced_SIMD (NEON) and WMMX conflict?
9556 case elfcpp::Tag_ABI_FP_rounding:
9557 case elfcpp::Tag_ABI_FP_exceptions:
9558 case elfcpp::Tag_ABI_FP_user_exceptions:
9559 case elfcpp::Tag_ABI_FP_number_model:
9560 case elfcpp::Tag_VFP_HP_extension:
9561 case elfcpp::Tag_CPU_unaligned_access:
9562 case elfcpp::Tag_T2EE_use:
9563 case elfcpp::Tag_Virtualization_use:
9564 case elfcpp::Tag_MPextension_use:
9565 // Use the largest value specified.
9566 if (in_attr[i].int_value() > out_attr[i].int_value())
9567 out_attr[i].set_int_value(in_attr[i].int_value());
9568 break;
9569
9570 case elfcpp::Tag_ABI_align8_preserved:
9571 case elfcpp::Tag_ABI_PCS_RO_data:
9572 // Use the smallest value specified.
9573 if (in_attr[i].int_value() < out_attr[i].int_value())
9574 out_attr[i].set_int_value(in_attr[i].int_value());
9575 break;
9576
9577 case elfcpp::Tag_ABI_align8_needed:
9578 if ((in_attr[i].int_value() > 0 || out_attr[i].int_value() > 0)
9579 && (in_attr[elfcpp::Tag_ABI_align8_preserved].int_value() == 0
9580 || (out_attr[elfcpp::Tag_ABI_align8_preserved].int_value()
9581 == 0)))
9582 {
9583 // This error message should be enabled once all non-conformant
9584 // binaries in the toolchain have had the attributes set
9585 // properly.
9586 // gold_error(_("output 8-byte data alignment conflicts with %s"),
9587 // name);
9588 }
9589 // Fall through.
9590 case elfcpp::Tag_ABI_FP_denormal:
9591 case elfcpp::Tag_ABI_PCS_GOT_use:
9592 {
9593 // These tags have 0 = don't care, 1 = strong requirement,
9594 // 2 = weak requirement.
9595 static const int order_021[3] = {0, 2, 1};
9596
9597 // Use the "greatest" from the sequence 0, 2, 1, or the largest
9598 // value if greater than 2 (for future-proofing).
9599 if ((in_attr[i].int_value() > 2
9600 && in_attr[i].int_value() > out_attr[i].int_value())
9601 || (in_attr[i].int_value() <= 2
9602 && out_attr[i].int_value() <= 2
9603 && (order_021[in_attr[i].int_value()]
9604 > order_021[out_attr[i].int_value()])))
9605 out_attr[i].set_int_value(in_attr[i].int_value());
9606 }
9607 break;
9608
9609 case elfcpp::Tag_CPU_arch_profile:
9610 if (out_attr[i].int_value() != in_attr[i].int_value())
9611 {
9612 // 0 will merge with anything.
9613 // 'A' and 'S' merge to 'A'.
9614 // 'R' and 'S' merge to 'R'.
9615 // 'M' and 'A|R|S' is an error.
9616 if (out_attr[i].int_value() == 0
9617 || (out_attr[i].int_value() == 'S'
9618 && (in_attr[i].int_value() == 'A'
9619 || in_attr[i].int_value() == 'R')))
9620 out_attr[i].set_int_value(in_attr[i].int_value());
9621 else if (in_attr[i].int_value() == 0
9622 || (in_attr[i].int_value() == 'S'
9623 && (out_attr[i].int_value() == 'A'
9624 || out_attr[i].int_value() == 'R')))
9625 ; // Do nothing.
9626 else if (parameters->options().warn_mismatch())
9627 {
9628 gold_error
9629 (_("conflicting architecture profiles %c/%c"),
9630 in_attr[i].int_value() ? in_attr[i].int_value() : '0',
9631 out_attr[i].int_value() ? out_attr[i].int_value() : '0');
9632 }
9633 }
9634 break;
9635 case elfcpp::Tag_VFP_arch:
9636 {
9637 static const struct
9638 {
9639 int ver;
9640 int regs;
9641 } vfp_versions[7] =
9642 {
9643 {0, 0},
9644 {1, 16},
9645 {2, 16},
9646 {3, 32},
9647 {3, 16},
9648 {4, 32},
9649 {4, 16}
9650 };
9651
9652 // Values greater than 6 aren't defined, so just pick the
9653 // biggest.
9654 if (in_attr[i].int_value() > 6
9655 && in_attr[i].int_value() > out_attr[i].int_value())
9656 {
9657 *out_attr = *in_attr;
9658 break;
9659 }
9660 // The output uses the superset of input features
9661 // (ISA version) and registers.
9662 int ver = std::max(vfp_versions[in_attr[i].int_value()].ver,
9663 vfp_versions[out_attr[i].int_value()].ver);
9664 int regs = std::max(vfp_versions[in_attr[i].int_value()].regs,
9665 vfp_versions[out_attr[i].int_value()].regs);
9666 // This assumes all possible supersets are also a valid
9667 // options.
9668 int newval;
9669 for (newval = 6; newval > 0; newval--)
9670 {
9671 if (regs == vfp_versions[newval].regs
9672 && ver == vfp_versions[newval].ver)
9673 break;
9674 }
9675 out_attr[i].set_int_value(newval);
9676 }
9677 break;
9678 case elfcpp::Tag_PCS_config:
9679 if (out_attr[i].int_value() == 0)
9680 out_attr[i].set_int_value(in_attr[i].int_value());
9681 else if (in_attr[i].int_value() != 0
9682 && out_attr[i].int_value() != 0
9683 && parameters->options().warn_mismatch())
9684 {
9685 // It's sometimes ok to mix different configs, so this is only
9686 // a warning.
9687 gold_warning(_("%s: conflicting platform configuration"), name);
9688 }
9689 break;
9690 case elfcpp::Tag_ABI_PCS_R9_use:
9691 if (in_attr[i].int_value() != out_attr[i].int_value()
9692 && out_attr[i].int_value() != elfcpp::AEABI_R9_unused
9693 && in_attr[i].int_value() != elfcpp::AEABI_R9_unused
9694 && parameters->options().warn_mismatch())
9695 {
9696 gold_error(_("%s: conflicting use of R9"), name);
9697 }
9698 if (out_attr[i].int_value() == elfcpp::AEABI_R9_unused)
9699 out_attr[i].set_int_value(in_attr[i].int_value());
9700 break;
9701 case elfcpp::Tag_ABI_PCS_RW_data:
9702 if (in_attr[i].int_value() == elfcpp::AEABI_PCS_RW_data_SBrel
9703 && (in_attr[elfcpp::Tag_ABI_PCS_R9_use].int_value()
9704 != elfcpp::AEABI_R9_SB)
9705 && (out_attr[elfcpp::Tag_ABI_PCS_R9_use].int_value()
9706 != elfcpp::AEABI_R9_unused)
9707 && parameters->options().warn_mismatch())
9708 {
9709 gold_error(_("%s: SB relative addressing conflicts with use "
9710 "of R9"),
9711 name);
9712 }
9713 // Use the smallest value specified.
9714 if (in_attr[i].int_value() < out_attr[i].int_value())
9715 out_attr[i].set_int_value(in_attr[i].int_value());
9716 break;
9717 case elfcpp::Tag_ABI_PCS_wchar_t:
9718 // FIXME: Make it possible to turn off this warning.
9719 if (out_attr[i].int_value()
9720 && in_attr[i].int_value()
9721 && out_attr[i].int_value() != in_attr[i].int_value()
9722 && parameters->options().warn_mismatch())
9723 {
9724 gold_warning(_("%s uses %u-byte wchar_t yet the output is to "
9725 "use %u-byte wchar_t; use of wchar_t values "
9726 "across objects may fail"),
9727 name, in_attr[i].int_value(),
9728 out_attr[i].int_value());
9729 }
9730 else if (in_attr[i].int_value() && !out_attr[i].int_value())
9731 out_attr[i].set_int_value(in_attr[i].int_value());
9732 break;
9733 case elfcpp::Tag_ABI_enum_size:
9734 if (in_attr[i].int_value() != elfcpp::AEABI_enum_unused)
9735 {
9736 if (out_attr[i].int_value() == elfcpp::AEABI_enum_unused
9737 || out_attr[i].int_value() == elfcpp::AEABI_enum_forced_wide)
9738 {
9739 // The existing object is compatible with anything.
9740 // Use whatever requirements the new object has.
9741 out_attr[i].set_int_value(in_attr[i].int_value());
9742 }
9743 // FIXME: Make it possible to turn off this warning.
9744 else if (in_attr[i].int_value() != elfcpp::AEABI_enum_forced_wide
9745 && out_attr[i].int_value() != in_attr[i].int_value()
9746 && parameters->options().warn_mismatch())
9747 {
9748 unsigned int in_value = in_attr[i].int_value();
9749 unsigned int out_value = out_attr[i].int_value();
9750 gold_warning(_("%s uses %s enums yet the output is to use "
9751 "%s enums; use of enum values across objects "
9752 "may fail"),
9753 name,
9754 this->aeabi_enum_name(in_value).c_str(),
9755 this->aeabi_enum_name(out_value).c_str());
9756 }
9757 }
9758 break;
9759 case elfcpp::Tag_ABI_VFP_args:
9760 // Aready done.
9761 break;
9762 case elfcpp::Tag_ABI_WMMX_args:
9763 if (in_attr[i].int_value() != out_attr[i].int_value()
9764 && parameters->options().warn_mismatch())
9765 {
9766 gold_error(_("%s uses iWMMXt register arguments, output does "
9767 "not"),
9768 name);
9769 }
9770 break;
9771 case Object_attribute::Tag_compatibility:
9772 // Merged in target-independent code.
9773 break;
9774 case elfcpp::Tag_ABI_HardFP_use:
9775 // 1 (SP) and 2 (DP) conflict, so combine to 3 (SP & DP).
9776 if ((in_attr[i].int_value() == 1 && out_attr[i].int_value() == 2)
9777 || (in_attr[i].int_value() == 2 && out_attr[i].int_value() == 1))
9778 out_attr[i].set_int_value(3);
9779 else if (in_attr[i].int_value() > out_attr[i].int_value())
9780 out_attr[i].set_int_value(in_attr[i].int_value());
9781 break;
9782 case elfcpp::Tag_ABI_FP_16bit_format:
9783 if (in_attr[i].int_value() != 0 && out_attr[i].int_value() != 0)
9784 {
9785 if (in_attr[i].int_value() != out_attr[i].int_value()
9786 && parameters->options().warn_mismatch())
9787 gold_error(_("fp16 format mismatch between %s and output"),
9788 name);
9789 }
9790 if (in_attr[i].int_value() != 0)
9791 out_attr[i].set_int_value(in_attr[i].int_value());
9792 break;
9793
9794 case elfcpp::Tag_DIV_use:
9795 // This tag is set to zero if we can use UDIV and SDIV in Thumb
9796 // mode on a v7-M or v7-R CPU; to one if we can not use UDIV or
9797 // SDIV at all; and to two if we can use UDIV or SDIV on a v7-A
9798 // CPU. We will merge as follows: If the input attribute's value
9799 // is one then the output attribute's value remains unchanged. If
9800 // the input attribute's value is zero or two then if the output
9801 // attribute's value is one the output value is set to the input
9802 // value, otherwise the output value must be the same as the
9803 // inputs. */
9804 if (in_attr[i].int_value() != 1 && out_attr[i].int_value() != 1)
9805 {
9806 if (in_attr[i].int_value() != out_attr[i].int_value())
9807 {
9808 gold_error(_("DIV usage mismatch between %s and output"),
9809 name);
9810 }
9811 }
9812
9813 if (in_attr[i].int_value() != 1)
9814 out_attr[i].set_int_value(in_attr[i].int_value());
9815
9816 break;
9817
9818 case elfcpp::Tag_MPextension_use_legacy:
9819 // We don't output objects with Tag_MPextension_use_legacy - we
9820 // move the value to Tag_MPextension_use.
9821 if (in_attr[i].int_value() != 0
9822 && in_attr[elfcpp::Tag_MPextension_use].int_value() != 0)
9823 {
9824 if (in_attr[elfcpp::Tag_MPextension_use].int_value()
9825 != in_attr[i].int_value())
9826 {
9827 gold_error(_("%s has has both the current and legacy "
9828 "Tag_MPextension_use attributes"),
9829 name);
9830 }
9831 }
9832
9833 if (in_attr[i].int_value()
9834 > out_attr[elfcpp::Tag_MPextension_use].int_value())
9835 out_attr[elfcpp::Tag_MPextension_use] = in_attr[i];
9836
9837 break;
9838
9839 case elfcpp::Tag_nodefaults:
9840 // This tag is set if it exists, but the value is unused (and is
9841 // typically zero). We don't actually need to do anything here -
9842 // the merge happens automatically when the type flags are merged
9843 // below.
9844 break;
9845 case elfcpp::Tag_also_compatible_with:
9846 // Already done in Tag_CPU_arch.
9847 break;
9848 case elfcpp::Tag_conformance:
9849 // Keep the attribute if it matches. Throw it away otherwise.
9850 // No attribute means no claim to conform.
9851 if (in_attr[i].string_value() != out_attr[i].string_value())
9852 out_attr[i].set_string_value("");
9853 break;
9854
9855 default:
9856 {
9857 const char* err_object = NULL;
9858
9859 // The "known_obj_attributes" table does contain some undefined
9860 // attributes. Ensure that there are unused.
9861 if (out_attr[i].int_value() != 0
9862 || out_attr[i].string_value() != "")
9863 err_object = "output";
9864 else if (in_attr[i].int_value() != 0
9865 || in_attr[i].string_value() != "")
9866 err_object = name;
9867
9868 if (err_object != NULL
9869 && parameters->options().warn_mismatch())
9870 {
9871 // Attribute numbers >=64 (mod 128) can be safely ignored.
9872 if ((i & 127) < 64)
9873 gold_error(_("%s: unknown mandatory EABI object attribute "
9874 "%d"),
9875 err_object, i);
9876 else
9877 gold_warning(_("%s: unknown EABI object attribute %d"),
9878 err_object, i);
9879 }
9880
9881 // Only pass on attributes that match in both inputs.
9882 if (!in_attr[i].matches(out_attr[i]))
9883 {
9884 out_attr[i].set_int_value(0);
9885 out_attr[i].set_string_value("");
9886 }
9887 }
9888 }
9889
9890 // If out_attr was copied from in_attr then it won't have a type yet.
9891 if (in_attr[i].type() && !out_attr[i].type())
9892 out_attr[i].set_type(in_attr[i].type());
9893 }
9894
9895 // Merge Tag_compatibility attributes and any common GNU ones.
9896 this->attributes_section_data_->merge(name, pasd);
9897
9898 // Check for any attributes not known on ARM.
9899 typedef Vendor_object_attributes::Other_attributes Other_attributes;
9900 const Other_attributes* in_other_attributes = pasd->other_attributes(vendor);
9901 Other_attributes::const_iterator in_iter = in_other_attributes->begin();
9902 Other_attributes* out_other_attributes =
9903 this->attributes_section_data_->other_attributes(vendor);
9904 Other_attributes::iterator out_iter = out_other_attributes->begin();
9905
9906 while (in_iter != in_other_attributes->end()
9907 || out_iter != out_other_attributes->end())
9908 {
9909 const char* err_object = NULL;
9910 int err_tag = 0;
9911
9912 // The tags for each list are in numerical order.
9913 // If the tags are equal, then merge.
9914 if (out_iter != out_other_attributes->end()
9915 && (in_iter == in_other_attributes->end()
9916 || in_iter->first > out_iter->first))
9917 {
9918 // This attribute only exists in output. We can't merge, and we
9919 // don't know what the tag means, so delete it.
9920 err_object = "output";
9921 err_tag = out_iter->first;
9922 int saved_tag = out_iter->first;
9923 delete out_iter->second;
9924 out_other_attributes->erase(out_iter);
9925 out_iter = out_other_attributes->upper_bound(saved_tag);
9926 }
9927 else if (in_iter != in_other_attributes->end()
9928 && (out_iter != out_other_attributes->end()
9929 || in_iter->first < out_iter->first))
9930 {
9931 // This attribute only exists in input. We can't merge, and we
9932 // don't know what the tag means, so ignore it.
9933 err_object = name;
9934 err_tag = in_iter->first;
9935 ++in_iter;
9936 }
9937 else // The tags are equal.
9938 {
9939 // As present, all attributes in the list are unknown, and
9940 // therefore can't be merged meaningfully.
9941 err_object = "output";
9942 err_tag = out_iter->first;
9943
9944 // Only pass on attributes that match in both inputs.
9945 if (!in_iter->second->matches(*(out_iter->second)))
9946 {
9947 // No match. Delete the attribute.
9948 int saved_tag = out_iter->first;
9949 delete out_iter->second;
9950 out_other_attributes->erase(out_iter);
9951 out_iter = out_other_attributes->upper_bound(saved_tag);
9952 }
9953 else
9954 {
9955 // Matched. Keep the attribute and move to the next.
9956 ++out_iter;
9957 ++in_iter;
9958 }
9959 }
9960
9961 if (err_object && parameters->options().warn_mismatch())
9962 {
9963 // Attribute numbers >=64 (mod 128) can be safely ignored. */
9964 if ((err_tag & 127) < 64)
9965 {
9966 gold_error(_("%s: unknown mandatory EABI object attribute %d"),
9967 err_object, err_tag);
9968 }
9969 else
9970 {
9971 gold_warning(_("%s: unknown EABI object attribute %d"),
9972 err_object, err_tag);
9973 }
9974 }
9975 }
9976 }
9977
9978 // Stub-generation methods for Target_arm.
9979
9980 // Make a new Arm_input_section object.
9981
9982 template<bool big_endian>
9983 Arm_input_section<big_endian>*
9984 Target_arm<big_endian>::new_arm_input_section(
9985 Relobj* relobj,
9986 unsigned int shndx)
9987 {
9988 Section_id sid(relobj, shndx);
9989
9990 Arm_input_section<big_endian>* arm_input_section =
9991 new Arm_input_section<big_endian>(relobj, shndx);
9992 arm_input_section->init();
9993
9994 // Register new Arm_input_section in map for look-up.
9995 std::pair<typename Arm_input_section_map::iterator, bool> ins =
9996 this->arm_input_section_map_.insert(std::make_pair(sid, arm_input_section));
9997
9998 // Make sure that it we have not created another Arm_input_section
9999 // for this input section already.
10000 gold_assert(ins.second);
10001
10002 return arm_input_section;
10003 }
10004
10005 // Find the Arm_input_section object corresponding to the SHNDX-th input
10006 // section of RELOBJ.
10007
10008 template<bool big_endian>
10009 Arm_input_section<big_endian>*
10010 Target_arm<big_endian>::find_arm_input_section(
10011 Relobj* relobj,
10012 unsigned int shndx) const
10013 {
10014 Section_id sid(relobj, shndx);
10015 typename Arm_input_section_map::const_iterator p =
10016 this->arm_input_section_map_.find(sid);
10017 return (p != this->arm_input_section_map_.end()) ? p->second : NULL;
10018 }
10019
10020 // Make a new stub table.
10021
10022 template<bool big_endian>
10023 Stub_table<big_endian>*
10024 Target_arm<big_endian>::new_stub_table(Arm_input_section<big_endian>* owner)
10025 {
10026 Stub_table<big_endian>* stub_table =
10027 new Stub_table<big_endian>(owner);
10028 this->stub_tables_.push_back(stub_table);
10029
10030 stub_table->set_address(owner->address() + owner->data_size());
10031 stub_table->set_file_offset(owner->offset() + owner->data_size());
10032 stub_table->finalize_data_size();
10033
10034 return stub_table;
10035 }
10036
10037 // Scan a relocation for stub generation.
10038
10039 template<bool big_endian>
10040 void
10041 Target_arm<big_endian>::scan_reloc_for_stub(
10042 const Relocate_info<32, big_endian>* relinfo,
10043 unsigned int r_type,
10044 const Sized_symbol<32>* gsym,
10045 unsigned int r_sym,
10046 const Symbol_value<32>* psymval,
10047 elfcpp::Elf_types<32>::Elf_Swxword addend,
10048 Arm_address address)
10049 {
10050 typedef typename Target_arm<big_endian>::Relocate Relocate;
10051
10052 const Arm_relobj<big_endian>* arm_relobj =
10053 Arm_relobj<big_endian>::as_arm_relobj(relinfo->object);
10054
10055 bool target_is_thumb;
10056 Symbol_value<32> symval;
10057 if (gsym != NULL)
10058 {
10059 // This is a global symbol. Determine if we use PLT and if the
10060 // final target is THUMB.
10061 if (gsym->use_plt_offset(Relocate::reloc_is_non_pic(r_type)))
10062 {
10063 // This uses a PLT, change the symbol value.
10064 symval.set_output_value(this->plt_section()->address()
10065 + gsym->plt_offset());
10066 psymval = &symval;
10067 target_is_thumb = false;
10068 }
10069 else if (gsym->is_undefined())
10070 // There is no need to generate a stub symbol is undefined.
10071 return;
10072 else
10073 {
10074 target_is_thumb =
10075 ((gsym->type() == elfcpp::STT_ARM_TFUNC)
10076 || (gsym->type() == elfcpp::STT_FUNC
10077 && !gsym->is_undefined()
10078 && ((psymval->value(arm_relobj, 0) & 1) != 0)));
10079 }
10080 }
10081 else
10082 {
10083 // This is a local symbol. Determine if the final target is THUMB.
10084 target_is_thumb = arm_relobj->local_symbol_is_thumb_function(r_sym);
10085 }
10086
10087 // Strip LSB if this points to a THUMB target.
10088 const Arm_reloc_property* reloc_property =
10089 arm_reloc_property_table->get_implemented_static_reloc_property(r_type);
10090 gold_assert(reloc_property != NULL);
10091 if (target_is_thumb
10092 && reloc_property->uses_thumb_bit()
10093 && ((psymval->value(arm_relobj, 0) & 1) != 0))
10094 {
10095 Arm_address stripped_value =
10096 psymval->value(arm_relobj, 0) & ~static_cast<Arm_address>(1);
10097 symval.set_output_value(stripped_value);
10098 psymval = &symval;
10099 }
10100
10101 // Get the symbol value.
10102 Symbol_value<32>::Value value = psymval->value(arm_relobj, 0);
10103
10104 // Owing to pipelining, the PC relative branches below actually skip
10105 // two instructions when the branch offset is 0.
10106 Arm_address destination;
10107 switch (r_type)
10108 {
10109 case elfcpp::R_ARM_CALL:
10110 case elfcpp::R_ARM_JUMP24:
10111 case elfcpp::R_ARM_PLT32:
10112 // ARM branches.
10113 destination = value + addend + 8;
10114 break;
10115 case elfcpp::R_ARM_THM_CALL:
10116 case elfcpp::R_ARM_THM_XPC22:
10117 case elfcpp::R_ARM_THM_JUMP24:
10118 case elfcpp::R_ARM_THM_JUMP19:
10119 // THUMB branches.
10120 destination = value + addend + 4;
10121 break;
10122 default:
10123 gold_unreachable();
10124 }
10125
10126 Reloc_stub* stub = NULL;
10127 Stub_type stub_type =
10128 Reloc_stub::stub_type_for_reloc(r_type, address, destination,
10129 target_is_thumb);
10130 if (stub_type != arm_stub_none)
10131 {
10132 // Try looking up an existing stub from a stub table.
10133 Stub_table<big_endian>* stub_table =
10134 arm_relobj->stub_table(relinfo->data_shndx);
10135 gold_assert(stub_table != NULL);
10136
10137 // Locate stub by destination.
10138 Reloc_stub::Key stub_key(stub_type, gsym, arm_relobj, r_sym, addend);
10139
10140 // Create a stub if there is not one already
10141 stub = stub_table->find_reloc_stub(stub_key);
10142 if (stub == NULL)
10143 {
10144 // create a new stub and add it to stub table.
10145 stub = this->stub_factory().make_reloc_stub(stub_type);
10146 stub_table->add_reloc_stub(stub, stub_key);
10147 }
10148
10149 // Record the destination address.
10150 stub->set_destination_address(destination
10151 | (target_is_thumb ? 1 : 0));
10152 }
10153
10154 // For Cortex-A8, we need to record a relocation at 4K page boundary.
10155 if (this->fix_cortex_a8_
10156 && (r_type == elfcpp::R_ARM_THM_JUMP24
10157 || r_type == elfcpp::R_ARM_THM_JUMP19
10158 || r_type == elfcpp::R_ARM_THM_CALL
10159 || r_type == elfcpp::R_ARM_THM_XPC22)
10160 && (address & 0xfffU) == 0xffeU)
10161 {
10162 // Found a candidate. Note we haven't checked the destination is
10163 // within 4K here: if we do so (and don't create a record) we can't
10164 // tell that a branch should have been relocated when scanning later.
10165 this->cortex_a8_relocs_info_[address] =
10166 new Cortex_a8_reloc(stub, r_type,
10167 destination | (target_is_thumb ? 1 : 0));
10168 }
10169 }
10170
10171 // This function scans a relocation sections for stub generation.
10172 // The template parameter Relocate must be a class type which provides
10173 // a single function, relocate(), which implements the machine
10174 // specific part of a relocation.
10175
10176 // BIG_ENDIAN is the endianness of the data. SH_TYPE is the section type:
10177 // SHT_REL or SHT_RELA.
10178
10179 // PRELOCS points to the relocation data. RELOC_COUNT is the number
10180 // of relocs. OUTPUT_SECTION is the output section.
10181 // NEEDS_SPECIAL_OFFSET_HANDLING is true if input offsets need to be
10182 // mapped to output offsets.
10183
10184 // VIEW is the section data, VIEW_ADDRESS is its memory address, and
10185 // VIEW_SIZE is the size. These refer to the input section, unless
10186 // NEEDS_SPECIAL_OFFSET_HANDLING is true, in which case they refer to
10187 // the output section.
10188
10189 template<bool big_endian>
10190 template<int sh_type>
10191 void inline
10192 Target_arm<big_endian>::scan_reloc_section_for_stubs(
10193 const Relocate_info<32, big_endian>* relinfo,
10194 const unsigned char* prelocs,
10195 size_t reloc_count,
10196 Output_section* output_section,
10197 bool needs_special_offset_handling,
10198 const unsigned char* view,
10199 elfcpp::Elf_types<32>::Elf_Addr view_address,
10200 section_size_type)
10201 {
10202 typedef typename Reloc_types<sh_type, 32, big_endian>::Reloc Reltype;
10203 const int reloc_size =
10204 Reloc_types<sh_type, 32, big_endian>::reloc_size;
10205
10206 Arm_relobj<big_endian>* arm_object =
10207 Arm_relobj<big_endian>::as_arm_relobj(relinfo->object);
10208 unsigned int local_count = arm_object->local_symbol_count();
10209
10210 Comdat_behavior comdat_behavior = CB_UNDETERMINED;
10211
10212 for (size_t i = 0; i < reloc_count; ++i, prelocs += reloc_size)
10213 {
10214 Reltype reloc(prelocs);
10215
10216 typename elfcpp::Elf_types<32>::Elf_WXword r_info = reloc.get_r_info();
10217 unsigned int r_sym = elfcpp::elf_r_sym<32>(r_info);
10218 unsigned int r_type = elfcpp::elf_r_type<32>(r_info);
10219
10220 r_type = this->get_real_reloc_type(r_type);
10221
10222 // Only a few relocation types need stubs.
10223 if ((r_type != elfcpp::R_ARM_CALL)
10224 && (r_type != elfcpp::R_ARM_JUMP24)
10225 && (r_type != elfcpp::R_ARM_PLT32)
10226 && (r_type != elfcpp::R_ARM_THM_CALL)
10227 && (r_type != elfcpp::R_ARM_THM_XPC22)
10228 && (r_type != elfcpp::R_ARM_THM_JUMP24)
10229 && (r_type != elfcpp::R_ARM_THM_JUMP19)
10230 && (r_type != elfcpp::R_ARM_V4BX))
10231 continue;
10232
10233 section_offset_type offset =
10234 convert_to_section_size_type(reloc.get_r_offset());
10235
10236 if (needs_special_offset_handling)
10237 {
10238 offset = output_section->output_offset(relinfo->object,
10239 relinfo->data_shndx,
10240 offset);
10241 if (offset == -1)
10242 continue;
10243 }
10244
10245 // Create a v4bx stub if --fix-v4bx-interworking is used.
10246 if (r_type == elfcpp::R_ARM_V4BX)
10247 {
10248 if (this->fix_v4bx() == General_options::FIX_V4BX_INTERWORKING)
10249 {
10250 // Get the BX instruction.
10251 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
10252 const Valtype* wv =
10253 reinterpret_cast<const Valtype*>(view + offset);
10254 elfcpp::Elf_types<32>::Elf_Swxword insn =
10255 elfcpp::Swap<32, big_endian>::readval(wv);
10256 const uint32_t reg = (insn & 0xf);
10257
10258 if (reg < 0xf)
10259 {
10260 // Try looking up an existing stub from a stub table.
10261 Stub_table<big_endian>* stub_table =
10262 arm_object->stub_table(relinfo->data_shndx);
10263 gold_assert(stub_table != NULL);
10264
10265 if (stub_table->find_arm_v4bx_stub(reg) == NULL)
10266 {
10267 // create a new stub and add it to stub table.
10268 Arm_v4bx_stub* stub =
10269 this->stub_factory().make_arm_v4bx_stub(reg);
10270 gold_assert(stub != NULL);
10271 stub_table->add_arm_v4bx_stub(stub);
10272 }
10273 }
10274 }
10275 continue;
10276 }
10277
10278 // Get the addend.
10279 Stub_addend_reader<sh_type, big_endian> stub_addend_reader;
10280 elfcpp::Elf_types<32>::Elf_Swxword addend =
10281 stub_addend_reader(r_type, view + offset, reloc);
10282
10283 const Sized_symbol<32>* sym;
10284
10285 Symbol_value<32> symval;
10286 const Symbol_value<32> *psymval;
10287 if (r_sym < local_count)
10288 {
10289 sym = NULL;
10290 psymval = arm_object->local_symbol(r_sym);
10291
10292 // If the local symbol belongs to a section we are discarding,
10293 // and that section is a debug section, try to find the
10294 // corresponding kept section and map this symbol to its
10295 // counterpart in the kept section. The symbol must not
10296 // correspond to a section we are folding.
10297 bool is_ordinary;
10298 unsigned int shndx = psymval->input_shndx(&is_ordinary);
10299 if (is_ordinary
10300 && shndx != elfcpp::SHN_UNDEF
10301 && !arm_object->is_section_included(shndx)
10302 && !(relinfo->symtab->is_section_folded(arm_object, shndx)))
10303 {
10304 if (comdat_behavior == CB_UNDETERMINED)
10305 {
10306 std::string name =
10307 arm_object->section_name(relinfo->data_shndx);
10308 comdat_behavior = get_comdat_behavior(name.c_str());
10309 }
10310 if (comdat_behavior == CB_PRETEND)
10311 {
10312 bool found;
10313 typename elfcpp::Elf_types<32>::Elf_Addr value =
10314 arm_object->map_to_kept_section(shndx, &found);
10315 if (found)
10316 symval.set_output_value(value + psymval->input_value());
10317 else
10318 symval.set_output_value(0);
10319 }
10320 else
10321 {
10322 symval.set_output_value(0);
10323 }
10324 symval.set_no_output_symtab_entry();
10325 psymval = &symval;
10326 }
10327 }
10328 else
10329 {
10330 const Symbol* gsym = arm_object->global_symbol(r_sym);
10331 gold_assert(gsym != NULL);
10332 if (gsym->is_forwarder())
10333 gsym = relinfo->symtab->resolve_forwards(gsym);
10334
10335 sym = static_cast<const Sized_symbol<32>*>(gsym);
10336 if (sym->has_symtab_index())
10337 symval.set_output_symtab_index(sym->symtab_index());
10338 else
10339 symval.set_no_output_symtab_entry();
10340
10341 // We need to compute the would-be final value of this global
10342 // symbol.
10343 const Symbol_table* symtab = relinfo->symtab;
10344 const Sized_symbol<32>* sized_symbol =
10345 symtab->get_sized_symbol<32>(gsym);
10346 Symbol_table::Compute_final_value_status status;
10347 Arm_address value =
10348 symtab->compute_final_value<32>(sized_symbol, &status);
10349
10350 // Skip this if the symbol has not output section.
10351 if (status == Symbol_table::CFVS_NO_OUTPUT_SECTION)
10352 continue;
10353
10354 symval.set_output_value(value);
10355 psymval = &symval;
10356 }
10357
10358 // If symbol is a section symbol, we don't know the actual type of
10359 // destination. Give up.
10360 if (psymval->is_section_symbol())
10361 continue;
10362
10363 this->scan_reloc_for_stub(relinfo, r_type, sym, r_sym, psymval,
10364 addend, view_address + offset);
10365 }
10366 }
10367
10368 // Scan an input section for stub generation.
10369
10370 template<bool big_endian>
10371 void
10372 Target_arm<big_endian>::scan_section_for_stubs(
10373 const Relocate_info<32, big_endian>* relinfo,
10374 unsigned int sh_type,
10375 const unsigned char* prelocs,
10376 size_t reloc_count,
10377 Output_section* output_section,
10378 bool needs_special_offset_handling,
10379 const unsigned char* view,
10380 Arm_address view_address,
10381 section_size_type view_size)
10382 {
10383 if (sh_type == elfcpp::SHT_REL)
10384 this->scan_reloc_section_for_stubs<elfcpp::SHT_REL>(
10385 relinfo,
10386 prelocs,
10387 reloc_count,
10388 output_section,
10389 needs_special_offset_handling,
10390 view,
10391 view_address,
10392 view_size);
10393 else if (sh_type == elfcpp::SHT_RELA)
10394 // We do not support RELA type relocations yet. This is provided for
10395 // completeness.
10396 this->scan_reloc_section_for_stubs<elfcpp::SHT_RELA>(
10397 relinfo,
10398 prelocs,
10399 reloc_count,
10400 output_section,
10401 needs_special_offset_handling,
10402 view,
10403 view_address,
10404 view_size);
10405 else
10406 gold_unreachable();
10407 }
10408
10409 // Group input sections for stub generation.
10410 //
10411 // We goup input sections in an output sections so that the total size,
10412 // including any padding space due to alignment is smaller than GROUP_SIZE
10413 // unless the only input section in group is bigger than GROUP_SIZE already.
10414 // Then an ARM stub table is created to follow the last input section
10415 // in group. For each group an ARM stub table is created an is placed
10416 // after the last group. If STUB_ALWATS_AFTER_BRANCH is false, we further
10417 // extend the group after the stub table.
10418
10419 template<bool big_endian>
10420 void
10421 Target_arm<big_endian>::group_sections(
10422 Layout* layout,
10423 section_size_type group_size,
10424 bool stubs_always_after_branch)
10425 {
10426 // Group input sections and insert stub table
10427 Layout::Section_list section_list;
10428 layout->get_allocated_sections(&section_list);
10429 for (Layout::Section_list::const_iterator p = section_list.begin();
10430 p != section_list.end();
10431 ++p)
10432 {
10433 Arm_output_section<big_endian>* output_section =
10434 Arm_output_section<big_endian>::as_arm_output_section(*p);
10435 output_section->group_sections(group_size, stubs_always_after_branch,
10436 this);
10437 }
10438 }
10439
10440 // Relaxation hook. This is where we do stub generation.
10441
10442 template<bool big_endian>
10443 bool
10444 Target_arm<big_endian>::do_relax(
10445 int pass,
10446 const Input_objects* input_objects,
10447 Symbol_table* symtab,
10448 Layout* layout)
10449 {
10450 // No need to generate stubs if this is a relocatable link.
10451 gold_assert(!parameters->options().relocatable());
10452
10453 // If this is the first pass, we need to group input sections into
10454 // stub groups.
10455 bool done_exidx_fixup = false;
10456 typedef typename Stub_table_list::iterator Stub_table_iterator;
10457 if (pass == 1)
10458 {
10459 // Determine the stub group size. The group size is the absolute
10460 // value of the parameter --stub-group-size. If --stub-group-size
10461 // is passed a negative value, we restict stubs to be always after
10462 // the stubbed branches.
10463 int32_t stub_group_size_param =
10464 parameters->options().stub_group_size();
10465 bool stubs_always_after_branch = stub_group_size_param < 0;
10466 section_size_type stub_group_size = abs(stub_group_size_param);
10467
10468 // The Cortex-A8 erratum fix depends on stubs not being in the same 4K
10469 // page as the first half of a 32-bit branch straddling two 4K pages.
10470 // This is a crude way of enforcing that.
10471 if (this->fix_cortex_a8_)
10472 stubs_always_after_branch = true;
10473
10474 if (stub_group_size == 1)
10475 {
10476 // Default value.
10477 // Thumb branch range is +-4MB has to be used as the default
10478 // maximum size (a given section can contain both ARM and Thumb
10479 // code, so the worst case has to be taken into account). If we are
10480 // fixing cortex-a8 errata, the branch range has to be even smaller,
10481 // since wide conditional branch has a range of +-1MB only.
10482 //
10483 // This value is 24K less than that, which allows for 2025
10484 // 12-byte stubs. If we exceed that, then we will fail to link.
10485 // The user will have to relink with an explicit group size
10486 // option.
10487 if (this->fix_cortex_a8_)
10488 stub_group_size = 1024276;
10489 else
10490 stub_group_size = 4170000;
10491 }
10492
10493 group_sections(layout, stub_group_size, stubs_always_after_branch);
10494
10495 // Also fix .ARM.exidx section coverage.
10496 Output_section* os = layout->find_output_section(".ARM.exidx");
10497 if (os != NULL && os->type() == elfcpp::SHT_ARM_EXIDX)
10498 {
10499 Arm_output_section<big_endian>* exidx_output_section =
10500 Arm_output_section<big_endian>::as_arm_output_section(os);
10501 this->fix_exidx_coverage(layout, exidx_output_section, symtab);
10502 done_exidx_fixup = true;
10503 }
10504 }
10505 else
10506 {
10507 // If this is not the first pass, addresses and file offsets have
10508 // been reset at this point, set them here.
10509 for (Stub_table_iterator sp = this->stub_tables_.begin();
10510 sp != this->stub_tables_.end();
10511 ++sp)
10512 {
10513 Arm_input_section<big_endian>* owner = (*sp)->owner();
10514 off_t off = align_address(owner->original_size(),
10515 (*sp)->addralign());
10516 (*sp)->set_address_and_file_offset(owner->address() + off,
10517 owner->offset() + off);
10518 }
10519 }
10520
10521 // The Cortex-A8 stubs are sensitive to layout of code sections. At the
10522 // beginning of each relaxation pass, just blow away all the stubs.
10523 // Alternatively, we could selectively remove only the stubs and reloc
10524 // information for code sections that have moved since the last pass.
10525 // That would require more book-keeping.
10526 if (this->fix_cortex_a8_)
10527 {
10528 // Clear all Cortex-A8 reloc information.
10529 for (typename Cortex_a8_relocs_info::const_iterator p =
10530 this->cortex_a8_relocs_info_.begin();
10531 p != this->cortex_a8_relocs_info_.end();
10532 ++p)
10533 delete p->second;
10534 this->cortex_a8_relocs_info_.clear();
10535
10536 // Remove all Cortex-A8 stubs.
10537 for (Stub_table_iterator sp = this->stub_tables_.begin();
10538 sp != this->stub_tables_.end();
10539 ++sp)
10540 (*sp)->remove_all_cortex_a8_stubs();
10541 }
10542
10543 // Scan relocs for relocation stubs
10544 for (Input_objects::Relobj_iterator op = input_objects->relobj_begin();
10545 op != input_objects->relobj_end();
10546 ++op)
10547 {
10548 Arm_relobj<big_endian>* arm_relobj =
10549 Arm_relobj<big_endian>::as_arm_relobj(*op);
10550 arm_relobj->scan_sections_for_stubs(this, symtab, layout);
10551 }
10552
10553 // Check all stub tables to see if any of them have their data sizes
10554 // or addresses alignments changed. These are the only things that
10555 // matter.
10556 bool any_stub_table_changed = false;
10557 Unordered_set<const Output_section*> sections_needing_adjustment;
10558 for (Stub_table_iterator sp = this->stub_tables_.begin();
10559 (sp != this->stub_tables_.end()) && !any_stub_table_changed;
10560 ++sp)
10561 {
10562 if ((*sp)->update_data_size_and_addralign())
10563 {
10564 // Update data size of stub table owner.
10565 Arm_input_section<big_endian>* owner = (*sp)->owner();
10566 uint64_t address = owner->address();
10567 off_t offset = owner->offset();
10568 owner->reset_address_and_file_offset();
10569 owner->set_address_and_file_offset(address, offset);
10570
10571 sections_needing_adjustment.insert(owner->output_section());
10572 any_stub_table_changed = true;
10573 }
10574 }
10575
10576 // Output_section_data::output_section() returns a const pointer but we
10577 // need to update output sections, so we record all output sections needing
10578 // update above and scan the sections here to find out what sections need
10579 // to be updated.
10580 for(Layout::Section_list::const_iterator p = layout->section_list().begin();
10581 p != layout->section_list().end();
10582 ++p)
10583 {
10584 if (sections_needing_adjustment.find(*p)
10585 != sections_needing_adjustment.end())
10586 (*p)->set_section_offsets_need_adjustment();
10587 }
10588
10589 // Stop relaxation if no EXIDX fix-up and no stub table change.
10590 bool continue_relaxation = done_exidx_fixup || any_stub_table_changed;
10591
10592 // Finalize the stubs in the last relaxation pass.
10593 if (!continue_relaxation)
10594 {
10595 for (Stub_table_iterator sp = this->stub_tables_.begin();
10596 (sp != this->stub_tables_.end()) && !any_stub_table_changed;
10597 ++sp)
10598 (*sp)->finalize_stubs();
10599
10600 // Update output local symbol counts of objects if necessary.
10601 for (Input_objects::Relobj_iterator op = input_objects->relobj_begin();
10602 op != input_objects->relobj_end();
10603 ++op)
10604 {
10605 Arm_relobj<big_endian>* arm_relobj =
10606 Arm_relobj<big_endian>::as_arm_relobj(*op);
10607
10608 // Update output local symbol counts. We need to discard local
10609 // symbols defined in parts of input sections that are discarded by
10610 // relaxation.
10611 if (arm_relobj->output_local_symbol_count_needs_update())
10612 arm_relobj->update_output_local_symbol_count();
10613 }
10614 }
10615
10616 return continue_relaxation;
10617 }
10618
10619 // Relocate a stub.
10620
10621 template<bool big_endian>
10622 void
10623 Target_arm<big_endian>::relocate_stub(
10624 Stub* stub,
10625 const Relocate_info<32, big_endian>* relinfo,
10626 Output_section* output_section,
10627 unsigned char* view,
10628 Arm_address address,
10629 section_size_type view_size)
10630 {
10631 Relocate relocate;
10632 const Stub_template* stub_template = stub->stub_template();
10633 for (size_t i = 0; i < stub_template->reloc_count(); i++)
10634 {
10635 size_t reloc_insn_index = stub_template->reloc_insn_index(i);
10636 const Insn_template* insn = &stub_template->insns()[reloc_insn_index];
10637
10638 unsigned int r_type = insn->r_type();
10639 section_size_type reloc_offset = stub_template->reloc_offset(i);
10640 section_size_type reloc_size = insn->size();
10641 gold_assert(reloc_offset + reloc_size <= view_size);
10642
10643 // This is the address of the stub destination.
10644 Arm_address target = stub->reloc_target(i) + insn->reloc_addend();
10645 Symbol_value<32> symval;
10646 symval.set_output_value(target);
10647
10648 // Synthesize a fake reloc just in case. We don't have a symbol so
10649 // we use 0.
10650 unsigned char reloc_buffer[elfcpp::Elf_sizes<32>::rel_size];
10651 memset(reloc_buffer, 0, sizeof(reloc_buffer));
10652 elfcpp::Rel_write<32, big_endian> reloc_write(reloc_buffer);
10653 reloc_write.put_r_offset(reloc_offset);
10654 reloc_write.put_r_info(elfcpp::elf_r_info<32>(0, r_type));
10655 elfcpp::Rel<32, big_endian> rel(reloc_buffer);
10656
10657 relocate.relocate(relinfo, this, output_section,
10658 this->fake_relnum_for_stubs, rel, r_type,
10659 NULL, &symval, view + reloc_offset,
10660 address + reloc_offset, reloc_size);
10661 }
10662 }
10663
10664 // Determine whether an object attribute tag takes an integer, a
10665 // string or both.
10666
10667 template<bool big_endian>
10668 int
10669 Target_arm<big_endian>::do_attribute_arg_type(int tag) const
10670 {
10671 if (tag == Object_attribute::Tag_compatibility)
10672 return (Object_attribute::ATTR_TYPE_FLAG_INT_VAL
10673 | Object_attribute::ATTR_TYPE_FLAG_STR_VAL);
10674 else if (tag == elfcpp::Tag_nodefaults)
10675 return (Object_attribute::ATTR_TYPE_FLAG_INT_VAL
10676 | Object_attribute::ATTR_TYPE_FLAG_NO_DEFAULT);
10677 else if (tag == elfcpp::Tag_CPU_raw_name || tag == elfcpp::Tag_CPU_name)
10678 return Object_attribute::ATTR_TYPE_FLAG_STR_VAL;
10679 else if (tag < 32)
10680 return Object_attribute::ATTR_TYPE_FLAG_INT_VAL;
10681 else
10682 return ((tag & 1) != 0
10683 ? Object_attribute::ATTR_TYPE_FLAG_STR_VAL
10684 : Object_attribute::ATTR_TYPE_FLAG_INT_VAL);
10685 }
10686
10687 // Reorder attributes.
10688 //
10689 // The ABI defines that Tag_conformance should be emitted first, and that
10690 // Tag_nodefaults should be second (if either is defined). This sets those
10691 // two positions, and bumps up the position of all the remaining tags to
10692 // compensate.
10693
10694 template<bool big_endian>
10695 int
10696 Target_arm<big_endian>::do_attributes_order(int num) const
10697 {
10698 // Reorder the known object attributes in output. We want to move
10699 // Tag_conformance to position 4 and Tag_conformance to position 5
10700 // and shift eveything between 4 .. Tag_conformance - 1 to make room.
10701 if (num == 4)
10702 return elfcpp::Tag_conformance;
10703 if (num == 5)
10704 return elfcpp::Tag_nodefaults;
10705 if ((num - 2) < elfcpp::Tag_nodefaults)
10706 return num - 2;
10707 if ((num - 1) < elfcpp::Tag_conformance)
10708 return num - 1;
10709 return num;
10710 }
10711
10712 // Scan a span of THUMB code for Cortex-A8 erratum.
10713
10714 template<bool big_endian>
10715 void
10716 Target_arm<big_endian>::scan_span_for_cortex_a8_erratum(
10717 Arm_relobj<big_endian>* arm_relobj,
10718 unsigned int shndx,
10719 section_size_type span_start,
10720 section_size_type span_end,
10721 const unsigned char* view,
10722 Arm_address address)
10723 {
10724 // Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
10725 //
10726 // The opcode is BLX.W, BL.W, B.W, Bcc.W
10727 // The branch target is in the same 4KB region as the
10728 // first half of the branch.
10729 // The instruction before the branch is a 32-bit
10730 // length non-branch instruction.
10731 section_size_type i = span_start;
10732 bool last_was_32bit = false;
10733 bool last_was_branch = false;
10734 while (i < span_end)
10735 {
10736 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
10737 const Valtype* wv = reinterpret_cast<const Valtype*>(view + i);
10738 uint32_t insn = elfcpp::Swap<16, big_endian>::readval(wv);
10739 bool is_blx = false, is_b = false;
10740 bool is_bl = false, is_bcc = false;
10741
10742 bool insn_32bit = (insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000;
10743 if (insn_32bit)
10744 {
10745 // Load the rest of the insn (in manual-friendly order).
10746 insn = (insn << 16) | elfcpp::Swap<16, big_endian>::readval(wv + 1);
10747
10748 // Encoding T4: B<c>.W.
10749 is_b = (insn & 0xf800d000U) == 0xf0009000U;
10750 // Encoding T1: BL<c>.W.
10751 is_bl = (insn & 0xf800d000U) == 0xf000d000U;
10752 // Encoding T2: BLX<c>.W.
10753 is_blx = (insn & 0xf800d000U) == 0xf000c000U;
10754 // Encoding T3: B<c>.W (not permitted in IT block).
10755 is_bcc = ((insn & 0xf800d000U) == 0xf0008000U
10756 && (insn & 0x07f00000U) != 0x03800000U);
10757 }
10758
10759 bool is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
10760
10761 // If this instruction is a 32-bit THUMB branch that crosses a 4K
10762 // page boundary and it follows 32-bit non-branch instruction,
10763 // we need to work around.
10764 if (is_32bit_branch
10765 && ((address + i) & 0xfffU) == 0xffeU
10766 && last_was_32bit
10767 && !last_was_branch)
10768 {
10769 // Check to see if there is a relocation stub for this branch.
10770 bool force_target_arm = false;
10771 bool force_target_thumb = false;
10772 const Cortex_a8_reloc* cortex_a8_reloc = NULL;
10773 Cortex_a8_relocs_info::const_iterator p =
10774 this->cortex_a8_relocs_info_.find(address + i);
10775
10776 if (p != this->cortex_a8_relocs_info_.end())
10777 {
10778 cortex_a8_reloc = p->second;
10779 bool target_is_thumb = (cortex_a8_reloc->destination() & 1) != 0;
10780
10781 if (cortex_a8_reloc->r_type() == elfcpp::R_ARM_THM_CALL
10782 && !target_is_thumb)
10783 force_target_arm = true;
10784 else if (cortex_a8_reloc->r_type() == elfcpp::R_ARM_THM_CALL
10785 && target_is_thumb)
10786 force_target_thumb = true;
10787 }
10788
10789 off_t offset;
10790 Stub_type stub_type = arm_stub_none;
10791
10792 // Check if we have an offending branch instruction.
10793 uint16_t upper_insn = (insn >> 16) & 0xffffU;
10794 uint16_t lower_insn = insn & 0xffffU;
10795 typedef struct Arm_relocate_functions<big_endian> RelocFuncs;
10796
10797 if (cortex_a8_reloc != NULL
10798 && cortex_a8_reloc->reloc_stub() != NULL)
10799 // We've already made a stub for this instruction, e.g.
10800 // it's a long branch or a Thumb->ARM stub. Assume that
10801 // stub will suffice to work around the A8 erratum (see
10802 // setting of always_after_branch above).
10803 ;
10804 else if (is_bcc)
10805 {
10806 offset = RelocFuncs::thumb32_cond_branch_offset(upper_insn,
10807 lower_insn);
10808 stub_type = arm_stub_a8_veneer_b_cond;
10809 }
10810 else if (is_b || is_bl || is_blx)
10811 {
10812 offset = RelocFuncs::thumb32_branch_offset(upper_insn,
10813 lower_insn);
10814 if (is_blx)
10815 offset &= ~3;
10816
10817 stub_type = (is_blx
10818 ? arm_stub_a8_veneer_blx
10819 : (is_bl
10820 ? arm_stub_a8_veneer_bl
10821 : arm_stub_a8_veneer_b));
10822 }
10823
10824 if (stub_type != arm_stub_none)
10825 {
10826 Arm_address pc_for_insn = address + i + 4;
10827
10828 // The original instruction is a BL, but the target is
10829 // an ARM instruction. If we were not making a stub,
10830 // the BL would have been converted to a BLX. Use the
10831 // BLX stub instead in that case.
10832 if (this->may_use_blx() && force_target_arm
10833 && stub_type == arm_stub_a8_veneer_bl)
10834 {
10835 stub_type = arm_stub_a8_veneer_blx;
10836 is_blx = true;
10837 is_bl = false;
10838 }
10839 // Conversely, if the original instruction was
10840 // BLX but the target is Thumb mode, use the BL stub.
10841 else if (force_target_thumb
10842 && stub_type == arm_stub_a8_veneer_blx)
10843 {
10844 stub_type = arm_stub_a8_veneer_bl;
10845 is_blx = false;
10846 is_bl = true;
10847 }
10848
10849 if (is_blx)
10850 pc_for_insn &= ~3;
10851
10852 // If we found a relocation, use the proper destination,
10853 // not the offset in the (unrelocated) instruction.
10854 // Note this is always done if we switched the stub type above.
10855 if (cortex_a8_reloc != NULL)
10856 offset = (off_t) (cortex_a8_reloc->destination() - pc_for_insn);
10857
10858 Arm_address target = (pc_for_insn + offset) | (is_blx ? 0 : 1);
10859
10860 // Add a new stub if destination address in in the same page.
10861 if (((address + i) & ~0xfffU) == (target & ~0xfffU))
10862 {
10863 Cortex_a8_stub* stub =
10864 this->stub_factory_.make_cortex_a8_stub(stub_type,
10865 arm_relobj, shndx,
10866 address + i,
10867 target, insn);
10868 Stub_table<big_endian>* stub_table =
10869 arm_relobj->stub_table(shndx);
10870 gold_assert(stub_table != NULL);
10871 stub_table->add_cortex_a8_stub(address + i, stub);
10872 }
10873 }
10874 }
10875
10876 i += insn_32bit ? 4 : 2;
10877 last_was_32bit = insn_32bit;
10878 last_was_branch = is_32bit_branch;
10879 }
10880 }
10881
10882 // Apply the Cortex-A8 workaround.
10883
10884 template<bool big_endian>
10885 void
10886 Target_arm<big_endian>::apply_cortex_a8_workaround(
10887 const Cortex_a8_stub* stub,
10888 Arm_address stub_address,
10889 unsigned char* insn_view,
10890 Arm_address insn_address)
10891 {
10892 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
10893 Valtype* wv = reinterpret_cast<Valtype*>(insn_view);
10894 Valtype upper_insn = elfcpp::Swap<16, big_endian>::readval(wv);
10895 Valtype lower_insn = elfcpp::Swap<16, big_endian>::readval(wv + 1);
10896 off_t branch_offset = stub_address - (insn_address + 4);
10897
10898 typedef struct Arm_relocate_functions<big_endian> RelocFuncs;
10899 switch (stub->stub_template()->type())
10900 {
10901 case arm_stub_a8_veneer_b_cond:
10902 // For a conditional branch, we re-write it to be a uncondition
10903 // branch to the stub. We use the THUMB-2 encoding here.
10904 upper_insn = 0xf000U;
10905 lower_insn = 0xb800U;
10906 // Fall through
10907 case arm_stub_a8_veneer_b:
10908 case arm_stub_a8_veneer_bl:
10909 case arm_stub_a8_veneer_blx:
10910 if ((lower_insn & 0x5000U) == 0x4000U)
10911 // For a BLX instruction, make sure that the relocation is
10912 // rounded up to a word boundary. This follows the semantics of
10913 // the instruction which specifies that bit 1 of the target
10914 // address will come from bit 1 of the base address.
10915 branch_offset = (branch_offset + 2) & ~3;
10916
10917 // Put BRANCH_OFFSET back into the insn.
10918 gold_assert(!utils::has_overflow<25>(branch_offset));
10919 upper_insn = RelocFuncs::thumb32_branch_upper(upper_insn, branch_offset);
10920 lower_insn = RelocFuncs::thumb32_branch_lower(lower_insn, branch_offset);
10921 break;
10922
10923 default:
10924 gold_unreachable();
10925 }
10926
10927 // Put the relocated value back in the object file:
10928 elfcpp::Swap<16, big_endian>::writeval(wv, upper_insn);
10929 elfcpp::Swap<16, big_endian>::writeval(wv + 1, lower_insn);
10930 }
10931
10932 template<bool big_endian>
10933 class Target_selector_arm : public Target_selector
10934 {
10935 public:
10936 Target_selector_arm()
10937 : Target_selector(elfcpp::EM_ARM, 32, big_endian,
10938 (big_endian ? "elf32-bigarm" : "elf32-littlearm"))
10939 { }
10940
10941 Target*
10942 do_instantiate_target()
10943 { return new Target_arm<big_endian>(); }
10944 };
10945
10946 // Fix .ARM.exidx section coverage.
10947
10948 template<bool big_endian>
10949 void
10950 Target_arm<big_endian>::fix_exidx_coverage(
10951 Layout* layout,
10952 Arm_output_section<big_endian>* exidx_section,
10953 Symbol_table* symtab)
10954 {
10955 // We need to look at all the input sections in output in ascending
10956 // order of of output address. We do that by building a sorted list
10957 // of output sections by addresses. Then we looks at the output sections
10958 // in order. The input sections in an output section are already sorted
10959 // by addresses within the output section.
10960
10961 typedef std::set<Output_section*, output_section_address_less_than>
10962 Sorted_output_section_list;
10963 Sorted_output_section_list sorted_output_sections;
10964 Layout::Section_list section_list;
10965 layout->get_allocated_sections(&section_list);
10966 for (Layout::Section_list::const_iterator p = section_list.begin();
10967 p != section_list.end();
10968 ++p)
10969 {
10970 // We only care about output sections that contain executable code.
10971 if (((*p)->flags() & elfcpp::SHF_EXECINSTR) != 0)
10972 sorted_output_sections.insert(*p);
10973 }
10974
10975 // Go over the output sections in ascending order of output addresses.
10976 typedef typename Arm_output_section<big_endian>::Text_section_list
10977 Text_section_list;
10978 Text_section_list sorted_text_sections;
10979 for(typename Sorted_output_section_list::iterator p =
10980 sorted_output_sections.begin();
10981 p != sorted_output_sections.end();
10982 ++p)
10983 {
10984 Arm_output_section<big_endian>* arm_output_section =
10985 Arm_output_section<big_endian>::as_arm_output_section(*p);
10986 arm_output_section->append_text_sections_to_list(&sorted_text_sections);
10987 }
10988
10989 exidx_section->fix_exidx_coverage(layout, sorted_text_sections, symtab,
10990 merge_exidx_entries());
10991 }
10992
10993 Target_selector_arm<false> target_selector_arm;
10994 Target_selector_arm<true> target_selector_armbe;
10995
10996 } // End anonymous namespace.
This page took 0.27597 seconds and 5 git commands to generate.