X-Git-Url: http://git.efficios.com/?a=blobdiff_plain;f=bfd%2Felf32-arm.c;h=1921780edb2ffdd7afe53791a73c5ed4a0e8dc1f;hb=91d6fa6a035cc7d0b7be5c99c194a64cb80924b0;hp=919d4585396522a4b99ba7057644cde1fd9767f1;hpb=461a49cacae7574c0f380957f125e96401f170e8;p=deliverable%2Fbinutils-gdb.git diff --git a/bfd/elf32-arm.c b/bfd/elf32-arm.c index 919d458539..1921780edb 100644 --- a/bfd/elf32-arm.c +++ b/bfd/elf32-arm.c @@ -1,6 +1,6 @@ /* 32-bit ELF support for ARM Copyright 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, - 2008 Free Software Foundation, Inc. + 2008, 2009 Free Software Foundation, Inc. This file is part of BFD, the Binary File Descriptor library. @@ -20,6 +20,8 @@ MA 02110-1301, USA. */ #include "sysdep.h" +#include + #include "bfd.h" #include "libiberty.h" #include "libbfd.h" @@ -59,7 +61,10 @@ #define ARM_ELF_ABI_VERSION 0 #define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM -static struct elf_backend_data elf32_arm_vxworks_bed; +static bfd_boolean elf32_arm_write_section (bfd *output_bfd, + struct bfd_link_info *link_info, + asection *sec, + bfd_byte *contents); /* Note: code such as elf32_arm_reloc_type_lookup expect to use e.g. R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO @@ -1881,7 +1886,8 @@ typedef unsigned short int insn16; interworkable. */ #define INTERWORK_FLAG(abfd) \ (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \ - || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK)) + || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \ + || ((abfd)->flags & BFD_LINKER_CREATED)) /* The linker script knows the section names for placement. The entry_names are used to do simple name mangling on the stubs. @@ -2018,24 +2024,21 @@ enum stub_insn_type DATA_TYPE }; -enum stub_reloc_type - { - STUB_RELOC_NONE = 0, - STUB_RELOC_ABS, - STUB_RELOC_PIC, - }; - -#define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0} -#define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0} -#define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0} -#define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)} -#define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)} +#define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0} +/* A bit of a hack. A Thumb conditional branch, in which the proper condition + is inserted in arm_build_one_stub(). */ +#define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1} +#define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0} +#define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)} +#define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0} +#define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)} +#define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)} typedef struct { bfd_vma data; enum stub_insn_type type; - enum stub_reloc_type reloc_type; + unsigned int r_type; int reloc_addend; } insn_sequence; @@ -2056,9 +2059,7 @@ static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] = DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */ }; -/* Thumb -> Thumb long branch stub. Used on architectures which - support only this mode, or on V4T where it is expensive to switch - to ARM. */ +/* Thumb -> Thumb long branch stub. Used on M-profile architectures. */ static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] = { THUMB16_INSN(0xb401), /* push {r0} */ @@ -2070,6 +2071,17 @@ static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] = DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */ }; +/* V4T Thumb -> Thumb long branch stub. Using the stack is not + allowed. 
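+   Instead the sequence drops into ARM state with "bx pc" (pc reads as
+   the address of the "ldr" below, bit zero clear) and clobbers ip
+   rather than spilling a low register.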
*/ +static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] = + { + THUMB16_INSN(0x4778), /* bx pc */ + THUMB16_INSN(0x46c0), /* nop */ + ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */ + ARM_INSN(0xe12fff1c), /* bx ip */ + DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */ + }; + /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not available. */ static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] = @@ -2089,28 +2101,146 @@ static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] = ARM_REL_INSN(0xea000000, -8), /* b (X-8) */ }; -/* ARM/Thumb -> ARM/Thumb long branch stub, PIC. On V5T and above, use +/* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use blx to reach the stub if necessary. */ -static const insn_sequence elf32_arm_stub_long_branch_any_any_pic[] = +static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] = { ARM_INSN(0xe59fc000), /* ldr r12, [pc] */ ARM_INSN(0xe08ff00c), /* add pc, pc, ip */ DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */ }; +/* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use + blx to reach the stub if necessary. We can not add into pc; + it is not guaranteed to mode switch (different in ARMv6 and + ARMv7). */ +static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] = + { + ARM_INSN(0xe59fc004), /* ldr r12, [pc, #4] */ + ARM_INSN(0xe08fc00c), /* add ip, pc, ip */ + ARM_INSN(0xe12fff1c), /* bx ip */ + DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */ + }; + +/* V4T ARM -> ARM long branch stub, PIC. */ +static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] = + { + ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */ + ARM_INSN(0xe08fc00c), /* add ip, pc, ip */ + ARM_INSN(0xe12fff1c), /* bx ip */ + DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */ + }; + +/* V4T Thumb -> ARM long branch stub, PIC. */ +static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] = + { + THUMB16_INSN(0x4778), /* bx pc */ + THUMB16_INSN(0x46c0), /* nop */ + ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */ + ARM_INSN(0xe08cf00f), /* add pc, ip, pc */ + DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X) */ + }; + +/* Thumb -> Thumb long branch stub, PIC. Used on M-profile + architectures. */ +static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] = + { + THUMB16_INSN(0xb401), /* push {r0} */ + THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */ + THUMB16_INSN(0x46fc), /* mov ip, pc */ + THUMB16_INSN(0x4484), /* add ip, r0 */ + THUMB16_INSN(0xbc01), /* pop {r0} */ + THUMB16_INSN(0x4760), /* bx ip */ + DATA_WORD(0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X) */ + }; + +/* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not + allowed. */ +static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] = + { + THUMB16_INSN(0x4778), /* bx pc */ + THUMB16_INSN(0x46c0), /* nop */ + ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */ + ARM_INSN(0xe08fc00c), /* add ip, pc, ip */ + ARM_INSN(0xe12fff1c), /* bx ip */ + DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */ + }; + +/* Cortex-A8 erratum-workaround stubs. */ + +/* Stub used for conditional branches (which may be beyond +/-1MB away, so we + can't use a conditional branch to reach this stub). */ + +static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] = + { + THUMB16_BCOND_INSN(0xd001), /* b.n true. */ + THUMB32_B_INSN(0xf000b800, -4), /* b.w insn_after_original_branch. */ + THUMB32_B_INSN(0xf000b800, -4) /* true: b.w original_branch_dest. 
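+                                           If the original condition
+                                           (copied into the b.n by
+                                           arm_build_one_stub) fails, we
+                                           fall through to the first b.w
+                                           and return past the original
+                                           branch; if it holds, the b.n
+                                           skips here.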
*/ + }; + +/* Stub used for b.w and bl.w instructions. */ + +static const insn_sequence elf32_arm_stub_a8_veneer_b[] = + { + THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */ + }; + +static const insn_sequence elf32_arm_stub_a8_veneer_bl[] = + { + THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */ + }; + +/* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w + instruction (which switches to ARM mode) to point to this stub. Jump to the + real destination using an ARM-mode branch. */ + +static const insn_sequence elf32_arm_stub_a8_veneer_blx[] = + { + ARM_REL_INSN(0xea000000, -8) /* b original_branch_dest. */ + }; + /* Section name for stubs is the associated section name plus this string. */ #define STUB_SUFFIX ".stub" -enum elf32_arm_stub_type -{ +/* One entry per long/short branch stub defined above. */ +#define DEF_STUBS \ + DEF_STUB(long_branch_any_any) \ + DEF_STUB(long_branch_v4t_arm_thumb) \ + DEF_STUB(long_branch_thumb_only) \ + DEF_STUB(long_branch_v4t_thumb_thumb) \ + DEF_STUB(long_branch_v4t_thumb_arm) \ + DEF_STUB(short_branch_v4t_thumb_arm) \ + DEF_STUB(long_branch_any_arm_pic) \ + DEF_STUB(long_branch_any_thumb_pic) \ + DEF_STUB(long_branch_v4t_thumb_thumb_pic) \ + DEF_STUB(long_branch_v4t_arm_thumb_pic) \ + DEF_STUB(long_branch_v4t_thumb_arm_pic) \ + DEF_STUB(long_branch_thumb_only_pic) \ + DEF_STUB(a8_veneer_b_cond) \ + DEF_STUB(a8_veneer_b) \ + DEF_STUB(a8_veneer_bl) \ + DEF_STUB(a8_veneer_blx) + +#define DEF_STUB(x) arm_stub_##x, +enum elf32_arm_stub_type { arm_stub_none, - arm_stub_long_branch_any_any, - arm_stub_long_branch_v4t_arm_thumb, - arm_stub_long_branch_thumb_only, - arm_stub_long_branch_v4t_thumb_arm, - arm_stub_short_branch_v4t_thumb_arm, - arm_stub_long_branch_any_any_pic, + DEF_STUBS + /* Note the first a8_veneer type */ + arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond +}; +#undef DEF_STUB + +typedef struct +{ + const insn_sequence* template_sequence; + int template_size; +} stub_def; + +#define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)}, +static const stub_def stub_definitions[] = { + {NULL, 0}, + DEF_STUBS }; struct elf32_arm_stub_hash_entry @@ -2129,6 +2259,13 @@ struct elf32_arm_stub_hash_entry bfd_vma target_value; asection *target_section; + /* Offset to apply to relocation referencing target_value. */ + bfd_vma target_addend; + + /* The instruction which caused this stub to be generated (only valid for + Cortex-A8 erratum workaround stubs at present). */ + unsigned long orig_insn; + /* The stub type. */ enum elf32_arm_stub_type stub_type; /* Its encoding size in bytes. */ @@ -2196,20 +2333,86 @@ typedef struct elf32_vfp11_erratum_list } elf32_vfp11_erratum_list; +typedef enum +{ + DELETE_EXIDX_ENTRY, + INSERT_EXIDX_CANTUNWIND_AT_END +} +arm_unwind_edit_type; + +/* A (sorted) list of edits to apply to an unwind table. */ +typedef struct arm_unwind_table_edit +{ + arm_unwind_edit_type type; + /* Note: we sometimes want to insert an unwind entry corresponding to a + section different from the one we're currently writing out, so record the + (text) section this edit relates to here. */ + asection *linked_section; + unsigned int index; + struct arm_unwind_table_edit *next; +} +arm_unwind_table_edit; + typedef struct _arm_elf_section_data { + /* Information about mapping symbols. */ struct bfd_elf_section_data elf; unsigned int mapcount; unsigned int mapsize; elf32_arm_section_map *map; + /* Information about CPU errata. 
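+     At present this is the list of VFP11 denormal erratum veneers;
+     Cortex-A8 branch erratum fixes are tracked in the link hash table
+     instead.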
*/ unsigned int erratumcount; elf32_vfp11_erratum_list *erratumlist; + /* Information about unwind tables. */ + union + { + /* Unwind info attached to a text section. */ + struct + { + asection *arm_exidx_sec; + } text; + + /* Unwind info attached to an .ARM.exidx section. */ + struct + { + arm_unwind_table_edit *unwind_edit_list; + arm_unwind_table_edit *unwind_edit_tail; + } exidx; + } u; } _arm_elf_section_data; #define elf32_arm_section_data(sec) \ ((_arm_elf_section_data *) elf_section_data (sec)) +/* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum. + These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs), + so may be created multiple times: we use an array of these entries whilst + relaxing which we can refresh easily, then create stubs for each potentially + erratum-triggering instruction once we've settled on a solution. */ + +struct a8_erratum_fix { + bfd *input_bfd; + asection *section; + bfd_vma offset; + bfd_vma addend; + unsigned long orig_insn; + char *stub_name; + enum elf32_arm_stub_type stub_type; +}; + +/* A table of relocs applied to branches which might trigger Cortex-A8 + erratum. */ + +struct a8_erratum_reloc { + bfd_vma from; + bfd_vma destination; + unsigned int r_type; + unsigned char st_type; + const char *sym_name; + bfd_boolean non_a8_stub; +}; + /* The size of the thread control block. */ #define TCB_SIZE 8 @@ -2318,6 +2521,17 @@ struct elf32_arm_link_hash_entry ((struct elf32_arm_stub_hash_entry *) \ bfd_hash_lookup ((table), (string), (create), (copy))) +/* Array to keep track of which stub sections have been created, and + information on stub grouping. */ +struct map_stub +{ + /* This is the section to which stubs in the group will be + attached. */ + asection *link_sec; + /* The stub section. */ + asection *stub_sec; +}; + /* ARM ELF linker hash table. */ struct elf32_arm_link_hash_table { @@ -2341,6 +2555,12 @@ struct elf32_arm_link_hash_table veneers. */ bfd_size_type vfp11_erratum_glue_size; + /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This + holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and + elf32_arm_write_section(). */ + struct a8_erratum_fix *a8_erratum_fixes; + unsigned int num_a8_erratum_fixes; + /* An arbitrary input BFD chosen to hold the glue sections. */ bfd * bfd_of_glue_owner; @@ -2359,6 +2579,9 @@ struct elf32_arm_link_hash_table 2 = Generate v4 interworing stubs. */ int fix_v4bx; + /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */ + int fix_cortex_a8; + /* Nonzero if the ARM/Thumb BLX instructions are available for use. */ int use_blx; @@ -2406,8 +2629,8 @@ struct elf32_arm_link_hash_table bfd_vma offset; } tls_ldm_got; - /* Small local sym to section mapping cache. */ - struct sym_sec_cache sym_sec; + /* Small local sym cache. */ + struct sym_cache sym_cache; /* For convenience in allocate_dynrelocs. */ bfd * obfd; @@ -2424,14 +2647,7 @@ struct elf32_arm_link_hash_table /* Array to keep track of which stub sections have been created, and information on stub grouping. */ - struct map_stub - { - /* This is the section to which stubs in the group will be - attached. */ - asection *link_sec; - /* The stub section. */ - asection *stub_sec; - } *stub_group; + struct map_stub *stub_group; /* Assorted information used by elf32_arm_size_stubs. */ unsigned int bfd_count; @@ -2452,7 +2668,8 @@ elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry, /* Allocate the structure if it has not already been allocated by a subclass. 
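      (Standard BFD hash-table idiom: a table derived from this one may
      have pre-allocated a larger entry, in which case we must not
      allocate again.)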
*/ if (ret == NULL) - ret = bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry)); + ret = (struct elf32_arm_link_hash_entry *) + bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry)); if (ret == NULL) return (struct bfd_hash_entry *) ret; @@ -2486,8 +2703,8 @@ stub_hash_newfunc (struct bfd_hash_entry *entry, subclass. */ if (entry == NULL) { - entry = bfd_hash_allocate (table, - sizeof (struct elf32_arm_stub_hash_entry)); + entry = (struct bfd_hash_entry *) + bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry)); if (entry == NULL) return entry; } @@ -2504,12 +2721,15 @@ stub_hash_newfunc (struct bfd_hash_entry *entry, eh->stub_offset = 0; eh->target_value = 0; eh->target_section = NULL; + eh->target_addend = 0; + eh->orig_insn = 0; eh->stub_type = arm_stub_none; eh->stub_size = 0; eh->stub_template = NULL; eh->stub_template_size = 0; eh->h = NULL; eh->id_sec = NULL; + eh->output_name = NULL; } return entry; @@ -2536,15 +2756,9 @@ create_got_section (bfd *dynobj, struct bfd_link_info *info) if (!htab->sgot || !htab->sgotplt) abort (); - htab->srelgot = bfd_make_section_with_flags (dynobj, - RELOC_SECTION (htab, ".got"), - (SEC_ALLOC | SEC_LOAD - | SEC_HAS_CONTENTS - | SEC_IN_MEMORY - | SEC_LINKER_CREATED - | SEC_READONLY)); - if (htab->srelgot == NULL - || ! bfd_set_section_alignment (dynobj, htab->srelgot, 2)) + htab->srelgot = bfd_get_section_by_name (dynobj, + RELOC_SECTION (htab, ".got")); + if (htab->srelgot == NULL) return FALSE; return TRUE; } @@ -2671,7 +2885,7 @@ elf32_arm_link_hash_table_create (bfd *abfd) struct elf32_arm_link_hash_table *ret; bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table); - ret = bfd_malloc (amt); + ret = (struct elf32_arm_link_hash_table *) bfd_malloc (amt); if (ret == NULL) return NULL; @@ -2698,6 +2912,7 @@ elf32_arm_link_hash_table_create (bfd *abfd) ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE; ret->vfp11_erratum_glue_size = 0; ret->num_vfp11_fixes = 0; + ret->fix_cortex_a8 = 0; ret->bfd_of_glue_owner = NULL; ret->byteswap_code = 0; ret->target1_is_rel = 0; @@ -2714,7 +2929,7 @@ elf32_arm_link_hash_table_create (bfd *abfd) ret->vxworks_p = 0; ret->symbian_p = 0; ret->use_rel = 1; - ret->sym_sec.abfd = NULL; + ret->sym_cache.abfd = NULL; ret->obfd = abfd; ret->tls_ldm_got.refcount = 0; ret->stub_bfd = NULL; @@ -2756,7 +2971,7 @@ using_thumb_only (struct elf32_arm_link_hash_table *globals) Tag_CPU_arch); int profile; - if (arch != TAG_CPU_ARCH_V7) + if (arch != TAG_CPU_ARCH_V7 && arch != TAG_CPU_ARCH_V7E_M) return FALSE; profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, @@ -2775,6 +2990,28 @@ using_thumb2 (struct elf32_arm_link_hash_table *globals) return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7; } +/* Determine what kind of NOPs are available. 
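+   The architected ARM-state NOP hint (0xe320f000) only exists from
+   ARMv6K/ARMv6T2 onwards, and the 32-bit Thumb-2 "nop.w" (0xf3af8000)
+   needs ARMv6T2 or later; older cores fall back on "mov r0, r0" or
+   the 16-bit "mov r8, r8" (0x46c0).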
*/ + +static bfd_boolean +arch_has_arm_nop (struct elf32_arm_link_hash_table *globals) +{ + const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, + Tag_CPU_arch); + return arch == TAG_CPU_ARCH_V6T2 + || arch == TAG_CPU_ARCH_V6K + || arch == TAG_CPU_ARCH_V7 + || arch == TAG_CPU_ARCH_V7E_M; +} + +static bfd_boolean +arch_has_thumb2_nop (struct elf32_arm_link_hash_table *globals) +{ + const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, + Tag_CPU_arch); + return (arch == TAG_CPU_ARCH_V6T2 || arch == TAG_CPU_ARCH_V7 + || arch == TAG_CPU_ARCH_V7E_M); +} + static bfd_boolean arm_stub_is_thumb (enum elf32_arm_stub_type stub_type) { @@ -2783,6 +3020,8 @@ arm_stub_is_thumb (enum elf32_arm_stub_type stub_type) case arm_stub_long_branch_thumb_only: case arm_stub_long_branch_v4t_thumb_arm: case arm_stub_short_branch_v4t_thumb_arm: + case arm_stub_long_branch_v4t_thumb_arm_pic: + case arm_stub_long_branch_thumb_only_pic: return TRUE; case arm_stub_none: BFD_FAIL (); @@ -2813,6 +3052,7 @@ arm_type_of_stub (struct bfd_link_info *info, int thumb2; int thumb_only; enum elf32_arm_stub_type stub_type = arm_stub_none; + int use_plt = 0; /* We don't know the actual type of destination in case it is of type STT_SECTION: give up. */ @@ -2834,20 +3074,40 @@ arm_type_of_stub (struct bfd_link_info *info, r_type = ELF32_R_TYPE (rel->r_info); - /* If the call will go through a PLT entry then we do not need - glue. */ + /* Keep a simpler condition, for the sake of clarity. */ if (globals->splt != NULL && hash != NULL && hash->root.plt.offset != (bfd_vma) -1) - return stub_type; - - if (r_type == R_ARM_THM_CALL) { + use_plt = 1; + /* Note when dealing with PLT entries: the main PLT stub is in + ARM mode, so if the branch is in Thumb mode, another + Thumb->ARM stub will be inserted later just before the ARM + PLT stub. We don't take this extra distance into account + here, because if a long branch stub is needed, we'll add a + Thumb->Arm one and branch directly to the ARM PLT entry + because it avoids spreading offset corrections in several + places. */ + } + + if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24) + { + /* Handle cases where: + - this call goes too far (different Thumb/Thumb2 max + distance) + - it's a Thumb->Arm call and blx is not available, or it's a + Thumb->Arm branch (not bl). A stub is needed in this case, + but only if this call is not through a PLT entry. Indeed, + PLT stubs handle mode switching already. + */ if ((!thumb2 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET))) || (thumb2 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET))) - || ((st_type != STT_ARM_TFUNC) && !globals->use_blx)) + || ((st_type != STT_ARM_TFUNC) + && (((r_type == R_ARM_THM_CALL) && !globals->use_blx) + || (r_type == R_ARM_THM_JUMP24)) + && !use_plt)) { if (st_type == STT_ARM_TFUNC) { @@ -2856,24 +3116,29 @@ arm_type_of_stub (struct bfd_link_info *info, { stub_type = (info->shared | globals->pic_veneer) /* PIC stubs. */ - ? ((globals->use_blx) - /* V5T and above. */ - ? arm_stub_long_branch_any_any_pic - /* not yet supported on V4T. */ - : arm_stub_none) + ? ((globals->use_blx + && (r_type ==R_ARM_THM_CALL)) + /* V5T and above. Stub starts with ARM code, so + we must be able to switch mode before + reaching it, which is only possible for 'bl' + (ie R_ARM_THM_CALL relocation). */ + ? arm_stub_long_branch_any_thumb_pic + /* On V4T, use Thumb code only. 
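+			    The stub entry point stays Thumb (it
+			    switches to ARM internally via "bx pc"),
+			    so a plain Thumb branch, which cannot
+			    change state, can still reach it.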
*/ + : arm_stub_long_branch_v4t_thumb_thumb_pic) /* non-PIC stubs. */ - : ((globals->use_blx) + : ((globals->use_blx + && (r_type ==R_ARM_THM_CALL)) /* V5T and above. */ ? arm_stub_long_branch_any_any /* V4T. */ - : arm_stub_long_branch_thumb_only); + : arm_stub_long_branch_v4t_thumb_thumb); } else { stub_type = (info->shared | globals->pic_veneer) - /* PIC stub not yet supported on V4T. */ - ? arm_stub_none + /* PIC stub. */ + ? arm_stub_long_branch_thumb_only_pic /* non-PIC stub. */ : arm_stub_long_branch_thumb_only; } @@ -2893,14 +3158,16 @@ arm_type_of_stub (struct bfd_link_info *info, stub_type = (info->shared | globals->pic_veneer) /* PIC stubs. */ - ? ((globals->use_blx) + ? ((globals->use_blx + && (r_type ==R_ARM_THM_CALL)) /* V5T and above. */ - ? arm_stub_long_branch_any_any_pic - /* not yet supported on V4T. */ - : arm_stub_none) + ? arm_stub_long_branch_any_arm_pic + /* V4T PIC stub. */ + : arm_stub_long_branch_v4t_thumb_arm_pic) /* non-PIC stubs. */ - : ((globals->use_blx) + : ((globals->use_blx + && (r_type ==R_ARM_THM_CALL)) /* V5T and above. */ ? arm_stub_long_branch_any_any /* V4T. */ @@ -2914,7 +3181,7 @@ arm_type_of_stub (struct bfd_link_info *info, } } } - else if (r_type == R_ARM_CALL) + else if (r_type == R_ARM_CALL || r_type == R_ARM_JUMP24 || r_type == R_ARM_PLT32) { if (st_type == STT_ARM_TFUNC) { @@ -2934,11 +3201,18 @@ arm_type_of_stub (struct bfd_link_info *info, the mode change (bit 24 (H) of BLX encoding). */ if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2) || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET) - || !globals->use_blx) + || ((r_type == R_ARM_CALL) && !globals->use_blx) + || (r_type == R_ARM_JUMP24) + || (r_type == R_ARM_PLT32)) { stub_type = (info->shared | globals->pic_veneer) /* PIC stubs. */ - ? arm_stub_long_branch_any_any_pic + ? ((globals->use_blx) + /* V5T and above. */ + ? arm_stub_long_branch_any_thumb_pic + /* V4T stub. */ + : arm_stub_long_branch_v4t_arm_thumb_pic) + /* non-PIC stubs. */ : ((globals->use_blx) /* V5T and above. */ @@ -2955,7 +3229,7 @@ arm_type_of_stub (struct bfd_link_info *info, { stub_type = (info->shared | globals->pic_veneer) /* PIC stubs. */ - ? arm_stub_long_branch_any_any_pic + ? arm_stub_long_branch_any_arm_pic /* non-PIC stubs. */ : arm_stub_long_branch_any_any; } @@ -2979,7 +3253,7 @@ elf32_arm_stub_name (const asection *input_section, if (hash) { len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1; - stub_name = bfd_malloc (len); + stub_name = (char *) bfd_malloc (len); if (stub_name != NULL) sprintf (stub_name, "%08x_%s+%x", input_section->id & 0xffffffff, @@ -2989,7 +3263,7 @@ elf32_arm_stub_name (const asection *input_section, else { len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1; - stub_name = bfd_malloc (len); + stub_name = (char *) bfd_malloc (len); if (stub_name != NULL) sprintf (stub_name, "%08x_%x:%x+%x", input_section->id & 0xffffffff, @@ -3050,17 +3324,16 @@ elf32_arm_get_stub_entry (const asection *input_section, return stub_entry; } -/* Add a new stub entry to the stub hash. Not all fields of the new - stub entry are initialised. */ +/* Find or create a stub section. Returns a pointer to the stub section, and + the section to which the stub section will be attached (in *LINK_SEC_P). + LINK_SEC_P may be NULL. 
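+   The stub section is named after the group's link section with
+   STUB_SUFFIX appended, and is created lazily the first time a stub
+   is needed for that group.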
*/ -static struct elf32_arm_stub_hash_entry * -elf32_arm_add_stub (const char *stub_name, - asection *section, - struct elf32_arm_link_hash_table *htab) +static asection * +elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section, + struct elf32_arm_link_hash_table *htab) { asection *link_sec; asection *stub_sec; - struct elf32_arm_stub_hash_entry *stub_entry; link_sec = htab->stub_group[section->id].link_sec; stub_sec = htab->stub_group[section->id].stub_sec; @@ -3075,7 +3348,7 @@ elf32_arm_add_stub (const char *stub_name, namelen = strlen (link_sec->name); len = namelen + sizeof (STUB_SUFFIX); - s_name = bfd_alloc (htab->stub_bfd, len); + s_name = (char *) bfd_alloc (htab->stub_bfd, len); if (s_name == NULL) return NULL; @@ -3088,6 +3361,28 @@ elf32_arm_add_stub (const char *stub_name, } htab->stub_group[section->id].stub_sec = stub_sec; } + + if (link_sec_p) + *link_sec_p = link_sec; + + return stub_sec; +} + +/* Add a new stub entry to the stub hash. Not all fields of the new + stub entry are initialised. */ + +static struct elf32_arm_stub_hash_entry * +elf32_arm_add_stub (const char *stub_name, + asection *section, + struct elf32_arm_link_hash_table *htab) +{ + asection *link_sec; + asection *stub_sec; + struct elf32_arm_stub_hash_entry *stub_entry; + + stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab); + if (stub_sec == NULL) + return NULL; /* Enter this entry into the linker stub hash table. */ stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name, @@ -3133,10 +3428,16 @@ put_thumb_insn (struct elf32_arm_link_hash_table * htab, bfd_putb16 (val, ptr); } +static bfd_reloc_status_type elf32_arm_final_link_relocate + (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *, + Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *, + const char *, int, struct elf_link_hash_entry *, bfd_boolean *, char **); + static bfd_boolean arm_build_one_stub (struct bfd_hash_entry *gen_entry, void * in_arg) { +#define MAXRELOCS 2 struct elf32_arm_stub_hash_entry *stub_entry; struct bfd_link_info *info; struct elf32_arm_link_hash_table *htab; @@ -3147,11 +3448,12 @@ arm_build_one_stub (struct bfd_hash_entry *gen_entry, bfd_vma sym_value; int template_size; int size; - const insn_sequence *template; + const insn_sequence *template_sequence; int i; struct elf32_arm_link_hash_table * globals; - int stub_reloc_idx = -1; - int stub_reloc_offset; + int stub_reloc_idx[MAXRELOCS] = {-1, -1}; + int stub_reloc_offset[MAXRELOCS] = {0, 0}; + int nrelocs = 0; /* Massage our args to the form they really have. */ stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry; @@ -3162,6 +3464,12 @@ arm_build_one_stub (struct bfd_hash_entry *gen_entry, htab = elf32_arm_hash_table (info); stub_sec = stub_entry->stub_sec; + if ((htab->fix_cortex_a8 < 0) + != (stub_entry->stub_type >= arm_stub_a8_veneer_lwm)) + /* We have to do the a8 fixes last, as they are less aligned than + the other veneers. */ + return TRUE; + /* Make a note of the offset within the stubs for this entry. 
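      (stub_offset is where this stub's code starts within its stub
      section; the relocations applied below are placed relative to
      it.)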
*/ stub_entry->stub_offset = stub_sec->size; loc = stub_sec->contents + stub_entry->stub_offset; @@ -3177,35 +3485,61 @@ arm_build_one_stub (struct bfd_hash_entry *gen_entry, + stub_entry->target_section->output_offset + stub_entry->target_section->output_section->vma); - template = stub_entry->stub_template; + template_sequence = stub_entry->stub_template; template_size = stub_entry->stub_template_size; size = 0; for (i = 0; i < template_size; i++) { - switch(template[i].type) + switch (template_sequence[i].type) { case THUMB16_TYPE: - put_thumb_insn (globals, stub_bfd, template[i].data, loc + size); - size += 2; + { + bfd_vma data = (bfd_vma) template_sequence[i].data; + if (template_sequence[i].reloc_addend != 0) + { + /* We've borrowed the reloc_addend field to mean we should + insert a condition code into this (Thumb-1 branch) + instruction. See THUMB16_BCOND_INSN. */ + BFD_ASSERT ((data & 0xff00) == 0xd000); + data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8; + } + put_thumb_insn (globals, stub_bfd, data, loc + size); + size += 2; + } break; + case THUMB32_TYPE: + put_thumb_insn (globals, stub_bfd, + (template_sequence[i].data >> 16) & 0xffff, + loc + size); + put_thumb_insn (globals, stub_bfd, template_sequence[i].data & 0xffff, + loc + size + 2); + if (template_sequence[i].r_type != R_ARM_NONE) + { + stub_reloc_idx[nrelocs] = i; + stub_reloc_offset[nrelocs++] = size; + } + size += 4; + break; + case ARM_TYPE: - put_arm_insn (globals, stub_bfd, template[i].data, loc + size); + put_arm_insn (globals, stub_bfd, template_sequence[i].data, + loc + size); /* Handle cases where the target is encoded within the instruction. */ - if (template[i].reloc_type == R_ARM_JUMP24) + if (template_sequence[i].r_type == R_ARM_JUMP24) { - stub_reloc_idx = i; - stub_reloc_offset = size; + stub_reloc_idx[nrelocs] = i; + stub_reloc_offset[nrelocs++] = size; } size += 4; break; case DATA_TYPE: - bfd_put_32 (stub_bfd, template[i].data, loc + size); - stub_reloc_idx = i; - stub_reloc_offset = size; + bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size); + stub_reloc_idx[nrelocs] = i; + stub_reloc_offset[nrelocs++] = size; size += 4; break; @@ -3225,81 +3559,88 @@ arm_build_one_stub (struct bfd_hash_entry *gen_entry, if (stub_entry->st_type == STT_ARM_TFUNC) sym_value |= 1; - /* Assume there is one and only one entry to relocate in each stub. */ - BFD_ASSERT (stub_reloc_idx != -1); + /* Assume there is at least one and at most MAXRELOCS entries to relocate + in each stub. */ + BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS); - _bfd_final_link_relocate (elf32_arm_howto_from_type (template[stub_reloc_idx].reloc_type), - stub_bfd, stub_sec, stub_sec->contents, - stub_entry->stub_offset + stub_reloc_offset, - sym_value, template[stub_reloc_idx].reloc_addend); + for (i = 0; i < nrelocs; i++) + if (template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP24 + || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP19 + || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_CALL + || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_XPC22) + { + Elf_Internal_Rela rel; + bfd_boolean unresolved_reloc; + char *error_message; + int sym_flags + = (template_sequence[stub_reloc_idx[i]].r_type != R_ARM_THM_XPC22) + ? 
STT_ARM_TFUNC : 0; + bfd_vma points_to = sym_value + stub_entry->target_addend; + + rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i]; + rel.r_info = ELF32_R_INFO (0, + template_sequence[stub_reloc_idx[i]].r_type); + rel.r_addend = template_sequence[stub_reloc_idx[i]].reloc_addend; + + if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0) + /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[] + template should refer back to the instruction after the original + branch. */ + points_to = sym_value; + + /* There may be unintended consequences if this is not true. */ + BFD_ASSERT (stub_entry->h == NULL); + + /* Note: _bfd_final_link_relocate doesn't handle these relocations + properly. We should probably use this function unconditionally, + rather than only for certain relocations listed in the enclosing + conditional, for the sake of consistency. */ + elf32_arm_final_link_relocate (elf32_arm_howto_from_type + (template_sequence[stub_reloc_idx[i]].r_type), + stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel, + points_to, info, stub_entry->target_section, "", sym_flags, + (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc, + &error_message); + } + else + { + _bfd_final_link_relocate (elf32_arm_howto_from_type + (template_sequence[stub_reloc_idx[i]].r_type), stub_bfd, stub_sec, + stub_sec->contents, stub_entry->stub_offset + stub_reloc_offset[i], + sym_value + stub_entry->target_addend, + template_sequence[stub_reloc_idx[i]].reloc_addend); + } return TRUE; +#undef MAXRELOCS } -/* As above, but don't actually build the stub. Just bump offset so - we know stub section sizes. */ +/* Calculate the template, template size and instruction size for a stub. + Return value is the instruction size. */ -static bfd_boolean -arm_size_one_stub (struct bfd_hash_entry *gen_entry, - void * in_arg) +static unsigned int +find_stub_size_and_template (enum elf32_arm_stub_type stub_type, + const insn_sequence **stub_template, + int *stub_template_size) { - struct elf32_arm_stub_hash_entry *stub_entry; - struct elf32_arm_link_hash_table *htab; - const insn_sequence *template; - int template_size; - int size; - int i; + const insn_sequence *template_sequence = NULL; + int template_size = 0, i; + unsigned int size; - /* Massage our args to the form they really have. 
*/ - stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry; - htab = (struct elf32_arm_link_hash_table *) in_arg; - - switch (stub_entry->stub_type) - { - case arm_stub_long_branch_any_any: - template = elf32_arm_stub_long_branch_any_any; - template_size = sizeof (elf32_arm_stub_long_branch_any_any) / sizeof (insn_sequence); - - break; - case arm_stub_long_branch_v4t_arm_thumb: - template = elf32_arm_stub_long_branch_v4t_arm_thumb; - template_size = sizeof (elf32_arm_stub_long_branch_v4t_arm_thumb) / sizeof (insn_sequence); - break; - case arm_stub_long_branch_thumb_only: - template = elf32_arm_stub_long_branch_thumb_only; - template_size = sizeof (elf32_arm_stub_long_branch_thumb_only) / sizeof (insn_sequence); - break; - case arm_stub_long_branch_v4t_thumb_arm: - template = elf32_arm_stub_long_branch_v4t_thumb_arm; - template_size = sizeof (elf32_arm_stub_long_branch_v4t_thumb_arm) / sizeof (insn_sequence); - break; - case arm_stub_short_branch_v4t_thumb_arm: - template = elf32_arm_stub_short_branch_v4t_thumb_arm; - template_size = sizeof (elf32_arm_stub_short_branch_v4t_thumb_arm) / sizeof (insn_sequence); - break; - case arm_stub_long_branch_any_any_pic: - template = elf32_arm_stub_long_branch_any_any_pic; - template_size = sizeof (elf32_arm_stub_long_branch_any_any_pic) / sizeof (insn_sequence); - break; - default: - BFD_FAIL (); - return FALSE; - break; - } + template_sequence = stub_definitions[stub_type].template_sequence; + template_size = stub_definitions[stub_type].template_size; size = 0; for (i = 0; i < template_size; i++) { - switch(template[i].type) + switch (template_sequence[i].type) { case THUMB16_TYPE: size += 2; break; case ARM_TYPE: - size += 4; - break; - + case THUMB32_TYPE: case DATA_TYPE: size += 4; break; @@ -3310,8 +3651,39 @@ arm_size_one_stub (struct bfd_hash_entry *gen_entry, } } + if (stub_template) + *stub_template = template_sequence; + + if (stub_template_size) + *stub_template_size = template_size; + + return size; +} + +/* As above, but don't actually build the stub. Just bump offset so + we know stub section sizes. */ + +static bfd_boolean +arm_size_one_stub (struct bfd_hash_entry *gen_entry, + void * in_arg) +{ + struct elf32_arm_stub_hash_entry *stub_entry; + struct elf32_arm_link_hash_table *htab; + const insn_sequence *template_sequence; + int template_size, size; + + /* Massage our args to the form they really have. 
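+     The size and template come from the stub's type; the running
+     section size is then rounded up so every stub starts 8-byte
+     aligned.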
*/ + stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry; + htab = (struct elf32_arm_link_hash_table *) in_arg; + + BFD_ASSERT((stub_entry->stub_type > arm_stub_none) + && stub_entry->stub_type < ARRAY_SIZE(stub_definitions)); + + size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence, + &template_size); + stub_entry->stub_size = size; - stub_entry->stub_template = template; + stub_entry->stub_template = template_sequence; stub_entry->stub_template_size = template_size; size = (size + 7) & ~7; @@ -3358,7 +3730,7 @@ elf32_arm_setup_section_lists (bfd *output_bfd, htab->bfd_count = bfd_count; amt = sizeof (struct map_stub) * (top_id + 1); - htab->stub_group = bfd_zmalloc (amt); + htab->stub_group = (struct map_stub *) bfd_zmalloc (amt); if (htab->stub_group == NULL) return -1; @@ -3375,7 +3747,7 @@ elf32_arm_setup_section_lists (bfd *output_bfd, htab->top_index = top_index; amt = sizeof (asection *) * (top_index + 1); - input_list = bfd_malloc (amt); + input_list = (asection **) bfd_malloc (amt); htab->input_list = input_list; if (input_list == NULL) return -1; @@ -3413,12 +3785,12 @@ elf32_arm_next_input_section (struct bfd_link_info *info, { asection **list = htab->input_list + isec->output_section->index; - if (*list != bfd_abs_section_ptr) + if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0) { /* Steal the link_sec pointer for our list. */ #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec) /* This happens to make the list in reverse order, - which is what we want. */ + which we reverse later. */ PREV_SEC (isec) = *list; *list = isec; } @@ -3427,7 +3799,7 @@ elf32_arm_next_input_section (struct bfd_link_info *info, /* See whether we can group stub sections together. Grouping stub sections may result in fewer stubs. More importantly, we need to - put all .init* and .fini* stubs at the beginning of the .init or + put all .init* and .fini* stubs at the end of the .init or .fini output sections respectively, because glibc splits the _init and _fini functions into multiple parts. Putting a stub in the middle of a function is not a good idea. */ @@ -3435,124 +3807,495 @@ elf32_arm_next_input_section (struct bfd_link_info *info, static void group_sections (struct elf32_arm_link_hash_table *htab, bfd_size_type stub_group_size, - bfd_boolean stubs_always_before_branch) + bfd_boolean stubs_always_after_branch) { - asection **list = htab->input_list + htab->top_index; + asection **list = htab->input_list; do { asection *tail = *list; + asection *head; if (tail == bfd_abs_section_ptr) continue; + /* Reverse the list: we must avoid placing stubs at the + beginning of the section because the beginning of the text + section may be required for an interrupt vector in bare metal + code. */ +#define NEXT_SEC PREV_SEC + head = NULL; while (tail != NULL) + { + /* Pop from tail. */ + asection *item = tail; + tail = PREV_SEC (item); + + /* Push on head. 
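+	     (Popping each section off the old list and pushing it here
+	     reverses the order.)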
*/ + NEXT_SEC (item) = head; + head = item; + } + + while (head != NULL) { asection *curr; - asection *prev; - bfd_size_type total; + asection *next; + bfd_vma stub_group_start = head->output_offset; + bfd_vma end_of_next; - curr = tail; - total = tail->size; - while ((prev = PREV_SEC (curr)) != NULL - && ((total += curr->output_offset - prev->output_offset) - < stub_group_size)) - curr = prev; + curr = head; + while (NEXT_SEC (curr) != NULL) + { + next = NEXT_SEC (curr); + end_of_next = next->output_offset + next->size; + if (end_of_next - stub_group_start >= stub_group_size) + /* End of NEXT is too far from start, so stop. */ + break; + /* Add NEXT to the group. */ + curr = next; + } - /* OK, the size from the start of CURR to the end is less + /* OK, the size from the start to the start of CURR is less than stub_group_size and thus can be handled by one stub - section. (Or the tail section is itself larger than + section. (Or the head section is itself larger than stub_group_size, in which case we may be toast.) We should really be keeping track of the total size of stubs added here, as stubs contribute to the final output section size. */ do { - prev = PREV_SEC (tail); + next = NEXT_SEC (head); /* Set up this stub group. */ - htab->stub_group[tail->id].link_sec = curr; + htab->stub_group[head->id].link_sec = curr; } - while (tail != curr && (tail = prev) != NULL); + while (head != curr && (head = next) != NULL); /* But wait, there's more! Input sections up to stub_group_size - bytes before the stub section can be handled by it too. */ - if (!stubs_always_before_branch) + bytes after the stub section can be handled by it too. */ + if (!stubs_always_after_branch) { - total = 0; - while (prev != NULL - && ((total += tail->output_offset - prev->output_offset) - < stub_group_size)) + stub_group_start = curr->output_offset + curr->size; + + while (next != NULL) { - tail = prev; - prev = PREV_SEC (tail); - htab->stub_group[tail->id].link_sec = curr; + end_of_next = next->output_offset + next->size; + if (end_of_next - stub_group_start >= stub_group_size) + /* End of NEXT is too far from stubs, so stop. */ + break; + /* Add NEXT to the stub group. */ + head = next; + next = NEXT_SEC (head); + htab->stub_group[head->id].link_sec = curr; } } - tail = prev; + head = next; } } - while (list-- != htab->input_list); + while (list++ != htab->input_list + htab->top_index); free (htab->input_list); #undef PREV_SEC +#undef NEXT_SEC } -/* Determine and set the size of the stub section for a final link. - - The basic idea here is to examine all the relocations looking for - PC-relative calls to a target that is unreachable with a "bl" - instruction. */ +/* Comparison function for sorting/searching relocations relating to Cortex-A8 + erratum fix. */ -bfd_boolean -elf32_arm_size_stubs (bfd *output_bfd, - bfd *stub_bfd, - struct bfd_link_info *info, - bfd_signed_vma group_size, - asection * (*add_stub_section) (const char *, asection *), - void (*layout_sections_again) (void)) +static int +a8_reloc_compare (const void *a, const void *b) { - bfd_size_type stub_group_size; - bfd_boolean stubs_always_before_branch; - bfd_boolean stub_changed = 0; - struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info); - - /* Propagate mach to stub bfd, because it may not have been - finalized when we created stub_bfd. 
*/ - bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd), - bfd_get_mach (output_bfd)); + const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a; + const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b; - /* Stash our params away. */ - htab->stub_bfd = stub_bfd; - htab->add_stub_section = add_stub_section; - htab->layout_sections_again = layout_sections_again; - stubs_always_before_branch = group_size < 0; - if (group_size < 0) - stub_group_size = -group_size; + if (ra->from < rb->from) + return -1; + else if (ra->from > rb->from) + return 1; else - stub_group_size = group_size; - - if (stub_group_size == 1) - { - /* Default values. */ - /* Thumb branch range is +-4MB has to be used as the default - maximum size (a given section can contain both ARM and Thumb - code, so the worst case has to be taken into account). + return 0; +} - This value is 24K less than that, which allows for 2025 - 12-byte stubs. If we exceed that, then we will fail to link. - The user will have to relink with an explicit group size - option. */ - stub_group_size = 4170000; - } +static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *, + const char *, char **); - group_sections (htab, stub_group_size, stubs_always_before_branch); +/* Helper function to scan code for sequences which might trigger the Cortex-A8 + branch/TLB erratum. Fill in the table described by A8_FIXES_P, + NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false + otherwise. */ + +static bfd_boolean +cortex_a8_erratum_scan (bfd *input_bfd, + struct bfd_link_info *info, + struct a8_erratum_fix **a8_fixes_p, + unsigned int *num_a8_fixes_p, + unsigned int *a8_fix_table_size_p, + struct a8_erratum_reloc *a8_relocs, + unsigned int num_a8_relocs, + unsigned prev_num_a8_fixes, + bfd_boolean *stub_changed_p) +{ + asection *section; + struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info); + struct a8_erratum_fix *a8_fixes = *a8_fixes_p; + unsigned int num_a8_fixes = *num_a8_fixes_p; + unsigned int a8_fix_table_size = *a8_fix_table_size_p; + + for (section = input_bfd->sections; + section != NULL; + section = section->next) + { + bfd_byte *contents = NULL; + struct _arm_elf_section_data *sec_data; + unsigned int span; + bfd_vma base_vma; + + if (elf_section_type (section) != SHT_PROGBITS + || (elf_section_flags (section) & SHF_EXECINSTR) == 0 + || (section->flags & SEC_EXCLUDE) != 0 + || (section->sec_info_type == ELF_INFO_TYPE_JUST_SYMS) + || (section->output_section == bfd_abs_section_ptr)) + continue; + + base_vma = section->output_section->vma + section->output_offset; + + if (elf_section_data (section)->this_hdr.contents != NULL) + contents = elf_section_data (section)->this_hdr.contents; + else if (! bfd_malloc_and_get_section (input_bfd, section, &contents)) + return TRUE; + + sec_data = elf32_arm_section_data (section); + + for (span = 0; span < sec_data->mapcount; span++) + { + unsigned int span_start = sec_data->map[span].vma; + unsigned int span_end = (span == sec_data->mapcount - 1) + ? section->size : sec_data->map[span + 1].vma; + unsigned int i; + char span_type = sec_data->map[span].type; + bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE; + + if (span_type != 't') + continue; + + /* Span is entirely within a single 4KB region: skip scanning. 
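+	     The erratum can only involve a 32-bit Thumb-2 branch whose
+	     first halfword is the last halfword of a 4KB page, so a
+	     span that never crosses a page boundary is safe.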
*/ + if (((base_vma + span_start) & ~0xfff) + == ((base_vma + span_end) & ~0xfff)) + continue; + + /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where: + + * The opcode is BLX.W, BL.W, B.W, Bcc.W + * The branch target is in the same 4KB region as the + first half of the branch. + * The instruction before the branch is a 32-bit + length non-branch instruction. */ + for (i = span_start; i < span_end;) + { + unsigned int insn = bfd_getl16 (&contents[i]); + bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE; + bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch; + + if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000) + insn_32bit = TRUE; + + if (insn_32bit) + { + /* Load the rest of the insn (in manual-friendly order). */ + insn = (insn << 16) | bfd_getl16 (&contents[i + 2]); + + /* Encoding T4: B.W. */ + is_b = (insn & 0xf800d000) == 0xf0009000; + /* Encoding T1: BL.W. */ + is_bl = (insn & 0xf800d000) == 0xf000d000; + /* Encoding T2: BLX.W. */ + is_blx = (insn & 0xf800d000) == 0xf000c000; + /* Encoding T3: B.W (not permitted in IT block). */ + is_bcc = (insn & 0xf800d000) == 0xf0008000 + && (insn & 0x07f00000) != 0x03800000; + } + + is_32bit_branch = is_b || is_bl || is_blx || is_bcc; + + if (((base_vma + i) & 0xfff) == 0xffe + && insn_32bit + && is_32bit_branch + && last_was_32bit + && ! last_was_branch) + { + bfd_signed_vma offset; + bfd_boolean force_target_arm = FALSE; + bfd_boolean force_target_thumb = FALSE; + bfd_vma target; + enum elf32_arm_stub_type stub_type = arm_stub_none; + struct a8_erratum_reloc key, *found; + + key.from = base_vma + i; + found = (struct a8_erratum_reloc *) + bsearch (&key, a8_relocs, num_a8_relocs, + sizeof (struct a8_erratum_reloc), + &a8_reloc_compare); + + if (found) + { + char *error_message = NULL; + struct elf_link_hash_entry *entry; + + /* We don't care about the error returned from this + function, only if there is glue or not. */ + entry = find_thumb_glue (info, found->sym_name, + &error_message); + + if (entry) + found->non_a8_stub = TRUE; + + if (found->r_type == R_ARM_THM_CALL + && found->st_type != STT_ARM_TFUNC) + force_target_arm = TRUE; + else if (found->r_type == R_ARM_THM_CALL + && found->st_type == STT_ARM_TFUNC) + force_target_thumb = TRUE; + } + + /* Check if we have an offending branch instruction. */ + + if (found && found->non_a8_stub) + /* We've already made a stub for this instruction, e.g. + it's a long branch or a Thumb->ARM stub. Assume that + stub will suffice to work around the A8 erratum (see + setting of always_after_branch above). */ + ; + else if (is_bcc) + { + offset = (insn & 0x7ff) << 1; + offset |= (insn & 0x3f0000) >> 4; + offset |= (insn & 0x2000) ? 0x40000 : 0; + offset |= (insn & 0x800) ? 0x80000 : 0; + offset |= (insn & 0x4000000) ? 0x100000 : 0; + if (offset & 0x100000) + offset |= ~ ((bfd_signed_vma) 0xfffff); + stub_type = arm_stub_a8_veneer_b_cond; + } + else if (is_b || is_bl || is_blx) + { + int s = (insn & 0x4000000) != 0; + int j1 = (insn & 0x2000) != 0; + int j2 = (insn & 0x800) != 0; + int i1 = !(j1 ^ s); + int i2 = !(j2 ^ s); + + offset = (insn & 0x7ff) << 1; + offset |= (insn & 0x3ff0000) >> 4; + offset |= i2 << 22; + offset |= i1 << 23; + offset |= s << 24; + if (offset & 0x1000000) + offset |= ~ ((bfd_signed_vma) 0xffffff); + + if (is_blx) + offset &= ~ ((bfd_signed_vma) 3); + + stub_type = is_blx ? arm_stub_a8_veneer_blx : + is_bl ? 
arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b; + } + + if (stub_type != arm_stub_none) + { + bfd_vma pc_for_insn = base_vma + i + 4; + + /* The original instruction is a BL, but the target is + an ARM instruction. If we were not making a stub, + the BL would have been converted to a BLX. Use the + BLX stub instead in that case. */ + if (htab->use_blx && force_target_arm + && stub_type == arm_stub_a8_veneer_bl) + { + stub_type = arm_stub_a8_veneer_blx; + is_blx = TRUE; + is_bl = FALSE; + } + /* Conversely, if the original instruction was + BLX but the target is Thumb mode, use the BL + stub. */ + else if (force_target_thumb + && stub_type == arm_stub_a8_veneer_blx) + { + stub_type = arm_stub_a8_veneer_bl; + is_blx = FALSE; + is_bl = TRUE; + } + + if (is_blx) + pc_for_insn &= ~ ((bfd_vma) 3); + + /* If we found a relocation, use the proper destination, + not the offset in the (unrelocated) instruction. + Note this is always done if we switched the stub type + above. */ + if (found) + offset = + (bfd_signed_vma) (found->destination - pc_for_insn); + + target = pc_for_insn + offset; + + /* The BLX stub is ARM-mode code. Adjust the offset to + take the different PC value (+8 instead of +4) into + account. */ + if (stub_type == arm_stub_a8_veneer_blx) + offset += 4; + + if (((base_vma + i) & ~0xfff) == (target & ~0xfff)) + { + char *stub_name = NULL; + + if (num_a8_fixes == a8_fix_table_size) + { + a8_fix_table_size *= 2; + a8_fixes = (struct a8_erratum_fix *) + bfd_realloc (a8_fixes, + sizeof (struct a8_erratum_fix) + * a8_fix_table_size); + } + + if (num_a8_fixes < prev_num_a8_fixes) + { + /* If we're doing a subsequent scan, + check if we've found the same fix as + before, and try and reuse the stub + name. */ + stub_name = a8_fixes[num_a8_fixes].stub_name; + if ((a8_fixes[num_a8_fixes].section != section) + || (a8_fixes[num_a8_fixes].offset != i)) + { + free (stub_name); + stub_name = NULL; + *stub_changed_p = TRUE; + } + } + + if (!stub_name) + { + stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1); + if (stub_name != NULL) + sprintf (stub_name, "%x:%x", section->id, i); + } + + a8_fixes[num_a8_fixes].input_bfd = input_bfd; + a8_fixes[num_a8_fixes].section = section; + a8_fixes[num_a8_fixes].offset = i; + a8_fixes[num_a8_fixes].addend = offset; + a8_fixes[num_a8_fixes].orig_insn = insn; + a8_fixes[num_a8_fixes].stub_name = stub_name; + a8_fixes[num_a8_fixes].stub_type = stub_type; + + num_a8_fixes++; + } + } + } + + i += insn_32bit ? 4 : 2; + last_was_32bit = insn_32bit; + last_was_branch = is_32bit_branch; + } + } + + if (elf_section_data (section)->this_hdr.contents == NULL) + free (contents); + } + + *a8_fixes_p = a8_fixes; + *num_a8_fixes_p = num_a8_fixes; + *a8_fix_table_size_p = a8_fix_table_size; + + return FALSE; +} + +/* Determine and set the size of the stub section for a final link. + + The basic idea here is to examine all the relocations looking for + PC-relative calls to a target that is unreachable with a "bl" + instruction. 
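+
+   Sizing is iterative: adding stubs can move sections, which may in
+   turn change which branches are out of range (and, when the
+   Cortex-A8 workaround is enabled, which branches straddle a 4KB
+   page), so the main loop below repeats until the stub set is
+   stable.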
*/ + +bfd_boolean +elf32_arm_size_stubs (bfd *output_bfd, + bfd *stub_bfd, + struct bfd_link_info *info, + bfd_signed_vma group_size, + asection * (*add_stub_section) (const char *, asection *), + void (*layout_sections_again) (void)) +{ + bfd_size_type stub_group_size; + bfd_boolean stubs_always_after_branch; + struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info); + struct a8_erratum_fix *a8_fixes = NULL; + unsigned int num_a8_fixes = 0, a8_fix_table_size = 10; + struct a8_erratum_reloc *a8_relocs = NULL; + unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i; + + if (htab->fix_cortex_a8) + { + a8_fixes = (struct a8_erratum_fix *) + bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size); + a8_relocs = (struct a8_erratum_reloc *) + bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size); + } + + /* Propagate mach to stub bfd, because it may not have been + finalized when we created stub_bfd. */ + bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd), + bfd_get_mach (output_bfd)); + + /* Stash our params away. */ + htab->stub_bfd = stub_bfd; + htab->add_stub_section = add_stub_section; + htab->layout_sections_again = layout_sections_again; + stubs_always_after_branch = group_size < 0; + + /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page + as the first half of a 32-bit branch straddling two 4K pages. This is a + crude way of enforcing that. */ + if (htab->fix_cortex_a8) + stubs_always_after_branch = 1; + + if (group_size < 0) + stub_group_size = -group_size; + else + stub_group_size = group_size; + + if (stub_group_size == 1) + { + /* Default values. */ + /* Thumb branch range is +-4MB has to be used as the default + maximum size (a given section can contain both ARM and Thumb + code, so the worst case has to be taken into account). + + This value is 24K less than that, which allows for 2025 + 12-byte stubs. If we exceed that, then we will fail to link. + The user will have to relink with an explicit group size + option. */ + stub_group_size = 4170000; + } + + group_sections (htab, stub_group_size, stubs_always_after_branch); + + /* If we're applying the cortex A8 fix, we need to determine the + program header size now, because we cannot change it later -- + that could alter section placements. Notice the A8 erratum fix + ends up requiring the section addresses to remain unchanged + modulo the page size. That's something we cannot represent + inside BFD, and we don't want to force the section alignment to + be the page size. */ + if (htab->fix_cortex_a8) + (*htab->layout_sections_again) (); while (1) { bfd *input_bfd; unsigned int bfd_indx; asection *stub_sec; + bfd_boolean stub_changed = FALSE; + unsigned prev_num_a8_fixes = num_a8_fixes; + num_a8_fixes = 0; for (input_bfd = info->input_bfds, bfd_indx = 0; input_bfd != NULL; input_bfd = input_bfd->link_next, bfd_indx++) @@ -3561,6 +4304,8 @@ elf32_arm_size_stubs (bfd *output_bfd, asection *section; Elf_Internal_Sym *local_syms = NULL; + num_a8_relocs = 0; + /* We'll need the symbol table in a second. */ symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr; if (symtab_hdr->sh_info == 0) @@ -3609,6 +4354,7 @@ elf32_arm_size_stubs (bfd *output_bfd, char *stub_name; const asection *id_sec; unsigned char st_type; + bfd_boolean created_stub = FALSE; r_type = ELF32_R_TYPE (irela->r_info); r_indx = ELF32_R_SYM (irela->r_info); @@ -3622,9 +4368,14 @@ elf32_arm_size_stubs (bfd *output_bfd, goto error_ret_free_local; } - /* Only look for stubs on call instructions. 
*/ + /* Only look for stubs on branch instructions. */ if ((r_type != (unsigned int) R_ARM_CALL) - && (r_type != (unsigned int) R_ARM_THM_CALL)) + && (r_type != (unsigned int) R_ARM_THM_CALL) + && (r_type != (unsigned int) R_ARM_JUMP24) + && (r_type != (unsigned int) R_ARM_THM_JUMP19) + && (r_type != (unsigned int) R_ARM_THM_XPC22) + && (r_type != (unsigned int) R_ARM_THM_JUMP24) + && (r_type != (unsigned int) R_ARM_PLT32)) continue; /* Now determine the call target, its name, value, @@ -3656,6 +4407,11 @@ elf32_arm_size_stubs (bfd *output_bfd, sym = local_syms + r_indx; hdr = elf_elfsections (input_bfd)[sym->st_shndx]; sym_sec = hdr->bfd_section; + if (!sym_sec) + /* This is an undefined symbol. It can never + be resolved. */ + continue; + if (ELF_ST_TYPE (sym->st_info) != STT_SECTION) sym_value = sym->st_value; destination = (sym_value + irela->r_addend @@ -3686,17 +4442,52 @@ elf32_arm_size_stubs (bfd *output_bfd, { sym_sec = hash->root.root.u.def.section; sym_value = hash->root.root.u.def.value; - if (sym_sec->output_section != NULL) + + struct elf32_arm_link_hash_table *globals = + elf32_arm_hash_table (info); + + /* For a destination in a shared library, + use the PLT stub as target address to + decide whether a branch stub is + needed. */ + if (globals->splt != NULL && hash != NULL + && hash->root.plt.offset != (bfd_vma) -1) + { + sym_sec = globals->splt; + sym_value = hash->root.plt.offset; + if (sym_sec->output_section != NULL) + destination = (sym_value + + sym_sec->output_offset + + sym_sec->output_section->vma); + } + else if (sym_sec->output_section != NULL) destination = (sym_value + irela->r_addend + sym_sec->output_offset + sym_sec->output_section->vma); } - else if (hash->root.root.type == bfd_link_hash_undefweak - || hash->root.root.type == bfd_link_hash_undefined) - /* For a shared library, these will need a PLT stub, - which is treated separately. - For absolute code, they cannot be handled. */ - continue; + else if ((hash->root.root.type == bfd_link_hash_undefined) + || (hash->root.root.type == bfd_link_hash_undefweak)) + { + /* For a shared library, use the PLT stub as + target address to decide whether a long + branch stub is needed. + For absolute code, they cannot be handled. */ + struct elf32_arm_link_hash_table *globals = + elf32_arm_hash_table (info); + + if (globals->splt != NULL && hash != NULL + && hash->root.plt.offset != (bfd_vma) -1) + { + sym_sec = globals->splt; + sym_value = hash->root.plt.offset; + if (sym_sec->output_section != NULL) + destination = (sym_value + + sym_sec->output_offset + + sym_sec->output_section->vma); + } + else + continue; + } else { bfd_set_error (bfd_error_bad_value); @@ -3706,79 +4497,151 @@ elf32_arm_size_stubs (bfd *output_bfd, sym_name = hash->root.root.root.string; } - /* Determine what (if any) linker stub is needed. */ - stub_type = arm_type_of_stub (info, section, irela, st_type, - hash, destination, sym_sec, - input_bfd, sym_name); - if (stub_type == arm_stub_none) - continue; - - /* Support for grouping stub sections. */ - id_sec = htab->stub_group[section->id].link_sec; - - /* Get the name of this stub. */ - stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash, irela); - if (!stub_name) - goto error_ret_free_internal; - - stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, - stub_name, - FALSE, FALSE); - if (stub_entry != NULL) - { - /* The proper stub has already been created. 
*/ - free (stub_name); - continue; - } - - stub_entry = elf32_arm_add_stub (stub_name, section, htab); - if (stub_entry == NULL) - { - free (stub_name); - goto error_ret_free_internal; - } - - stub_entry->target_value = sym_value; - stub_entry->target_section = sym_sec; - stub_entry->stub_type = stub_type; - stub_entry->h = hash; - stub_entry->st_type = st_type; - - if (sym_name == NULL) - sym_name = "unnamed"; - stub_entry->output_name - = bfd_alloc (htab->stub_bfd, - sizeof (THUMB2ARM_GLUE_ENTRY_NAME) - + strlen (sym_name)); - if (stub_entry->output_name == NULL) + do { - free (stub_name); - goto error_ret_free_internal; - } + /* Determine what (if any) linker stub is needed. */ + stub_type = arm_type_of_stub (info, section, irela, + st_type, hash, + destination, sym_sec, + input_bfd, sym_name); + if (stub_type == arm_stub_none) + break; + + /* Support for grouping stub sections. */ + id_sec = htab->stub_group[section->id].link_sec; + + /* Get the name of this stub. */ + stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash, + irela); + if (!stub_name) + goto error_ret_free_internal; + + /* We've either created a stub for this reloc already, + or we are about to. */ + created_stub = TRUE; + + stub_entry = arm_stub_hash_lookup + (&htab->stub_hash_table, stub_name, + FALSE, FALSE); + if (stub_entry != NULL) + { + /* The proper stub has already been created. */ + free (stub_name); + stub_entry->target_value = sym_value; + break; + } - /* For historical reasons, use the existing names for - ARM-to-Thumb and Thumb-to-ARM stubs. */ - if (r_type == (unsigned int) R_ARM_THM_CALL - && st_type != STT_ARM_TFUNC) - sprintf (stub_entry->output_name, THUMB2ARM_GLUE_ENTRY_NAME, - sym_name); - else if (r_type == (unsigned int) R_ARM_CALL - && st_type == STT_ARM_TFUNC) - sprintf (stub_entry->output_name, ARM2THUMB_GLUE_ENTRY_NAME, - sym_name); - else - sprintf (stub_entry->output_name, STUB_ENTRY_NAME, - sym_name); + stub_entry = elf32_arm_add_stub (stub_name, section, + htab); + if (stub_entry == NULL) + { + free (stub_name); + goto error_ret_free_internal; + } - stub_changed = TRUE; + stub_entry->target_value = sym_value; + stub_entry->target_section = sym_sec; + stub_entry->stub_type = stub_type; + stub_entry->h = hash; + stub_entry->st_type = st_type; + + if (sym_name == NULL) + sym_name = "unnamed"; + stub_entry->output_name = (char *) + bfd_alloc (htab->stub_bfd, + sizeof (THUMB2ARM_GLUE_ENTRY_NAME) + + strlen (sym_name)); + if (stub_entry->output_name == NULL) + { + free (stub_name); + goto error_ret_free_internal; + } + + /* For historical reasons, use the existing names for + ARM-to-Thumb and Thumb-to-ARM stubs. */ + if ( ((r_type == (unsigned int) R_ARM_THM_CALL) + || (r_type == (unsigned int) R_ARM_THM_JUMP24)) + && st_type != STT_ARM_TFUNC) + sprintf (stub_entry->output_name, + THUMB2ARM_GLUE_ENTRY_NAME, sym_name); + else if ( ((r_type == (unsigned int) R_ARM_CALL) + || (r_type == (unsigned int) R_ARM_JUMP24)) + && st_type == STT_ARM_TFUNC) + sprintf (stub_entry->output_name, + ARM2THUMB_GLUE_ENTRY_NAME, sym_name); + else + sprintf (stub_entry->output_name, STUB_ENTRY_NAME, + sym_name); + + stub_changed = TRUE; + } + while (0); + + /* Look for relocations which might trigger Cortex-A8 + erratum. 
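     The test that follows flags only those 32-bit Thumb branches whose
     first halfword occupies the last two bytes of a 4K page, since only
     those can straddle the page boundary the erratum concerns.  Restated
     as a standalone predicate (helper name invented for illustration):

       // TRUE when a 4-byte Thumb-2 branch at FROM crosses a 4K boundary,
       // i.e. when FROM modulo 0x1000 equals 0xffe.
       static bfd_boolean
       branch_straddles_4k_page (bfd_vma from)
       {
         return (from & 0xfff) == 0xffe;
       }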
*/ + if (htab->fix_cortex_a8 + && (r_type == (unsigned int) R_ARM_THM_JUMP24 + || r_type == (unsigned int) R_ARM_THM_JUMP19 + || r_type == (unsigned int) R_ARM_THM_CALL + || r_type == (unsigned int) R_ARM_THM_XPC22)) + { + bfd_vma from = section->output_section->vma + + section->output_offset + + irela->r_offset; + + if ((from & 0xfff) == 0xffe) + { + /* Found a candidate. Note we haven't checked the + destination is within 4K here: if we do so (and + don't create an entry in a8_relocs) we can't tell + that a branch should have been relocated when + scanning later. */ + if (num_a8_relocs == a8_reloc_table_size) + { + a8_reloc_table_size *= 2; + a8_relocs = (struct a8_erratum_reloc *) + bfd_realloc (a8_relocs, + sizeof (struct a8_erratum_reloc) + * a8_reloc_table_size); + } + + a8_relocs[num_a8_relocs].from = from; + a8_relocs[num_a8_relocs].destination = destination; + a8_relocs[num_a8_relocs].r_type = r_type; + a8_relocs[num_a8_relocs].st_type = st_type; + a8_relocs[num_a8_relocs].sym_name = sym_name; + a8_relocs[num_a8_relocs].non_a8_stub = created_stub; + + num_a8_relocs++; + } + } } - /* We're done with the internal relocs, free them. */ - if (elf_section_data (section)->relocs == NULL) - free (internal_relocs); + /* We're done with the internal relocs, free them. */ + if (elf_section_data (section)->relocs == NULL) + free (internal_relocs); + } + + if (htab->fix_cortex_a8) + { + /* Sort relocs which might apply to Cortex-A8 erratum. */ + qsort (a8_relocs, num_a8_relocs, + sizeof (struct a8_erratum_reloc), + &a8_reloc_compare); + + /* Scan for branches which might trigger Cortex-A8 erratum. */ + if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes, + &num_a8_fixes, &a8_fix_table_size, + a8_relocs, num_a8_relocs, + prev_num_a8_fixes, &stub_changed) + != 0) + goto error_ret_free_local; } } + if (prev_num_a8_fixes != num_a8_fixes) + stub_changed = TRUE; + if (!stub_changed) break; @@ -3787,15 +4650,89 @@ elf32_arm_size_stubs (bfd *output_bfd, for (stub_sec = htab->stub_bfd->sections; stub_sec != NULL; stub_sec = stub_sec->next) - stub_sec->size = 0; + { + /* Ignore non-stub sections. */ + if (!strstr (stub_sec->name, STUB_SUFFIX)) + continue; + + stub_sec->size = 0; + } bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab); + /* Add Cortex-A8 erratum veneers to stub section sizes too. */ + if (htab->fix_cortex_a8) + for (i = 0; i < num_a8_fixes; i++) + { + stub_sec = elf32_arm_create_or_find_stub_sec (NULL, + a8_fixes[i].section, htab); + + if (stub_sec == NULL) + goto error_ret_free_local; + + stub_sec->size + += find_stub_size_and_template (a8_fixes[i].stub_type, NULL, + NULL); + } + + /* Ask the linker to do its stuff. */ (*htab->layout_sections_again) (); - stub_changed = FALSE; } + /* Add stubs for Cortex-A8 erratum fixes now. 
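     a8_reloc_compare is handed to qsort above but is defined outside this
     hunk; a comparator consistent with that use, ordering the records by
     their FROM address so the later scan can walk them linearly, could
     look like this (sketch under that assumption):

       static int
       a8_reloc_compare (const void *a, const void *b)
       {
         const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
         const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;

         if (ra->from < rb->from)
           return -1;
         if (ra->from > rb->from)
           return 1;
         return 0;
       }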
*/ + if (htab->fix_cortex_a8) + { + for (i = 0; i < num_a8_fixes; i++) + { + struct elf32_arm_stub_hash_entry *stub_entry; + char *stub_name = a8_fixes[i].stub_name; + asection *section = a8_fixes[i].section; + unsigned int section_id = a8_fixes[i].section->id; + asection *link_sec = htab->stub_group[section_id].link_sec; + asection *stub_sec = htab->stub_group[section_id].stub_sec; + const insn_sequence *template_sequence; + int template_size, size = 0; + + stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name, + TRUE, FALSE); + if (stub_entry == NULL) + { + (*_bfd_error_handler) (_("%s: cannot create stub entry %s"), + section->owner, + stub_name); + return FALSE; + } + + stub_entry->stub_sec = stub_sec; + stub_entry->stub_offset = 0; + stub_entry->id_sec = link_sec; + stub_entry->stub_type = a8_fixes[i].stub_type; + stub_entry->target_section = a8_fixes[i].section; + stub_entry->target_value = a8_fixes[i].offset; + stub_entry->target_addend = a8_fixes[i].addend; + stub_entry->orig_insn = a8_fixes[i].orig_insn; + stub_entry->st_type = STT_ARM_TFUNC; + + size = find_stub_size_and_template (a8_fixes[i].stub_type, + &template_sequence, + &template_size); + + stub_entry->stub_size = size; + stub_entry->stub_template = template_sequence; + stub_entry->stub_template_size = template_size; + } + + /* Stash the Cortex-A8 erratum fix array for use later in + elf32_arm_write_section(). */ + htab->a8_erratum_fixes = a8_fixes; + htab->num_a8_erratum_fixes = num_a8_fixes; + } + else + { + htab->a8_erratum_fixes = NULL; + htab->num_a8_erratum_fixes = 0; + } return TRUE; error_ret_free_local: @@ -3829,7 +4766,7 @@ elf32_arm_build_stubs (struct bfd_link_info *info) /* Allocate memory to hold the linker stubs. */ size = stub_sec->size; - stub_sec->contents = bfd_zalloc (htab->stub_bfd, size); + stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size); if (stub_sec->contents == NULL && size != 0) return FALSE; stub_sec->size = 0; @@ -3838,6 +4775,12 @@ elf32_arm_build_stubs (struct bfd_link_info *info) /* Build the stubs as directed by the stub hash table. */ table = &htab->stub_hash_table; bfd_hash_traverse (table, arm_build_one_stub, info); + if (htab->fix_cortex_a8) + { + /* Place the cortex a8 stubs last. */ + htab->fix_cortex_a8 = -1; + bfd_hash_traverse (table, arm_build_one_stub, info); + } return TRUE; } @@ -3856,8 +4799,8 @@ find_thumb_glue (struct bfd_link_info *link_info, /* We need a pointer to the armelf specific hash table. */ hash_table = elf32_arm_hash_table (link_info); - tmp_name = bfd_malloc ((bfd_size_type) strlen (name) - + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1); + tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name) + + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1); BFD_ASSERT (tmp_name); @@ -3890,8 +4833,8 @@ find_arm_glue (struct bfd_link_info *link_info, /* We need a pointer to the elfarm specific hash table. */ hash_table = elf32_arm_hash_table (link_info); - tmp_name = bfd_malloc ((bfd_size_type) strlen (name) - + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1); + tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name) + + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1); BFD_ASSERT (tmp_name); @@ -3985,14 +4928,23 @@ arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * na bfd_byte * contents; if (size == 0) - return; + { + /* Do not include empty glue sections in the output. 
*/ + if (abfd != NULL) + { + s = bfd_get_section_by_name (abfd, name); + if (s != NULL) + s->flags |= SEC_EXCLUDE; + } + return; + } BFD_ASSERT (abfd != NULL); s = bfd_get_section_by_name (abfd, name); BFD_ASSERT (s != NULL); - contents = bfd_alloc (abfd, size); + contents = (bfd_byte *) bfd_alloc (abfd, size); BFD_ASSERT (s->size == size); s->contents = contents; @@ -4051,7 +5003,8 @@ record_arm_to_thumb_glue (struct bfd_link_info * link_info, BFD_ASSERT (s != NULL); - tmp_name = bfd_malloc ((bfd_size_type) strlen (name) + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1); + tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name) + + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1); BFD_ASSERT (tmp_name); @@ -4097,86 +5050,6 @@ record_arm_to_thumb_glue (struct bfd_link_info * link_info, return myh; } -static void -record_thumb_to_arm_glue (struct bfd_link_info *link_info, - struct elf_link_hash_entry *h) -{ - const char *name = h->root.root.string; - asection *s; - char *tmp_name; - struct elf_link_hash_entry *myh; - struct bfd_link_hash_entry *bh; - struct elf32_arm_link_hash_table *hash_table; - bfd_vma val; - - hash_table = elf32_arm_hash_table (link_info); - - BFD_ASSERT (hash_table != NULL); - BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL); - - s = bfd_get_section_by_name - (hash_table->bfd_of_glue_owner, THUMB2ARM_GLUE_SECTION_NAME); - - BFD_ASSERT (s != NULL); - - tmp_name = bfd_malloc ((bfd_size_type) strlen (name) - + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1); - - BFD_ASSERT (tmp_name); - - sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name); - - myh = elf_link_hash_lookup - (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE); - - if (myh != NULL) - { - /* We've already seen this guy. */ - free (tmp_name); - return; - } - - /* The only trick here is using hash_table->thumb_glue_size as the value. - Even though the section isn't allocated yet, this is where we will be - putting it. The +1 on the value marks that the stub has not been - output yet - not that it is a Thumb function. */ - bh = NULL; - val = hash_table->thumb_glue_size + 1; - _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner, - tmp_name, BSF_GLOBAL, s, val, - NULL, TRUE, FALSE, &bh); - - /* If we mark it 'Thumb', the disassembler will do a better job. */ - myh = (struct elf_link_hash_entry *) bh; - myh->type = ELF_ST_INFO (STB_LOCAL, STT_ARM_TFUNC); - myh->forced_local = 1; - - free (tmp_name); - -#define CHANGE_TO_ARM "__%s_change_to_arm" -#define BACK_FROM_ARM "__%s_back_from_arm" - - /* Allocate another symbol to mark where we switch to Arm mode. */ - tmp_name = bfd_malloc ((bfd_size_type) strlen (name) - + strlen (CHANGE_TO_ARM) + 1); - - BFD_ASSERT (tmp_name); - - sprintf (tmp_name, CHANGE_TO_ARM, name); - - bh = NULL; - val = hash_table->thumb_glue_size + 4, - _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner, - tmp_name, BSF_LOCAL, s, val, - NULL, TRUE, FALSE, &bh); - - free (tmp_name); - - s->size += THUMB2ARM_GLUE_SIZE; - hash_table->thumb_glue_size += THUMB2ARM_GLUE_SIZE; -} - - /* Allocate space for ARMv4 BX veneers. */ static void @@ -4208,7 +5081,8 @@ record_arm_bx_glue (struct bfd_link_info * link_info, int reg) BFD_ASSERT (s != NULL); /* Add symbol for veneer. 
*/ - tmp_name = bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1); + tmp_name = (char *) + bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1); BFD_ASSERT (tmp_name); @@ -4245,7 +5119,8 @@ elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma) if (sec_data->map == NULL) { - sec_data->map = bfd_malloc (sizeof (elf32_arm_section_map)); + sec_data->map = (elf32_arm_section_map *) + bfd_malloc (sizeof (elf32_arm_section_map)); sec_data->mapcount = 0; sec_data->mapsize = 1; } @@ -4255,8 +5130,9 @@ elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma) if (sec_data->mapcount > sec_data->mapsize) { sec_data->mapsize *= 2; - sec_data->map = bfd_realloc_or_free (sec_data->map, sec_data->mapsize - * sizeof (elf32_arm_section_map)); + sec_data->map = (elf32_arm_section_map *) + bfd_realloc_or_free (sec_data->map, sec_data->mapsize + * sizeof (elf32_arm_section_map)); } if (sec_data->map) @@ -4299,8 +5175,8 @@ record_vfp11_erratum_veneer (struct bfd_link_info *link_info, BFD_ASSERT (s != NULL); - tmp_name = bfd_malloc ((bfd_size_type) strlen - (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10); + tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen + (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10); BFD_ASSERT (tmp_name); @@ -4324,7 +5200,8 @@ record_vfp11_erratum_veneer (struct bfd_link_info *link_info, /* Link veneer back to calling location. */ errcount = ++(sec_data->erratumcount); - newerr = bfd_zmalloc (sizeof (elf32_vfp11_erratum_list)); + newerr = (elf32_vfp11_erratum_list *) + bfd_zmalloc (sizeof (elf32_vfp11_erratum_list)); newerr->type = VFP11_ERRATUM_ARM_VENEER; newerr->vma = -1; @@ -4387,11 +5264,9 @@ record_vfp11_erratum_veneer (struct bfd_link_info *link_info, return val; } -/* Note: we do not include the flag SEC_LINKER_CREATED, as that - would prevent elf_link_input_bfd() from processing the contents - of the section. */ #define ARM_GLUE_SECTION_FLAGS \ - (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE | SEC_READONLY) + (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \ + | SEC_READONLY | SEC_LINKER_CREATED) /* Create a fake section for use by the ARM backend of the linker. */ @@ -4430,10 +5305,6 @@ bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd, if (info->relocatable) return TRUE; - /* Linker stubs don't need glue. */ - if (!strcmp (abfd->filename, "linker stubs")) - return TRUE; - return arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME) && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME) && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME) @@ -4552,9 +5423,6 @@ bfd_elf32_arm_process_before_allocation (bfd *abfd, /* These are the only relocation types we care about. */ if ( r_type != R_ARM_PC24 - && r_type != R_ARM_PLT32 - && r_type != R_ARM_JUMP24 - && r_type != R_ARM_THM_JUMP24 && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2)) continue; @@ -4606,26 +5474,13 @@ bfd_elf32_arm_process_before_allocation (bfd *abfd, switch (r_type) { case R_ARM_PC24: - case R_ARM_PLT32: - case R_ARM_JUMP24: /* This one is a call from arm code. We need to look up the target of the call. If it is a thumb target, we insert glue. */ - if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC - && !(r_type == R_ARM_CALL && globals->use_blx)) + if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC) record_arm_to_thumb_glue (link_info, h); break; - case R_ARM_THM_JUMP24: - /* This one is a call from thumb code. We look - up the target of the call. If it is not a thumb - target, we insert glue. 
*/ - if (ELF_ST_TYPE (h->type) != STT_ARM_TFUNC - && !(globals->use_blx && r_type == R_ARM_THM_CALL) - && h->root.type != bfd_link_hash_undefweak) - record_thumb_to_arm_glue (link_info, h); - break; - default: abort (); } @@ -4706,6 +5561,28 @@ bfd_elf32_arm_init_maps (bfd *abfd) } +/* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly + say what they wanted. */ + +void +bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info) +{ + struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info); + obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd); + + if (globals->fix_cortex_a8 == -1) + { + /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */ + if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7 + && (out_attr[Tag_CPU_arch_profile].i == 'A' + || out_attr[Tag_CPU_arch_profile].i == 0)) + globals->fix_cortex_a8 = 1; + else + globals->fix_cortex_a8 = 0; + } +} + + void bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info) { @@ -4816,7 +5693,7 @@ static enum bfd_arm_vfp11_pipe bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs, int *numregs) { - enum bfd_arm_vfp11_pipe pipe = VFP11_BAD; + enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD; bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0; if ((insn & 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */ @@ -4835,7 +5712,7 @@ bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs, case 1: /* fnmac[sd]. */ case 2: /* fmsc[sd]. */ case 3: /* fnmsc[sd]. */ - pipe = VFP11_FMAC; + vpipe = VFP11_FMAC; bfd_arm_vfp11_write_mask (destmask, fd); regs[0] = fd; regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */ @@ -4847,11 +5724,11 @@ bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs, case 5: /* fnmul[sd]. */ case 6: /* fadd[sd]. */ case 7: /* fsub[sd]. */ - pipe = VFP11_FMAC; + vpipe = VFP11_FMAC; goto vfp_binop; case 8: /* fdiv[sd]. */ - pipe = VFP11_DS; + vpipe = VFP11_DS; vfp_binop: bfd_arm_vfp11_write_mask (destmask, fd); regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */ @@ -4881,14 +5758,14 @@ bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs, case 27: /* ftosiz[sd]. */ /* These instructions will not bounce due to underflow. */ *numregs = 0; - pipe = VFP11_FMAC; + vpipe = VFP11_FMAC; break; case 3: /* fsqrt[sd]. */ /* fsqrt cannot underflow, but it can (perhaps) overwrite registers to cause the erratum in previous instructions. */ bfd_arm_vfp11_write_mask (destmask, fd); - pipe = VFP11_DS; + vpipe = VFP11_DS; break; case 15: /* fcvt{ds,sd}. */ @@ -4903,7 +5780,7 @@ bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs, *numregs = rnum; - pipe = VFP11_FMAC; + vpipe = VFP11_FMAC; } break; @@ -4933,7 +5810,7 @@ bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs, } } - pipe = VFP11_LS; + vpipe = VFP11_LS; } else if ((insn & 0x0e100e00) == 0x0c100a00) /* A load insn. */ { @@ -4968,7 +5845,7 @@ bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs, return VFP11_BAD; } - pipe = VFP11_LS; + vpipe = VFP11_LS; } /* Single-register transfer. Note L==0. 
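     Each arm of this decoder tests fixed bits of the instruction against
     a mask/value pair; written out as a helper the idiom is simply
     (sketch, helper name invented):

       static bfd_boolean
       insn_matches (unsigned int insn, unsigned int mask, unsigned int value)
       {
         return (insn & mask) == value;
       }

       // e.g. insn_matches (insn, 0x0f000e10, 0x0e000a00) recognizes the
       // VFP data-processing encodings handled at the top of this function.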
*/ else if ((insn & 0x0f100e10) == 0x0e000a10) @@ -4990,10 +5867,10 @@ bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs, break; } - pipe = VFP11_LS; + vpipe = VFP11_LS; } - return pipe; + return vpipe; } @@ -5113,17 +5990,17 @@ bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info) | (contents[i + 1] << 8) | contents[i]; unsigned int writemask = 0; - enum bfd_arm_vfp11_pipe pipe; + enum bfd_arm_vfp11_pipe vpipe; switch (state) { case 0: - pipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs, + vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs, &numregs); /* I'm assuming the VFP11 erratum can trigger with denorm operands on either the FMAC or the DS pipeline. This might lead to slightly overenthusiastic veneer insertion. */ - if (pipe == VFP11_FMAC || pipe == VFP11_DS) + if (vpipe == VFP11_FMAC || vpipe == VFP11_DS) { state = use_vector ? 1 : 2; first_fmac = i; @@ -5134,10 +6011,10 @@ bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info) case 1: { int other_regs[3], other_numregs; - pipe = bfd_arm_vfp11_insn_decode (insn, &writemask, + vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, other_regs, &other_numregs); - if (pipe != VFP11_BAD + if (vpipe != VFP11_BAD && bfd_arm_vfp11_antidependency (writemask, regs, numregs)) state = 3; @@ -5149,10 +6026,10 @@ bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info) case 2: { int other_regs[3], other_numregs; - pipe = bfd_arm_vfp11_insn_decode (insn, &writemask, + vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, other_regs, &other_numregs); - if (pipe != VFP11_BAD + if (vpipe != VFP11_BAD && bfd_arm_vfp11_antidependency (writemask, regs, numregs)) state = 3; @@ -5170,8 +6047,8 @@ bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info) if (state == 3) { - elf32_vfp11_erratum_list *newerr - = bfd_zmalloc (sizeof (elf32_vfp11_erratum_list)); + elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *) + bfd_zmalloc (sizeof (elf32_vfp11_erratum_list)); int errcount; errcount = ++(elf32_arm_section_data (sec)->erratumcount); @@ -5239,8 +6116,8 @@ bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd, globals = elf32_arm_hash_table (link_info); - tmp_name = bfd_malloc ((bfd_size_type) strlen - (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10); + tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen + (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10); for (sec = abfd->sections; sec != NULL; sec = sec->next) { @@ -5315,7 +6192,7 @@ bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd, int use_blx, bfd_arm_vfp11_fix vfp11_fix, int no_enum_warn, int no_wchar_warn, - int pic_veneer) + int pic_veneer, int fix_cortex_a8) { struct elf32_arm_link_hash_table *globals; @@ -5337,6 +6214,7 @@ bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd, globals->use_blx |= use_blx; globals->vfp11_fix = vfp11_fix; globals->pic_veneer = pic_veneer; + globals->fix_cortex_a8 = fix_cortex_a8; BFD_ASSERT (is_arm_elf (output_bfd)); elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn; @@ -5967,9 +6845,13 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, /* Handle relocations which should use the PLT entry. ABS32/REL32 will use the symbol's value, which may point to a PLT entry, but we don't need to handle that here. If we created a PLT entry, all - branches in this object should go to it. */ + branches in this object should go to it, except if the PLT is too + far away, in which case a long branch stub should be inserted. 
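     A condensed sketch of the redirection described here, reusing the
     field names the code below uses (illustrative, not the actual
     control flow):

       static bfd_vma
       plt_entry_address (const asection *splt,
                          const struct elf_link_hash_entry *h)
       {
         // Branch-type relocations against a symbol with a PLT entry
         // resolve to the PLT slot; reachability is then judged against
         // this address instead of the symbol itself.
         return (splt->output_section->vma
                 + splt->output_offset
                 + h->plt.offset);
       }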
*/ if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32 - && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI) + && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI + && r_type != R_ARM_CALL + && r_type != R_ARM_JUMP24 + && r_type != R_ARM_PLT32) && h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1) @@ -6122,15 +7004,9 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, case R_ARM_PC24: /* Arm B/BL instruction. */ case R_ARM_PLT32: { - bfd_vma from; bfd_signed_vma branch_offset; struct elf32_arm_stub_hash_entry *stub_entry = NULL; - from = (input_section->output_section->vma - + input_section->output_offset - + rel->r_offset); - branch_offset = (bfd_signed_vma)(value - from); - if (r_type == R_ARM_XPC25) { /* Check for Arm calling Arm function. */ @@ -6142,7 +7018,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, input_bfd, h ? h->root.root.string : "(local)"); } - else if (r_type != R_ARM_CALL) + else if (r_type == R_ARM_PC24) { /* Check for Arm calling Thumb function. */ if (sym_flags == STT_ARM_TFUNC) @@ -6160,11 +7036,37 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, /* Check if a stub has to be inserted because the destination is too far or we are changing mode. */ - if (r_type == R_ARM_CALL) + if ( r_type == R_ARM_CALL + || r_type == R_ARM_JUMP24 + || r_type == R_ARM_PLT32) { - if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET - || branch_offset < ARM_MAX_BWD_BRANCH_OFFSET - || sym_flags == STT_ARM_TFUNC) + bfd_vma from; + + /* If the call goes through a PLT entry, make sure to + check distance to the right destination address. */ + if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1) + { + value = (splt->output_section->vma + + splt->output_offset + + h->plt.offset); + *unresolved_reloc_p = FALSE; + /* The PLT entry is in ARM mode, regardless of the + target function. */ + sym_flags = STT_FUNC; + } + + from = (input_section->output_section->vma + + input_section->output_offset + + rel->r_offset); + branch_offset = (bfd_signed_vma)(value - from); + + if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET + || branch_offset < ARM_MAX_BWD_BRANCH_OFFSET + || ((sym_flags == STT_ARM_TFUNC) + && (((r_type == R_ARM_CALL) && !globals->use_blx) + || (r_type == R_ARM_JUMP24) + || (r_type == R_ARM_PLT32) )) + ) { /* The target is out of reach, so redirect the branch to the local stub for this function. */ @@ -6211,11 +7113,20 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, signed_addend >>= howto->rightshift; /* A branch to an undefined weak symbol is turned into a jump to - the next instruction. */ - if (h && h->root.type == bfd_link_hash_undefweak) + the next instruction unless a PLT entry will be created. + Do the same for local undefined symbols. + The jump to the next instruction is optimized as a NOP depending + on the architecture. */ + if (h ? (h->root.type == bfd_link_hash_undefweak + && !(splt != NULL && h->plt.offset != (bfd_vma) -1)) + : bfd_is_und_section (sym_sec)) { - value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000) - | 0x0affffff; + value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000); + + if (arch_has_arm_nop (globals)) + value |= 0x0320f000; + else + value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */ } else { @@ -6229,16 +7140,17 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, value = (signed_addend & howto->dst_mask) | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask)); - /* Set the H bit in the BLX instruction. 
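     Background on this bit (editor's sketch): bit 24, the "H" bit of an
     ARM-mode BLX, carries bit 1 of the Thumb target offset, giving the
     instruction halfword resolution.  The manipulation below reduces to:

       static bfd_vma
       set_blx_h_bit (bfd_vma insn, bfd_boolean halfword_offset)
       {
         if (halfword_offset)
           return insn | (1 << 24);
         return insn & ~(bfd_vma) (1 << 24);
       }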
*/ - if (sym_flags == STT_ARM_TFUNC) - { - if (addend) - value |= (1 << 24); - else - value &= ~(bfd_vma)(1 << 24); - } if (r_type == R_ARM_CALL) { + /* Set the H bit in the BLX instruction. */ + if (sym_flags == STT_ARM_TFUNC) + { + if (addend) + value |= (1 << 24); + else + value &= ~(bfd_vma)(1 << 24); + } + /* Select the correct instruction (BL or BLX). */ /* Only if we are not handling a BL to a stub. In this case, mode switching is performed by the stub. */ @@ -6374,6 +7286,40 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, return bfd_reloc_ok; } + case R_ARM_THM_PC8: + /* PR 10073: This reloc is not generated by the GNU toolchain, + but it is supported for compatibility with third party libraries + generated by other compilers, specifically the ARM/IAR. */ + { + bfd_vma insn; + bfd_signed_vma relocation; + + insn = bfd_get_16 (input_bfd, hit_data); + + if (globals->use_rel) + addend = (insn & 0x00ff) << 2; + + relocation = value + addend; + relocation -= (input_section->output_section->vma + + input_section->output_offset + + rel->r_offset); + + value = abs (relocation); + + /* We do not check for overflow of this reloc. Although strictly + speaking this is incorrect, it appears to be necessary in order + to work with IAR generated relocs. Since GCC and GAS do not + generate R_ARM_THM_PC8 relocs, the lack of a check should not be + a problem for them. */ + value &= 0x3fc; + + insn = (insn & 0xff00) | (value >> 2); + + bfd_put_16 (input_bfd, insn, hit_data); + + return bfd_reloc_ok; + } + case R_ARM_THM_PC12: /* Corresponds to: ldr.w reg, [pc, #offset]. */ { @@ -6425,15 +7371,25 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, bfd_vma check; bfd_signed_vma signed_check; int bitsize; - int thumb2 = using_thumb2 (globals); + const int thumb2 = using_thumb2 (globals); /* A branch to an undefined weak symbol is turned into a jump to - the next instruction unless a PLT entry will be created. */ + the next instruction unless a PLT entry will be created. + The jump to the next instruction is optimized as a NOP.W for + Thumb-2 enabled architectures. */ if (h && h->root.type == bfd_link_hash_undefweak && !(splt != NULL && h->plt.offset != (bfd_vma) -1)) { - bfd_put_16 (input_bfd, 0xe000, hit_data); - bfd_put_16 (input_bfd, 0xbf00, hit_data + 2); + if (arch_has_thumb2_nop (globals)) + { + bfd_put_16 (input_bfd, 0xf3af, hit_data); + bfd_put_16 (input_bfd, 0x8000, hit_data + 2); + } + else + { + bfd_put_16 (input_bfd, 0xe000, hit_data); + bfd_put_16 (input_bfd, 0xbf00, hit_data + 2); + } return bfd_reloc_ok; } @@ -6482,7 +7438,8 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, /* Convert BL to BLX. */ lower_insn = (lower_insn & ~0x1000) | 0x0800; } - else if (r_type != R_ARM_THM_CALL) + else if (( r_type != R_ARM_THM_CALL) + && (r_type != R_ARM_THM_JUMP24)) { if (elf32_thumb_to_arm_stub (info, sym_name, input_bfd, output_bfd, input_section, @@ -6512,14 +7469,18 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto, /* If the Thumb BLX instruction is available, convert the BL to a BLX instruction to call the ARM-mode PLT entry. */ lower_insn = (lower_insn & ~0x1000) | 0x0800; + sym_flags = STT_FUNC; } else - /* Target the Thumb stub before the ARM PLT entry. */ - value -= PLT_THUMB_STUB_SIZE; + { + /* Target the Thumb stub before the ARM PLT entry. 
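     When BLX is unavailable, a Thumb caller cannot switch modes at the
     ARM-mode PLT entry itself, so it enters through the small Thumb stub
     laid out immediately before the entry.  In effect (sketch reusing
     PLT_THUMB_STUB_SIZE from this file):

       static bfd_vma
       thumb_call_plt_target (bfd_vma plt_entry, bfd_boolean use_blx)
       {
         // With BLX, call the ARM-mode entry directly; otherwise aim at
         // the Thumb stub placed just before it.
         return use_blx ? plt_entry : plt_entry - PLT_THUMB_STUB_SIZE;
       }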
*/
+			value -= PLT_THUMB_STUB_SIZE;
+			sym_flags = STT_ARM_TFUNC;
+		      }
 		    *unresolved_reloc_p = FALSE;
 		  }
 
-		if (r_type == R_ARM_THM_CALL)
+		if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
 		  {
 		    /* Check if a stub has to be inserted because the destination
 		       is too far.  */
@@ -6539,7 +7500,9 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
 			(thumb2
 			 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
 			     || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
-			|| ((sym_flags != STT_ARM_TFUNC) && !globals->use_blx))
+			|| ((sym_flags != STT_ARM_TFUNC)
+			    && (((r_type == R_ARM_THM_CALL) && !globals->use_blx)
+				|| r_type == R_ARM_THM_JUMP24)))
 		      {
 			/* The target is out of reach or we are changing modes, so
 			   redirect the branch to the local stub for this
@@ -6553,7 +7516,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
 				   + stub_entry->stub_sec->output_section->vma);
 
 			/* If this call becomes a call to Arm, force BLX.  */
-			if (globals->use_blx)
+			if (globals->use_blx && (r_type == R_ARM_THM_CALL))
 			  {
 			    if ((stub_entry
 				 && !arm_stub_is_thumb (stub_entry->stub_type))
@@ -7143,7 +8106,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
 		(_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"),
 		 input_bfd, input_section,
 		 (long) rel->r_offset, howto->name);
-	      return FALSE;
+	      return (bfd_reloc_status_type) FALSE;
 	    }
 	  else
 	    value = tpoff (info, value);
@@ -7820,6 +8783,25 @@ elf32_arm_relocate_section (bfd * output_bfd,
 	  sym = local_syms + r_symndx;
 	  sym_type = ELF32_ST_TYPE (sym->st_info);
 	  sec = local_sections[r_symndx];
+
+	  /* An object file might have a reference to a local
+	     undefined symbol.  This is a daft object file, but we
+	     should at least do something about it.  V4BX & NONE
+	     relocations do not use the symbol and are explicitly
+	     allowed to use the undefined symbol, so allow those.  */
+	  if (r_type != R_ARM_V4BX
+	      && r_type != R_ARM_NONE
+	      && bfd_is_und_section (sec)
+	      && ELF_ST_BIND (sym->st_info) != STB_WEAK)
+	    {
+	      if (!info->callbacks->undefined_symbol
+		  (info, bfd_elf_string_from_elf_section
+		   (input_bfd, symtab_hdr->sh_link, sym->st_name),
+		   input_bfd, input_section,
+		   rel->r_offset, TRUE))
+		return FALSE;
+	    }
+
 	  if (globals->use_rel)
 	    {
 	      relocation = (sec->output_section->vma
@@ -8062,6 +9044,307 @@ elf32_arm_relocate_section (bfd * output_bfd,
   return TRUE;
 }
 
+/* Add a new unwind edit to the list described by HEAD, TAIL.  If TINDEX is
+   zero, the edit is added at the start of the list.  (The list must be built
+   in order of ascending TINDEX: the function's callers are primarily
+   responsible for maintaining that condition).  */
+
+static void
+add_unwind_table_edit (arm_unwind_table_edit **head,
+		       arm_unwind_table_edit **tail,
+		       arm_unwind_edit_type type,
+		       asection *linked_section,
+		       unsigned int tindex)
+{
+  arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
+      xmalloc (sizeof (arm_unwind_table_edit));
+
+  new_edit->type = type;
+  new_edit->linked_section = linked_section;
+  new_edit->index = tindex;
+
+  if (tindex > 0)
+    {
+      new_edit->next = NULL;
+
+      if (*tail)
+	(*tail)->next = new_edit;
+
+      (*tail) = new_edit;
+
+      if (!*head)
+	(*head) = new_edit;
+    }
+  else
+    {
+      new_edit->next = *head;
+
+      if (!*tail)
+	*tail = new_edit;
+
+      *head = new_edit;
+    }
+}
+
+static _arm_elf_section_data *get_arm_elf_section_data (asection *);
+
+/* Increase the size of EXIDX_SEC by ADJUST bytes.  ADJUST may be negative.
*/
+static void
+adjust_exidx_size (asection *exidx_sec, int adjust)
+{
+  asection *out_sec;
+
+  if (!exidx_sec->rawsize)
+    exidx_sec->rawsize = exidx_sec->size;
+
+  bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
+  out_sec = exidx_sec->output_section;
+  /* Adjust size of output section.  */
+  bfd_set_section_size (out_sec->owner, out_sec, out_sec->size + adjust);
+}
+
+/* Insert an EXIDX_CANTUNWIND marker at the end of a section.  */
+static void
+insert_cantunwind_after (asection *text_sec, asection *exidx_sec)
+{
+  struct _arm_elf_section_data *exidx_arm_data;
+
+  exidx_arm_data = get_arm_elf_section_data (exidx_sec);
+  add_unwind_table_edit (
+      &exidx_arm_data->u.exidx.unwind_edit_list,
+      &exidx_arm_data->u.exidx.unwind_edit_tail,
+      INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
+
+  adjust_exidx_size (exidx_sec, 8);
+}
+
+/* Scan .ARM.exidx tables, and create a list describing edits which should be
+   made to those tables, such that:
+
+   1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
+   2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
+      codes which have been inlined into the index).
+
+   The edits are applied when the tables are written
+   (in elf32_arm_write_section).  */
+
+bfd_boolean
+elf32_arm_fix_exidx_coverage (asection **text_section_order,
+			      unsigned int num_text_sections,
+			      struct bfd_link_info *info)
+{
+  bfd *inp;
+  unsigned int last_second_word = 0, i;
+  asection *last_exidx_sec = NULL;
+  asection *last_text_sec = NULL;
+  int last_unwind_type = -1;
+
+  /* Walk over all EXIDX sections, and create backlinks from the corresponding
+     text sections.  */
+  for (inp = info->input_bfds; inp != NULL; inp = inp->link_next)
+    {
+      asection *sec;
+
+      for (sec = inp->sections; sec != NULL; sec = sec->next)
+        {
+	  struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
+	  Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
+
+	  if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
+	    continue;
+
+	  if (elf_sec->linked_to)
+	    {
+	      Elf_Internal_Shdr *linked_hdr
+	        = &elf_section_data (elf_sec->linked_to)->this_hdr;
+	      struct _arm_elf_section_data *linked_sec_arm_data
+	        = get_arm_elf_section_data (linked_hdr->bfd_section);
+
+	      if (linked_sec_arm_data == NULL)
+	        continue;
+
+	      /* Link this .ARM.exidx section back from the text section it
+	         describes.  */
+	      linked_sec_arm_data->u.text.arm_exidx_sec = sec;
+	    }
+	}
+    }
+
+  /* Walk all text sections in order of increasing VMA.  Eliminate duplicate
+     index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
+     and add EXIDX_CANTUNWIND entries for sections with no unwind table data.  */
+
+  for (i = 0; i < num_text_sections; i++)
+    {
+      asection *sec = text_section_order[i];
+      asection *exidx_sec;
+      struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
+      struct _arm_elf_section_data *exidx_arm_data;
+      bfd_byte *contents = NULL;
+      int deleted_exidx_bytes = 0;
+      bfd_vma j;
+      arm_unwind_table_edit *unwind_edit_head = NULL;
+      arm_unwind_table_edit *unwind_edit_tail = NULL;
+      Elf_Internal_Shdr *hdr;
+      bfd *ibfd;
+
+      if (arm_data == NULL)
+        continue;
+
+      exidx_sec = arm_data->u.text.arm_exidx_sec;
+      if (exidx_sec == NULL)
+        {
+	  /* Section has no unwind data.  */
+	  if (last_unwind_type == 0 || !last_exidx_sec)
+	    continue;
+
+	  /* Ignore zero sized sections.  */
+	  if (sec->size == 0)
+	    continue;
+
+	  insert_cantunwind_after (last_text_sec, last_exidx_sec);
+	  last_unwind_type = 0;
+	  continue;
+	}
+
+      /* Skip /DISCARD/ sections.
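     Each .ARM.exidx entry is a pair of 32-bit words, and the scan below
     classifies an entry solely by its second word.  Restated as a helper
     (name invented; the constants come from the ARM EHABI):

       // Returns the unwind_type used in the loop below: 0 for an
       // EXIDX_CANTUNWIND marker, 1 for unwind opcodes inlined in the
       // table, 2 for a pointer to a .ARM.extab entry.
       static int
       exidx_entry_kind (unsigned int second_word)
       {
         if (second_word == 1)
           return 0;
         if ((second_word & 0x80000000u) != 0)
           return 1;
         return 2;
       }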
*/ + if (bfd_is_abs_section (exidx_sec->output_section)) + continue; + + hdr = &elf_section_data (exidx_sec)->this_hdr; + if (hdr->sh_type != SHT_ARM_EXIDX) + continue; + + exidx_arm_data = get_arm_elf_section_data (exidx_sec); + if (exidx_arm_data == NULL) + continue; + + ibfd = exidx_sec->owner; + + if (hdr->contents != NULL) + contents = hdr->contents; + else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents)) + /* An error? */ + continue; + + for (j = 0; j < hdr->sh_size; j += 8) + { + unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4); + int unwind_type; + int elide = 0; + + /* An EXIDX_CANTUNWIND entry. */ + if (second_word == 1) + { + if (last_unwind_type == 0) + elide = 1; + unwind_type = 0; + } + /* Inlined unwinding data. Merge if equal to previous. */ + else if ((second_word & 0x80000000) != 0) + { + if (last_second_word == second_word && last_unwind_type == 1) + elide = 1; + unwind_type = 1; + last_second_word = second_word; + } + /* Normal table entry. In theory we could merge these too, + but duplicate entries are likely to be much less common. */ + else + unwind_type = 2; + + if (elide) + { + add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail, + DELETE_EXIDX_ENTRY, NULL, j / 8); + + deleted_exidx_bytes += 8; + } + + last_unwind_type = unwind_type; + } + + /* Free contents if we allocated it ourselves. */ + if (contents != hdr->contents) + free (contents); + + /* Record edits to be applied later (in elf32_arm_write_section). */ + exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head; + exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail; + + if (deleted_exidx_bytes > 0) + adjust_exidx_size(exidx_sec, -deleted_exidx_bytes); + + last_exidx_sec = exidx_sec; + last_text_sec = sec; + } + + /* Add terminating CANTUNWIND entry. */ + if (last_exidx_sec && last_unwind_type != 0) + insert_cantunwind_after(last_text_sec, last_exidx_sec); + + return TRUE; +} + +static bfd_boolean +elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd, + bfd *ibfd, const char *name) +{ + asection *sec, *osec; + + sec = bfd_get_section_by_name (ibfd, name); + if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0) + return TRUE; + + osec = sec->output_section; + if (elf32_arm_write_section (obfd, info, sec, sec->contents)) + return TRUE; + + if (! bfd_set_section_contents (obfd, osec, sec->contents, + sec->output_offset, sec->size)) + return FALSE; + + return TRUE; +} + +static bfd_boolean +elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info) +{ + struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info); + + /* Invoke the regular ELF backend linker to do all the work. */ + if (!bfd_elf_final_link (abfd, info)) + return FALSE; + + /* Write out any glue sections now that we have created all the + stubs. */ + if (globals->bfd_of_glue_owner != NULL) + { + if (! elf32_arm_output_glue_section (info, abfd, + globals->bfd_of_glue_owner, + ARM2THUMB_GLUE_SECTION_NAME)) + return FALSE; + + if (! elf32_arm_output_glue_section (info, abfd, + globals->bfd_of_glue_owner, + THUMB2ARM_GLUE_SECTION_NAME)) + return FALSE; + + if (! elf32_arm_output_glue_section (info, abfd, + globals->bfd_of_glue_owner, + VFP11_ERRATUM_VENEER_SECTION_NAME)) + return FALSE; + + if (! elf32_arm_output_glue_section (info, abfd, + globals->bfd_of_glue_owner, + ARM_BX_GLUE_SECTION_NAME)) + return FALSE; + } + + return TRUE; +} + /* Set the right machine number. 
*/ static bfd_boolean @@ -8270,7 +9553,7 @@ set_secondary_compatible_arch (bfd *abfd, int arch) /* Note: the tag and its argument below are uleb128 values, though currently-defined values fit in one byte for each. */ if (!attr->s) - attr->s = bfd_alloc (abfd, 3); + attr->s = (char *) bfd_alloc (abfd, 3); attr->s[0] = Tag_CPU_arch; attr->s[1] = arch; attr->s[2] = '\0'; @@ -8355,6 +9638,23 @@ tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out, T(V6S_M), /* V6_M. */ T(V6S_M) /* V6S_M. */ }; + const int v7e_m[] = + { + -1, /* PRE_V4. */ + -1, /* V4. */ + T(V7E_M), /* V4T. */ + T(V7E_M), /* V5T. */ + T(V7E_M), /* V5TE. */ + T(V7E_M), /* V5TEJ. */ + T(V7E_M), /* V6. */ + T(V7E_M), /* V6KZ. */ + T(V7E_M), /* V6T2. */ + T(V7E_M), /* V6K. */ + T(V7E_M), /* V7. */ + T(V7E_M), /* V6_M. */ + T(V7E_M), /* V6S_M. */ + T(V7E_M) /* V7E_M. */ + }; const int v4t_plus_v6_m[] = { -1, /* PRE_V4. */ @@ -8370,6 +9670,7 @@ tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out, T(V7), /* V7. */ T(V6_M), /* V6_M. */ T(V6S_M), /* V6S_M. */ + T(V7E_M), /* V7E_M. */ T(V4T_PLUS_V6_M) /* V4T plus V6_M. */ }; const int *comb[] = @@ -8379,15 +9680,16 @@ tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out, v7, v6_m, v6s_m, + v7e_m, /* Pseudo-architecture. */ v4t_plus_v6_m }; /* Check we've not got a higher architecture than we know about. */ - if (oldtag >= MAX_TAG_CPU_ARCH || newtag >= MAX_TAG_CPU_ARCH) + if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH) { - _bfd_error_handler (_("ERROR: %B: Unknown CPU architecture"), ibfd); + _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd); return -1; } @@ -8425,7 +9727,7 @@ tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out, if (result == -1) { - _bfd_error_handler (_("ERROR: %B: Conflicting CPU architectures %d/%d"), + _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"), ibfd, oldtag, newtag); return -1; } @@ -8448,11 +9750,15 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd) /* Some tags have 0 = don't care, 1 = strong requirement, 2 = weak requirement. */ static const int order_021[3] = {0, 2, 1}; - /* For use with Tag_VFP_arch. */ - static const int order_01243[5] = {0, 1, 2, 4, 3}; int i; bfd_boolean result = TRUE; + /* Skip the linker stubs file. This preserves previous behavior + of accepting unknown attributes in the first input file - but + is that a bug? */ + if (ibfd->flags & BFD_LINKER_CREATED) + return TRUE; + if (!elf_known_obj_attributes_proc (obfd)[0].i) { /* This is the first object. Copy the attributes. */ @@ -8476,7 +9782,7 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd) else if (in_attr[Tag_ABI_FP_number_model].i != 0) { _bfd_error_handler - (_("ERROR: %B uses VFP register arguments, %B does not"), + (_("error: %B uses VFP register arguments, %B does not"), ibfd, obfd); result = FALSE; } @@ -8592,7 +9898,7 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd) binaries in the toolchain have had the attributes set properly. _bfd_error_handler - (_("ERROR: %B: 8-byte data alignment conflicts with %B"), + (_("error: %B: 8-byte data alignment conflicts with %B"), obfd, ibfd); result = FALSE; */ } @@ -8626,7 +9932,7 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd) else { _bfd_error_handler - (_("ERROR: %B: Conflicting architecture profiles %c/%c"), + (_("error: %B: Conflicting architecture profiles %c/%c"), ibfd, in_attr[i].i ? in_attr[i].i : '0', out_attr[i].i ? 
out_attr[i].i : '0');
@@ -8635,12 +9941,50 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
 	    }
 	  break;
 	case Tag_VFP_arch:
-	  /* Use the "greatest" from the sequence 0, 1, 2, 4, 3, or the
-	     largest value if greater than 4 (for future-proofing).  */
-	  if ((in_attr[i].i > 4 && in_attr[i].i > out_attr[i].i)
-	      || (in_attr[i].i <= 4 && out_attr[i].i <= 4
-		  && order_01243[in_attr[i].i] > order_01243[out_attr[i].i]))
-	    out_attr[i].i = in_attr[i].i;
+	  {
+	    static const struct
+	    {
+	      int ver;
+	      int regs;
+	    } vfp_versions[7] =
+	      {
+		{0, 0},
+		{1, 16},
+		{2, 16},
+		{3, 32},
+		{3, 16},
+		{4, 32},
+		{4, 16}
+	      };
+	    int ver;
+	    int regs;
+	    int newval;
+
+	    /* Values greater than 6 aren't defined, so just pick the
+	       biggest.  */
+	    if (in_attr[i].i > 6 && in_attr[i].i > out_attr[i].i)
+	      {
+		out_attr[i] = in_attr[i];
+		break;
+	      }
+	    /* The output uses the superset of input features
+	       (ISA version) and registers.  */
+	    ver = vfp_versions[in_attr[i].i].ver;
+	    if (ver < vfp_versions[out_attr[i].i].ver)
+	      ver = vfp_versions[out_attr[i].i].ver;
+	    regs = vfp_versions[in_attr[i].i].regs;
+	    if (regs < vfp_versions[out_attr[i].i].regs)
+	      regs = vfp_versions[out_attr[i].i].regs;
+	    /* This assumes all possible supersets are also valid
+	       options.  */
+	    for (newval = 6; newval > 0; newval--)
+	      {
+		if (regs == vfp_versions[newval].regs
+		    && ver == vfp_versions[newval].ver)
+		  break;
+	      }
+	    out_attr[i].i = newval;
+	  }
 	  break;
 	case Tag_PCS_config:
 	  if (out_attr[i].i == 0)
@@ -8659,7 +10003,7 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
 	      && in_attr[i].i != AEABI_R9_unused)
 	    {
 	      _bfd_error_handler
-		(_("ERROR: %B: Conflicting use of R9"), ibfd);
+		(_("error: %B: Conflicting use of R9"), ibfd);
 	      result = FALSE;
 	    }
 	  if (out_attr[i].i == AEABI_R9_unused)
@@ -8671,7 +10015,7 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
 	      && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
 	    {
 	      _bfd_error_handler
-		(_("ERROR: %B: SB relative addressing conflicts with use of R9"),
+		(_("error: %B: SB relative addressing conflicts with use of R9"),
 		 ibfd);
 	      result = FALSE;
 	    }
@@ -8727,7 +10071,7 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
 	  if (in_attr[i].i != out_attr[i].i)
 	    {
 	      _bfd_error_handler
-		(_("ERROR: %B uses iWMMXt register arguments, %B does not"),
+		(_("error: %B uses iWMMXt register arguments, %B does not"),
 		 ibfd, obfd);
 	      result = FALSE;
 	    }
@@ -8749,7 +10093,7 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
 	  if (in_attr[i].i != out_attr[i].i)
 	    {
 	      _bfd_error_handler
-		(_("ERROR: fp16 format mismatch between %B and %B"),
+		(_("error: fp16 format mismatch between %B and %B"),
 		 ibfd, obfd);
 	      result = FALSE;
 	    }
@@ -8920,266 +10264,48 @@ elf32_arm_versions_compatible (unsigned iver, unsigned over)
    object file when linking.  */
 
 static bfd_boolean
-elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
-{
-  flagword out_flags;
-  flagword in_flags;
-  bfd_boolean flags_compatible = TRUE;
-  asection *sec;
+elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd);
 
-  /* Check if we have the same endianess.  */
-  if (! _bfd_generic_verify_endian_match (ibfd, obfd))
-    return FALSE;
+/* Display the flags field.  */
 
-  if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
-    return TRUE;
+static bfd_boolean
+elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
+{
+  FILE * file = (FILE *) ptr;
+  unsigned long flags;
 
-  if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
-    return FALSE;
+  BFD_ASSERT (abfd != NULL && ptr != NULL);
 
-  /* The input BFD must have had its flags initialised.
*/ - /* The following seems bogus to me -- The flags are initialized in - the assembler but I don't think an elf_flags_init field is - written into the object. */ - /* BFD_ASSERT (elf_flags_init (ibfd)); */ + /* Print normal ELF private data. */ + _bfd_elf_print_private_bfd_data (abfd, ptr); - in_flags = elf_elfheader (ibfd)->e_flags; - out_flags = elf_elfheader (obfd)->e_flags; + flags = elf_elfheader (abfd)->e_flags; + /* Ignore init flag - it may not be set, despite the flags field + containing valid data. */ - /* In theory there is no reason why we couldn't handle this. However - in practice it isn't even close to working and there is no real - reason to want it. */ - if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4 - && !(ibfd->flags & DYNAMIC) - && (in_flags & EF_ARM_BE8)) - { - _bfd_error_handler (_("ERROR: %B is already in final BE8 format"), - ibfd); - return FALSE; - } + /* xgettext:c-format */ + fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags); - if (!elf_flags_init (obfd)) + switch (EF_ARM_EABI_VERSION (flags)) { - /* If the input is the default architecture and had the default - flags then do not bother setting the flags for the output - architecture, instead allow future merges to do this. If no - future merges ever set these flags then they will retain their - uninitialised values, which surprise surprise, correspond - to the default values. */ - if (bfd_get_arch_info (ibfd)->the_default - && elf_elfheader (ibfd)->e_flags == 0) - return TRUE; + case EF_ARM_EABI_UNKNOWN: + /* The following flag bits are GNU extensions and not part of the + official ARM ELF extended ABI. Hence they are only decoded if + the EABI version is not set. */ + if (flags & EF_ARM_INTERWORK) + fprintf (file, _(" [interworking enabled]")); - elf_flags_init (obfd) = TRUE; - elf_elfheader (obfd)->e_flags = in_flags; + if (flags & EF_ARM_APCS_26) + fprintf (file, " [APCS-26]"); + else + fprintf (file, " [APCS-32]"); - if (bfd_get_arch (obfd) == bfd_get_arch (ibfd) - && bfd_get_arch_info (obfd)->the_default) - return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd)); - - return TRUE; - } - - /* Determine what should happen if the input ARM architecture - does not match the output ARM architecture. */ - if (! bfd_arm_merge_machines (ibfd, obfd)) - return FALSE; - - /* Identical flags must be compatible. */ - if (in_flags == out_flags) - return TRUE; - - /* Check to see if the input BFD actually contains any sections. If - not, its flags may not have been initialised either, but it - cannot actually cause any incompatiblity. Do not short-circuit - dynamic objects; their section list may be emptied by - elf_link_add_object_symbols. - - Also check to see if there are no code sections in the input. - In this case there is no need to check for code specific flags. - XXX - do we need to worry about floating-point format compatability - in data sections ? */ - if (!(ibfd->flags & DYNAMIC)) - { - bfd_boolean null_input_bfd = TRUE; - bfd_boolean only_data_sections = TRUE; - - for (sec = ibfd->sections; sec != NULL; sec = sec->next) - { - /* Ignore synthetic glue sections. */ - if (strcmp (sec->name, ".glue_7") - && strcmp (sec->name, ".glue_7t")) - { - if ((bfd_get_section_flags (ibfd, sec) - & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS)) - == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS)) - only_data_sections = FALSE; - - null_input_bfd = FALSE; - break; - } - } - - if (null_input_bfd || only_data_sections) - return TRUE; - } - - /* Complain about various flag mismatches. 
*/ - if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags), - EF_ARM_EABI_VERSION (out_flags))) - { - _bfd_error_handler - (_("ERROR: Source object %B has EABI version %d, but target %B has EABI version %d"), - ibfd, obfd, - (in_flags & EF_ARM_EABIMASK) >> 24, - (out_flags & EF_ARM_EABIMASK) >> 24); - return FALSE; - } - - /* Not sure what needs to be checked for EABI versions >= 1. */ - /* VxWorks libraries do not use these flags. */ - if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed - && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed - && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN) - { - if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26)) - { - _bfd_error_handler - (_("ERROR: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"), - ibfd, obfd, - in_flags & EF_ARM_APCS_26 ? 26 : 32, - out_flags & EF_ARM_APCS_26 ? 26 : 32); - flags_compatible = FALSE; - } - - if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT)) - { - if (in_flags & EF_ARM_APCS_FLOAT) - _bfd_error_handler - (_("ERROR: %B passes floats in float registers, whereas %B passes them in integer registers"), - ibfd, obfd); - else - _bfd_error_handler - (_("ERROR: %B passes floats in integer registers, whereas %B passes them in float registers"), - ibfd, obfd); - - flags_compatible = FALSE; - } - - if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT)) - { - if (in_flags & EF_ARM_VFP_FLOAT) - _bfd_error_handler - (_("ERROR: %B uses VFP instructions, whereas %B does not"), - ibfd, obfd); - else - _bfd_error_handler - (_("ERROR: %B uses FPA instructions, whereas %B does not"), - ibfd, obfd); - - flags_compatible = FALSE; - } - - if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT)) - { - if (in_flags & EF_ARM_MAVERICK_FLOAT) - _bfd_error_handler - (_("ERROR: %B uses Maverick instructions, whereas %B does not"), - ibfd, obfd); - else - _bfd_error_handler - (_("ERROR: %B does not use Maverick instructions, whereas %B does"), - ibfd, obfd); - - flags_compatible = FALSE; - } - -#ifdef EF_ARM_SOFT_FLOAT - if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT)) - { - /* We can allow interworking between code that is VFP format - layout, and uses either soft float or integer regs for - passing floating point arguments and results. We already - know that the APCS_FLOAT flags match; similarly for VFP - flags. */ - if ((in_flags & EF_ARM_APCS_FLOAT) != 0 - || (in_flags & EF_ARM_VFP_FLOAT) == 0) - { - if (in_flags & EF_ARM_SOFT_FLOAT) - _bfd_error_handler - (_("ERROR: %B uses software FP, whereas %B uses hardware FP"), - ibfd, obfd); - else - _bfd_error_handler - (_("ERROR: %B uses hardware FP, whereas %B uses software FP"), - ibfd, obfd); - - flags_compatible = FALSE; - } - } -#endif - - /* Interworking mismatch is only a warning. */ - if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK)) - { - if (in_flags & EF_ARM_INTERWORK) - { - _bfd_error_handler - (_("Warning: %B supports interworking, whereas %B does not"), - ibfd, obfd); - } - else - { - _bfd_error_handler - (_("Warning: %B does not support interworking, whereas %B does"), - ibfd, obfd); - } - } - } - - return flags_compatible; -} - -/* Display the flags field. */ - -static bfd_boolean -elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr) -{ - FILE * file = (FILE *) ptr; - unsigned long flags; - - BFD_ASSERT (abfd != NULL && ptr != NULL); - - /* Print normal ELF private data. 
*/ - _bfd_elf_print_private_bfd_data (abfd, ptr); - - flags = elf_elfheader (abfd)->e_flags; - /* Ignore init flag - it may not be set, despite the flags field - containing valid data. */ - - /* xgettext:c-format */ - fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags); - - switch (EF_ARM_EABI_VERSION (flags)) - { - case EF_ARM_EABI_UNKNOWN: - /* The following flag bits are GNU extensions and not part of the - official ARM ELF extended ABI. Hence they are only decoded if - the EABI version is not set. */ - if (flags & EF_ARM_INTERWORK) - fprintf (file, _(" [interworking enabled]")); - - if (flags & EF_ARM_APCS_26) - fprintf (file, " [APCS-26]"); - else - fprintf (file, " [APCS-32]"); - - if (flags & EF_ARM_VFP_FLOAT) - fprintf (file, _(" [VFP float format]")); - else if (flags & EF_ARM_MAVERICK_FLOAT) - fprintf (file, _(" [Maverick float format]")); - else - fprintf (file, _(" [FPA float format]")); + if (flags & EF_ARM_VFP_FLOAT) + fprintf (file, _(" [VFP float format]")); + else if (flags & EF_ARM_MAVERICK_FLOAT) + fprintf (file, _(" [Maverick float format]")); + else + fprintf (file, _(" [FPA float format]")); if (flags & EF_ARM_APCS_FLOAT) fprintf (file, _(" [floats passed in float registers]")); @@ -9468,6 +10594,7 @@ elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info, bfd_vma *local_got_offsets; struct elf32_arm_link_hash_table *htab; bfd_boolean needs_plt; + unsigned long nsyms; if (info->relocatable) return TRUE; @@ -9491,7 +10618,8 @@ elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info, symtab_hdr = & elf_symtab_hdr (abfd); sym_hashes = elf_sym_hashes (abfd); - + nsyms = NUM_SHDR_ENTRIES (symtab_hdr); + rel_end = relocs + sec->reloc_count; for (rel = relocs; rel < rel_end; rel++) { @@ -9504,14 +10632,18 @@ elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info, r_type = ELF32_R_TYPE (rel->r_info); r_type = arm_real_reloc_type (htab, r_type); - if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr)) + if (r_symndx >= nsyms + /* PR 9934: It is possible to have relocations that do not + refer to symbols, thus it is also possible to have an + object file containing relocations but no symbol table. */ + && (r_symndx > 0 || nsyms > 0)) { (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd, - r_symndx); + r_symndx); return FALSE; } - if (r_symndx < symtab_hdr->sh_info) + if (nsyms == 0 || r_symndx < symtab_hdr->sh_info) h = NULL; else { @@ -9557,7 +10689,8 @@ elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info, size = symtab_hdr->sh_info; size *= (sizeof (bfd_signed_vma) + sizeof (char)); - local_got_refcounts = bfd_zalloc (abfd, size); + local_got_refcounts = (bfd_signed_vma *) + bfd_zalloc (abfd, size); if (local_got_refcounts == NULL) return FALSE; elf_local_got_refcounts (abfd) = local_got_refcounts; @@ -9620,16 +10753,27 @@ elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info, needs_plt = 1; goto normal_reloc; + case R_ARM_MOVW_ABS_NC: + case R_ARM_MOVT_ABS: + case R_ARM_THM_MOVW_ABS_NC: + case R_ARM_THM_MOVT_ABS: + if (info->shared) + { + (*_bfd_error_handler) + (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"), + abfd, elf32_arm_howto_table_1[r_type].name, + (h) ? h->root.root.string : "a local symbol"); + bfd_set_error (bfd_error_bad_value); + return FALSE; + } + + /* Fall through. 
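     Why these relocations are rejected for shared objects (editor's
     note): a MOVW/MOVT pair splits an absolute address across two
     instructions,

       movw r0, #:lower16:sym
       movt r0, #:upper16:sym

     and no single dynamic relocation can patch both halves at load
     time, so the address cannot be fixed up by the dynamic linker.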
*/ case R_ARM_ABS32: case R_ARM_ABS32_NOI: case R_ARM_REL32: case R_ARM_REL32_NOI: - case R_ARM_MOVW_ABS_NC: - case R_ARM_MOVT_ABS: case R_ARM_MOVW_PREL_NC: case R_ARM_MOVT_PREL: - case R_ARM_THM_MOVW_ABS_NC: - case R_ARM_THM_MOVT_ABS: case R_ARM_THM_MOVW_PREL_NC: case R_ARM_THM_MOVT_PREL: needs_plt = 0; @@ -9702,12 +10846,12 @@ elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info, return FALSE; /* BPABI objects never have dynamic relocations mapped. */ - if (! htab->symbian_p) + if (htab->symbian_p) { flagword flags; flags = bfd_get_section_flags (dynobj, sreloc); - flags |= (SEC_LOAD | SEC_ALLOC); + flags &= ~(SEC_LOAD | SEC_ALLOC); bfd_set_section_flags (dynobj, sreloc, flags); } } @@ -9723,15 +10867,19 @@ elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info, /* Track dynamic relocs needed for local syms too. We really need local syms available to do this easily. Oh well. */ - asection *s; void *vpp; + Elf_Internal_Sym *isym; - s = bfd_section_from_r_symndx (abfd, &htab->sym_sec, - sec, r_symndx); - if (s == NULL) + isym = bfd_sym_from_r_symndx (&htab->sym_cache, + abfd, r_symndx); + if (isym == NULL) return FALSE; + s = bfd_section_from_elf_index (abfd, isym->st_shndx); + if (s == NULL) + s = sec; + vpp = &elf_section_data (s)->local_dynrel; head = (struct elf32_arm_relocs_copied **) vpp; } @@ -9741,7 +10889,8 @@ elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info, { bfd_size_type amt = sizeof *p; - p = bfd_alloc (htab->root.dynobj, amt); + p = (struct elf32_arm_relocs_copied *) + bfd_alloc (htab->root.dynobj, amt); if (p == NULL) return FALSE; p->next = *head; @@ -10515,7 +11664,8 @@ elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED, { struct elf32_arm_relocs_copied *p; - for (p = elf_section_data (s)->local_dynrel; p != NULL; p = p->next) + for (p = (struct elf32_arm_relocs_copied *) + elf_section_data (s)->local_dynrel; p != NULL; p = p->next) { if (!bfd_is_abs_section (p->section) && bfd_is_abs_section (p->section->output_section)) @@ -10605,6 +11755,9 @@ elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED, ibfd->filename); } + /* Allocate space for the glue sections now that we've sized them. */ + bfd_elf32_arm_allocate_interworking_sections (info); + /* The check_relocs and adjust_dynamic_symbol entry points have determined the sizes of the various dynamic sections. Allocate memory for them. */ @@ -10666,7 +11819,7 @@ elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED, continue; /* Allocate memory for the section contents. 
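
   [The local-symbol bookkeeping above now goes through the generic
   sym_cache via bfd_sym_from_r_symndx instead of the old per-hash-table
   sym_sec cache.  The lookup-and-fallback sequence, sketched as a
   hypothetical wrapper:

     static asection *
     section_for_local_dynrel (struct sym_cache *cache, bfd *abfd,
                               unsigned long r_symndx, asection *sec)
     {
       Elf_Internal_Sym *isym;
       asection *s;

       isym = bfd_sym_from_r_symndx (cache, abfd, r_symndx);
       if (isym == NULL)
         return NULL;           /* Treated as an error by the caller.  */

       s = bfd_section_from_elf_index (abfd, isym->st_shndx);
       if (s == NULL)
         s = sec;               /* Absolute, common, etc.: charge the
                                   reloc to the section containing it.  */
       return s;
     }
   ]
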
*/ - s->contents = bfd_zalloc (dynobj, s->size); + s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size); if (s->contents == NULL) return FALSE; } @@ -11461,7 +12614,7 @@ record_section_with_arm_elf_section_data (asection * sec) { struct section_list * entry; - entry = bfd_malloc (sizeof (* entry)); + entry = (struct section_list *) bfd_malloc (sizeof (* entry)); if (entry == NULL) return; entry->sec = sec; @@ -11545,8 +12698,8 @@ typedef struct struct bfd_link_info *info; asection *sec; int sec_shndx; - bfd_boolean (*func) (void *, const char *, Elf_Internal_Sym *, - asection *, struct elf_link_hash_entry *); + int (*func) (void *, const char *, Elf_Internal_Sym *, + asection *, struct elf_link_hash_entry *); } output_arch_syminfo; enum map_symbol_type @@ -11576,9 +12729,7 @@ elf32_arm_output_map_sym (output_arch_syminfo *osi, sym.st_other = 0; sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE); sym.st_shndx = osi->sec_shndx; - if (!osi->func (osi->finfo, names[type], &sym, osi->sec, NULL)) - return FALSE; - return TRUE; + return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1; } @@ -11676,9 +12827,7 @@ elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name, sym.st_other = 0; sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC); sym.st_shndx = osi->sec_shndx; - if (!osi->func (osi->finfo, name, &sym, osi->sec, NULL)) - return FALSE; - return TRUE; + return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1; } static bfd_boolean @@ -11692,7 +12841,7 @@ arm_map_one_stub (struct bfd_hash_entry * gen_entry, bfd_vma addr; char *stub_name; output_arch_syminfo *osi; - const insn_sequence *template; + const insn_sequence *template_sequence; enum stub_insn_type prev_type; int size; int i; @@ -11715,33 +12864,36 @@ arm_map_one_stub (struct bfd_hash_entry * gen_entry, addr = (bfd_vma) stub_entry->stub_offset; stub_name = stub_entry->output_name; - template = stub_entry->stub_template; - switch(template[0].type) + template_sequence = stub_entry->stub_template; + switch (template_sequence[0].type) { case ARM_TYPE: if (!elf32_arm_output_stub_sym (osi, stub_name, addr, stub_entry->stub_size)) return FALSE; break; case THUMB16_TYPE: + case THUMB32_TYPE: if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1, stub_entry->stub_size)) return FALSE; break; default: BFD_FAIL (); + return 0; } prev_type = DATA_TYPE; size = 0; for (i = 0; i < stub_entry->stub_template_size; i++) { - switch(template[i].type) + switch (template_sequence[i].type) { case ARM_TYPE: sym_type = ARM_MAP_ARM; break; case THUMB16_TYPE: + case THUMB32_TYPE: sym_type = ARM_MAP_THUMB; break; @@ -11751,18 +12903,20 @@ arm_map_one_stub (struct bfd_hash_entry * gen_entry, default: BFD_FAIL (); + return FALSE; } - if (template[i].type != prev_type) + if (template_sequence[i].type != prev_type) { - prev_type = template[i].type; + prev_type = template_sequence[i].type; if (!elf32_arm_output_map_sym (osi, sym_type, addr + size)) return FALSE; } - switch(template[i].type) + switch (template_sequence[i].type) { case ARM_TYPE: + case THUMB32_TYPE: size += 4; break; @@ -11776,6 +12930,7 @@ arm_map_one_stub (struct bfd_hash_entry * gen_entry, default: BFD_FAIL (); + return FALSE; } } @@ -11788,10 +12943,10 @@ static bfd_boolean elf32_arm_output_arch_local_syms (bfd *output_bfd, struct bfd_link_info *info, void *finfo, - bfd_boolean (*func) (void *, const char *, - Elf_Internal_Sym *, - asection *, - struct elf_link_hash_entry *)) + int (*func) (void *, const char *, + Elf_Internal_Sym *, + asection *, + struct 
elf_link_hash_entry *)) { output_arch_syminfo osi; struct elf32_arm_link_hash_table *htab; @@ -11923,7 +13078,7 @@ elf32_arm_new_section_hook (bfd *abfd, asection *sec) _arm_elf_section_data *sdata; bfd_size_type amt = sizeof (*sdata); - sdata = bfd_zalloc (abfd, amt); + sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt); if (sdata == NULL) return FALSE; sec->used_by_bfd = sdata; @@ -11958,48 +13113,191 @@ elf32_arm_compare_mapping (const void * a, const void * b) return 0; } +/* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */ -/* Do code byteswapping. Return FALSE afterwards so that the section is - written out as normal. */ - -static bfd_boolean -elf32_arm_write_section (bfd *output_bfd, - struct bfd_link_info *link_info, - asection *sec, - bfd_byte *contents) +static unsigned long +offset_prel31 (unsigned long addr, bfd_vma offset) { - int mapcount, errcount; - _arm_elf_section_data *arm_data; - struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info); - elf32_arm_section_map *map; - elf32_vfp11_erratum_list *errnode; - bfd_vma ptr; - bfd_vma end; - bfd_vma offset = sec->output_section->vma + sec->output_offset; - bfd_byte tmp; - int i; + return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful); +} - /* If this section has not been allocated an _arm_elf_section_data - structure then we cannot record anything. */ - arm_data = get_arm_elf_section_data (sec); - if (arm_data == NULL) - return FALSE; +/* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31 + relocations. */ - mapcount = arm_data->mapcount; - map = arm_data->map; - errcount = arm_data->erratumcount; +static void +copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset) +{ + unsigned long first_word = bfd_get_32 (output_bfd, from); + unsigned long second_word = bfd_get_32 (output_bfd, from + 4); + + /* High bit of first word is supposed to be zero. */ + if ((first_word & 0x80000000ul) == 0) + first_word = offset_prel31 (first_word, offset); + + /* If the high bit of the first word is clear, and the bit pattern is not 0x1 + (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry. */ + if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0)) + second_word = offset_prel31 (second_word, offset); + + bfd_put_32 (output_bfd, first_word, to); + bfd_put_32 (output_bfd, second_word, to + 4); +} + +/* Data for make_branch_to_a8_stub(). */ + +struct a8_branch_to_stub_data { + asection *writing_section; + bfd_byte *contents; +}; - if (errcount != 0) - { - unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0; - for (errnode = arm_data->erratumlist; errnode != 0; - errnode = errnode->next) - { - bfd_vma index = errnode->vma - offset; +/* Helper to insert branches to Cortex-A8 erratum stubs in the right + places for a particular section. 
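
   [copy_exidx_entry above relies on the layout of .ARM.exidx entries: two
   32-bit words, the first a prel31 offset (a 31-bit place-relative value,
   top bit reserved) to the function start, the second either the literal
   0x1 (EXIDX_CANTUNWIND), an inline unwind descriptor (top bit set), or a
   prel31 offset into .ARM.extab.  That is why only second words with the
   top bit clear and a value other than 0x1 are rebiased.  A sketch of
   recovering the signed byte offset a prel31 field encodes, assuming
   two's-complement arithmetic:

     static bfd_signed_vma
     prel31_value (unsigned long word)
     {
       bfd_signed_vma offset = word & 0x7fffffff;

       /* Bit 30 is the sign bit of the 31-bit field.  */
       if (offset & 0x40000000)
         offset -= (bfd_signed_vma) 1 << 31;
       return offset;
     }
   ]
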
*/ - switch (errnode->type) - { +static bfd_boolean +make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry, + void *in_arg) +{ + struct elf32_arm_stub_hash_entry *stub_entry; + struct a8_branch_to_stub_data *data; + bfd_byte *contents; + unsigned long branch_insn; + bfd_vma veneered_insn_loc, veneer_entry_loc; + bfd_signed_vma branch_offset; + bfd *abfd; + unsigned int target; + + stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry; + data = (struct a8_branch_to_stub_data *) in_arg; + + if (stub_entry->target_section != data->writing_section + || stub_entry->stub_type < arm_stub_a8_veneer_b_cond) + return TRUE; + + contents = data->contents; + + veneered_insn_loc = stub_entry->target_section->output_section->vma + + stub_entry->target_section->output_offset + + stub_entry->target_value; + + veneer_entry_loc = stub_entry->stub_sec->output_section->vma + + stub_entry->stub_sec->output_offset + + stub_entry->stub_offset; + + if (stub_entry->stub_type == arm_stub_a8_veneer_blx) + veneered_insn_loc &= ~3u; + + branch_offset = veneer_entry_loc - veneered_insn_loc - 4; + + abfd = stub_entry->target_section->owner; + target = stub_entry->target_value; + + /* We attempt to avoid this condition by setting stubs_always_after_branch + in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround. + This check is just to be on the safe side... */ + if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff)) + { + (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is " + "allocated in unsafe location"), abfd); + return FALSE; + } + + switch (stub_entry->stub_type) + { + case arm_stub_a8_veneer_b: + case arm_stub_a8_veneer_b_cond: + branch_insn = 0xf0009000; + goto jump24; + + case arm_stub_a8_veneer_blx: + branch_insn = 0xf000e800; + goto jump24; + + case arm_stub_a8_veneer_bl: + { + unsigned int i1, j1, i2, j2, s; + + branch_insn = 0xf000d000; + + jump24: + if (branch_offset < -16777216 || branch_offset > 16777214) + { + /* There's not much we can do apart from complain if this + happens. */ + (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out " + "of range (input file too large)"), abfd); + return FALSE; + } + + /* i1 = not(j1 eor s), so: + not i1 = j1 eor s + j1 = (not i1) eor s. */ + + branch_insn |= (branch_offset >> 1) & 0x7ff; + branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16; + i2 = (branch_offset >> 22) & 1; + i1 = (branch_offset >> 23) & 1; + s = (branch_offset >> 24) & 1; + j1 = (!i1) ^ s; + j2 = (!i2) ^ s; + branch_insn |= j2 << 11; + branch_insn |= j1 << 13; + branch_insn |= s << 26; + } + break; + + default: + BFD_FAIL (); + return FALSE; + } + + bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[target]); + bfd_put_16 (abfd, branch_insn & 0xffff, &contents[target + 2]); + + return TRUE; +} + +/* Do code byteswapping. Return FALSE afterwards so that the section is + written out as normal. */ + +static bfd_boolean +elf32_arm_write_section (bfd *output_bfd, + struct bfd_link_info *link_info, + asection *sec, + bfd_byte *contents) +{ + unsigned int mapcount, errcount; + _arm_elf_section_data *arm_data; + struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info); + elf32_arm_section_map *map; + elf32_vfp11_erratum_list *errnode; + bfd_vma ptr; + bfd_vma end; + bfd_vma offset = sec->output_section->vma + sec->output_offset; + bfd_byte tmp; + unsigned int i; + + /* If this section has not been allocated an _arm_elf_section_data + structure then we cannot record anything. 
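
   [The i1/j1/i2/j2/s juggling above implements the Thumb-2 24-bit branch
   immediate shared by B.W, BL and BLX (the 0xf0009000, 0xf000d000 and
   0xf000e800 veneer opcodes).  Restated as a standalone encoder;
   encode_thumb2_branch is a hypothetical name, and the offset must already
   be inside the +/-16MB window checked above:

     static unsigned long
     encode_thumb2_branch (unsigned long opcode, bfd_signed_vma branch_offset)
     {
       unsigned long s  = (branch_offset >> 24) & 1;
       unsigned long i1 = (branch_offset >> 23) & 1;
       unsigned long i2 = (branch_offset >> 22) & 1;
       unsigned long j1 = (!i1) ^ s;    /* From i1 = NOT (j1 EOR s).  */
       unsigned long j2 = (!i2) ^ s;

       opcode |= (branch_offset >> 1) & 0x7ff;           /* imm11.  */
       opcode |= ((branch_offset >> 12) & 0x3ff) << 16;  /* imm10.  */
       opcode |= j2 << 11;
       opcode |= j1 << 13;
       opcode |= s << 26;
       return opcode;
     }

   The two bfd_put_16 calls that follow store the halves most-significant
   first, which is the in-memory order of 32-bit Thumb instructions.]
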
*/ + arm_data = get_arm_elf_section_data (sec); + if (arm_data == NULL) + return FALSE; + + mapcount = arm_data->mapcount; + map = arm_data->map; + errcount = arm_data->erratumcount; + + if (errcount != 0) + { + unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0; + + for (errnode = arm_data->erratumlist; errnode != 0; + errnode = errnode->next) + { + bfd_vma target = errnode->vma - offset; + + switch (errnode->type) + { case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER: { bfd_vma branch_to_veneer; @@ -12009,7 +13307,7 @@ elf32_arm_write_section (bfd *output_bfd, | 0x0a000000; /* The instruction is before the label. */ - index -= 4; + target -= 4; /* Above offset included in -4 below. */ branch_to_veneer = errnode->u.b.veneer->vma @@ -12021,10 +13319,10 @@ elf32_arm_write_section (bfd *output_bfd, "range"), output_bfd); insn |= (branch_to_veneer >> 2) & 0xffffff; - contents[endianflip ^ index] = insn & 0xff; - contents[endianflip ^ (index + 1)] = (insn >> 8) & 0xff; - contents[endianflip ^ (index + 2)] = (insn >> 16) & 0xff; - contents[endianflip ^ (index + 3)] = (insn >> 24) & 0xff; + contents[endianflip ^ target] = insn & 0xff; + contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff; + contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff; + contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff; } break; @@ -12044,17 +13342,17 @@ elf32_arm_write_section (bfd *output_bfd, /* Original instruction. */ insn = errnode->u.v.branch->u.b.vfp_insn; - contents[endianflip ^ index] = insn & 0xff; - contents[endianflip ^ (index + 1)] = (insn >> 8) & 0xff; - contents[endianflip ^ (index + 2)] = (insn >> 16) & 0xff; - contents[endianflip ^ (index + 3)] = (insn >> 24) & 0xff; + contents[endianflip ^ target] = insn & 0xff; + contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff; + contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff; + contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff; /* Branch back to insn after original insn. */ insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff); - contents[endianflip ^ (index + 4)] = insn & 0xff; - contents[endianflip ^ (index + 5)] = (insn >> 8) & 0xff; - contents[endianflip ^ (index + 6)] = (insn >> 16) & 0xff; - contents[endianflip ^ (index + 7)] = (insn >> 24) & 0xff; + contents[endianflip ^ (target + 4)] = insn & 0xff; + contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff; + contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff; + contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff; } break; @@ -12064,6 +13362,106 @@ elf32_arm_write_section (bfd *output_bfd, } } + if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX) + { + arm_unwind_table_edit *edit_node + = arm_data->u.exidx.unwind_edit_list; + /* Now, sec->size is the size of the section we will write. The original + size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND + markers) was sec->rawsize. (This isn't the case if we perform no + edits, then rawsize will be zero and we should use size). */ + bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size); + unsigned int input_size = sec->rawsize ? 
sec->rawsize : sec->size; + unsigned int in_index, out_index; + bfd_vma add_to_offsets = 0; + + for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;) + { + if (edit_node) + { + unsigned int edit_index = edit_node->index; + + if (in_index < edit_index && in_index * 8 < input_size) + { + copy_exidx_entry (output_bfd, edited_contents + out_index * 8, + contents + in_index * 8, add_to_offsets); + out_index++; + in_index++; + } + else if (in_index == edit_index + || (in_index * 8 >= input_size + && edit_index == UINT_MAX)) + { + switch (edit_node->type) + { + case DELETE_EXIDX_ENTRY: + in_index++; + add_to_offsets += 8; + break; + + case INSERT_EXIDX_CANTUNWIND_AT_END: + { + asection *text_sec = edit_node->linked_section; + bfd_vma text_offset = text_sec->output_section->vma + + text_sec->output_offset + + text_sec->size; + bfd_vma exidx_offset = offset + out_index * 8; + unsigned long prel31_offset; + + /* Note: this is meant to be equivalent to an + R_ARM_PREL31 relocation. These synthetic + EXIDX_CANTUNWIND markers are not relocated by the + usual BFD method. */ + prel31_offset = (text_offset - exidx_offset) + & 0x7ffffffful; + + /* First address we can't unwind. */ + bfd_put_32 (output_bfd, prel31_offset, + &edited_contents[out_index * 8]); + + /* Code for EXIDX_CANTUNWIND. */ + bfd_put_32 (output_bfd, 0x1, + &edited_contents[out_index * 8 + 4]); + + out_index++; + add_to_offsets -= 8; + } + break; + } + + edit_node = edit_node->next; + } + } + else + { + /* No more edits, copy remaining entries verbatim. */ + copy_exidx_entry (output_bfd, edited_contents + out_index * 8, + contents + in_index * 8, add_to_offsets); + out_index++; + in_index++; + } + } + + if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD)) + bfd_set_section_contents (output_bfd, sec->output_section, + edited_contents, + (file_ptr) sec->output_offset, sec->size); + + return TRUE; + } + + /* Fix code to point to Cortex-A8 erratum stubs. */ + if (globals->fix_cortex_a8) + { + struct a8_branch_to_stub_data data; + + data.writing_section = sec; + data.contents = contents; + + bfd_hash_traverse (&globals->stub_hash_table, make_branch_to_a8_stub, + &data); + } + if (mapcount == 0) return FALSE; @@ -12245,7 +13643,8 @@ elf32_arm_modify_segment_map (bfd *abfd, m = m->next; if (!m) { - m = bfd_zalloc (abfd, sizeof (struct elf_segment_map)); + m = (struct elf_segment_map *) + bfd_zalloc (abfd, sizeof (struct elf_segment_map)); if (m == NULL) return FALSE; m->p_type = PT_ARM_EXIDX; @@ -12340,6 +13739,7 @@ const struct elf_size_info elf32_arm_size_info = #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol #define bfd_elf32_close_and_cleanup elf32_arm_close_and_cleanup #define bfd_elf32_bfd_free_cached_info elf32_arm_bfd_free_cached_info +#define bfd_elf32_bfd_final_link elf32_arm_final_link #define elf_backend_get_symbol_type elf32_arm_get_symbol_type #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook @@ -12456,6 +13856,231 @@ elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker) #include "elf32-target.h" +/* Merge backend specific data from an object file to the output + object file when linking. */ + +static bfd_boolean +elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd) +{ + flagword out_flags; + flagword in_flags; + bfd_boolean flags_compatible = TRUE; + asection *sec; + + /* Check if we have the same endianess. */ + if (! _bfd_generic_verify_endian_match (ibfd, obfd)) + return FALSE; + + if (! is_arm_elf (ibfd) || ! 
is_arm_elf (obfd)) + return TRUE; + + if (!elf32_arm_merge_eabi_attributes (ibfd, obfd)) + return FALSE; + + /* The input BFD must have had its flags initialised. */ + /* The following seems bogus to me -- The flags are initialized in + the assembler but I don't think an elf_flags_init field is + written into the object. */ + /* BFD_ASSERT (elf_flags_init (ibfd)); */ + + in_flags = elf_elfheader (ibfd)->e_flags; + out_flags = elf_elfheader (obfd)->e_flags; + + /* In theory there is no reason why we couldn't handle this. However + in practice it isn't even close to working and there is no real + reason to want it. */ + if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4 + && !(ibfd->flags & DYNAMIC) + && (in_flags & EF_ARM_BE8)) + { + _bfd_error_handler (_("error: %B is already in final BE8 format"), + ibfd); + return FALSE; + } + + if (!elf_flags_init (obfd)) + { + /* If the input is the default architecture and had the default + flags then do not bother setting the flags for the output + architecture, instead allow future merges to do this. If no + future merges ever set these flags then they will retain their + uninitialised values, which surprise surprise, correspond + to the default values. */ + if (bfd_get_arch_info (ibfd)->the_default + && elf_elfheader (ibfd)->e_flags == 0) + return TRUE; + + elf_flags_init (obfd) = TRUE; + elf_elfheader (obfd)->e_flags = in_flags; + + if (bfd_get_arch (obfd) == bfd_get_arch (ibfd) + && bfd_get_arch_info (obfd)->the_default) + return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd)); + + return TRUE; + } + + /* Determine what should happen if the input ARM architecture + does not match the output ARM architecture. */ + if (! bfd_arm_merge_machines (ibfd, obfd)) + return FALSE; + + /* Identical flags must be compatible. */ + if (in_flags == out_flags) + return TRUE; + + /* Check to see if the input BFD actually contains any sections. If + not, its flags may not have been initialised either, but it + cannot actually cause any incompatiblity. Do not short-circuit + dynamic objects; their section list may be emptied by + elf_link_add_object_symbols. + + Also check to see if there are no code sections in the input. + In this case there is no need to check for code specific flags. + XXX - do we need to worry about floating-point format compatability + in data sections ? */ + if (!(ibfd->flags & DYNAMIC)) + { + bfd_boolean null_input_bfd = TRUE; + bfd_boolean only_data_sections = TRUE; + + for (sec = ibfd->sections; sec != NULL; sec = sec->next) + { + /* Ignore synthetic glue sections. */ + if (strcmp (sec->name, ".glue_7") + && strcmp (sec->name, ".glue_7t")) + { + if ((bfd_get_section_flags (ibfd, sec) + & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS)) + == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS)) + only_data_sections = FALSE; + + null_input_bfd = FALSE; + break; + } + } + + if (null_input_bfd || only_data_sections) + return TRUE; + } + + /* Complain about various flag mismatches. */ + if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags), + EF_ARM_EABI_VERSION (out_flags))) + { + _bfd_error_handler + (_("error: Source object %B has EABI version %d, but target %B has EABI version %d"), + ibfd, obfd, + (in_flags & EF_ARM_EABIMASK) >> 24, + (out_flags & EF_ARM_EABIMASK) >> 24); + return FALSE; + } + + /* Not sure what needs to be checked for EABI versions >= 1. */ + /* VxWorks libraries do not use these flags. 
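
   [The diagnostic above prints its EF_ARM_EABI_VERSION operands shifted
   down by 24 because the version is kept pre-shifted in the top byte of
   e_flags (EF_ARM_EABIMASK).  Two details of the flag tests rebuilt below,
   as sketches with hypothetical names:

     /* Extract the EABI version as a small integer.  */
     static int
     eabi_version_number (flagword e_flags)
     {
       return (e_flags & EF_ARM_EABIMASK) >> 24;
     }

     /* An EF_ARM_SOFT_FLOAT mismatch is only fatal if floats are passed
        in float registers or the VFP format is not in use: VFP-format
        code that passes floats in integer registers can interwork with
        either FP model.  */
     static bfd_boolean
     soft_float_mismatch_is_fatal (flagword in_flags)
     {
       return (in_flags & EF_ARM_APCS_FLOAT) != 0
              || (in_flags & EF_ARM_VFP_FLOAT) == 0;
     }
   ]
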
*/ + if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed + && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed + && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN) + { + if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26)) + { + _bfd_error_handler + (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"), + ibfd, obfd, + in_flags & EF_ARM_APCS_26 ? 26 : 32, + out_flags & EF_ARM_APCS_26 ? 26 : 32); + flags_compatible = FALSE; + } + + if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT)) + { + if (in_flags & EF_ARM_APCS_FLOAT) + _bfd_error_handler + (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"), + ibfd, obfd); + else + _bfd_error_handler + (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"), + ibfd, obfd); + + flags_compatible = FALSE; + } + + if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT)) + { + if (in_flags & EF_ARM_VFP_FLOAT) + _bfd_error_handler + (_("error: %B uses VFP instructions, whereas %B does not"), + ibfd, obfd); + else + _bfd_error_handler + (_("error: %B uses FPA instructions, whereas %B does not"), + ibfd, obfd); + + flags_compatible = FALSE; + } + + if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT)) + { + if (in_flags & EF_ARM_MAVERICK_FLOAT) + _bfd_error_handler + (_("error: %B uses Maverick instructions, whereas %B does not"), + ibfd, obfd); + else + _bfd_error_handler + (_("error: %B does not use Maverick instructions, whereas %B does"), + ibfd, obfd); + + flags_compatible = FALSE; + } + +#ifdef EF_ARM_SOFT_FLOAT + if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT)) + { + /* We can allow interworking between code that is VFP format + layout, and uses either soft float or integer regs for + passing floating point arguments and results. We already + know that the APCS_FLOAT flags match; similarly for VFP + flags. */ + if ((in_flags & EF_ARM_APCS_FLOAT) != 0 + || (in_flags & EF_ARM_VFP_FLOAT) == 0) + { + if (in_flags & EF_ARM_SOFT_FLOAT) + _bfd_error_handler + (_("error: %B uses software FP, whereas %B uses hardware FP"), + ibfd, obfd); + else + _bfd_error_handler + (_("error: %B uses hardware FP, whereas %B uses software FP"), + ibfd, obfd); + + flags_compatible = FALSE; + } + } +#endif + + /* Interworking mismatch is only a warning. */ + if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK)) + { + if (in_flags & EF_ARM_INTERWORK) + { + _bfd_error_handler + (_("Warning: %B supports interworking, whereas %B does not"), + ibfd, obfd); + } + else + { + _bfd_error_handler + (_("Warning: %B does not support interworking, whereas %B does"), + ibfd, obfd); + } + } + } + + return flags_compatible; +} + + /* Symbian OS Targets. */ #undef TARGET_LITTLE_SYM