#define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
#define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
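(For context: Thumb-2 B.W/BL encodes a signed 25-bit byte displacement relative to the PC, which reads as the branch address plus 4. That gives the limits above: at most (2^24 - 2) + 4 bytes forward and -(2^24) + 4 bytes backward from the instruction itself.)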
-static const bfd_vma arm_long_branch_stub[] =
+enum stub_insn_type
{
- 0xe51ff004, /* ldr pc, [pc, #-4] */
- 0x00000000, /* dcd R_ARM_ABS32(X) */
+ THUMB16_TYPE = 1,
+ THUMB32_TYPE,
+ ARM_TYPE,
+ DATA_TYPE
+ };
+
+#define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
+#define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
+#define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
+#define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
+#define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
+
+typedef struct
+{
+ bfd_vma data;
+ enum stub_insn_type type;
+ unsigned int r_type;
+ int reloc_addend;
+} insn_sequence;
+
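For reference, sizing a template reduces to a walk over its entries. A minimal sketch under the definitions above (the helper name is hypothetical and not part of the patch; arm_size_one_stub below performs the same walk inline):

/* Byte size of a stub template once emitted: Thumb-16 entries take
   2 bytes; ARM/Thumb-32 instructions and literal words take 4.  */
static int
stub_template_byte_size (const insn_sequence *seq, int nentries)
{
  int i, size = 0;

  for (i = 0; i < nentries; i++)
    size += (seq[i].type == THUMB16_TYPE) ? 2 : 4;
  return size;
}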
+/* ARM/Thumb -> ARM/Thumb long branch stub. On V5T and above, use blx
+ to reach the stub if necessary. */
+static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
+ {
+ ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
+ DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
+ };
+
+/* V4T ARM -> Thumb long branch stub. Used on V4T where blx is not
+ available. */
+static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
+ {
+ ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
+ ARM_INSN(0xe12fff1c), /* bx ip */
+ DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
+ };
+
+/* Thumb -> Thumb long branch stub. Used on architectures which
+ support only this mode, or on V4T where it is expensive to switch
+ to ARM. */
+static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
+ {
+ THUMB16_INSN(0xb401), /* push {r0} */
+ THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
+ THUMB16_INSN(0x4684), /* mov ip, r0 */
+ THUMB16_INSN(0xbc01), /* pop {r0} */
+ THUMB16_INSN(0x4760), /* bx ip */
+ THUMB16_INSN(0xbf00), /* nop */
+ DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
+ };
+
+/* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
+ available. */
+static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
+ {
+ THUMB16_INSN(0x4778), /* bx pc */
+ THUMB16_INSN(0x46c0), /* nop */
+ ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
+ DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
+ };
+
+/* V4T Thumb -> ARM short branch stub. A shorter variant of the
+ above, used when the destination is close enough. */
+static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
+ {
+ THUMB16_INSN(0x4778), /* bx pc */
+ THUMB16_INSN(0x46c0), /* nop */
+ ARM_REL_INSN(0xea000000, -8), /* b (X-8) */
};
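A note on the -8 addend above: assuming the usual pc-relative resolution S + A - P for R_ARM_JUMP24, while the hardware computes the destination as P + 8 + (imm24 << 2), the addend cancels the ARM pipeline offset. For example, with the branch at 0x8004 and target X = 0x8100, the linker encodes imm24 = (0x8100 - 8 - 0x8004) >> 2 = 0x3d, and the CPU branches to 0x8004 + 8 + (0x3d << 2) = 0x8100.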
-static const bfd_vma arm_thumb_v4t_long_branch_stub[] =
+/* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
+ blx to reach the stub if necessary. */
+static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
{
- 0xe59fc000, /* ldr ip, [pc, #0] */
- 0xe12fff1c, /* bx ip */
- 0x00000000, /* dcd R_ARM_ABS32(X) */
+ ARM_INSN(0xe59fc000), /* ldr ip, [pc] */
+ ARM_INSN(0xe08ff00c), /* add pc, pc, ip */
+ DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
};
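The R_ARM_REL32 addends record where each stub reads its PC relative to the literal word. In the stub above, "add pc, pc, ip" executes at stub+4 with PC reading stub+12 while the literal sits at stub+8, so storing X - 4 - (stub+8) (addend -4) makes the final PC come out at exactly X. The 0 and +4 addends in the stubs that follow fall out of the same arithmetic.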
-static const bfd_vma arm_thumb_thumb_long_branch_stub[] =
+/* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
+ blx to reach the stub if necessary. We cannot use an add into pc:
+ it is not guaranteed to mode switch (the behaviour differs between
+ ARMv6 and ARMv7). */
+static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
{
- 0x4e02b540, /* push {r6, lr} */
- /* ldr r6, [pc, #8] */
- 0x473046fe, /* mov lr, pc */
- /* bx r6 */
- 0xbf00bd40, /* pop {r6, pc} */
- /* nop */
- 0x00000000, /* dcd R_ARM_ABS32(X) */
+ ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
+ ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
+ ARM_INSN(0xe12fff1c), /* bx ip */
+ DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
};
-static const bfd_vma arm_thumb_arm_v4t_long_branch_stub[] =
+/* V4T ARM -> Thumb long branch stub, PIC. */
+static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
{
- 0x4e03b540, /* push {r6, lr} */
- /* ldr r6, [pc, #12] */
- 0x473046fe, /* mov lr, pc */
- /* bx r6 */
- 0xe8bd4040, /* pop {r6, pc} */
- 0xe12fff1e, /* bx lr */
- 0x00000000, /* dcd R_ARM_ABS32(X) */
+ ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
+ ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
+ ARM_INSN(0xe12fff1c), /* bx ip */
+ DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
};
-static const bfd_vma arm_thumb_arm_v4t_short_branch_stub[] =
+/* V4T Thumb -> ARM long branch stub, PIC. */
+static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
{
- 0x46c04778, /* bx pc */
- /* nop */
- 0xea000000, /* b (X) */
+ THUMB16_INSN(0x4778), /* bx pc */
+ THUMB16_INSN(0x46c0), /* nop */
+ ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
+ ARM_INSN(0xe08cf00f), /* add pc, ip, pc */
+ DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
};
-static const bfd_vma arm_pic_long_branch_stub[] =
+/* Thumb -> Thumb long branch stub, PIC. Used on architectures which
+ support only this mode, or on V4T where it is expensive to switch
+ to ARM. */
+static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
{
- 0xe59fc000, /* ldr r12, [pc] */
- 0xe08ff00c, /* add pc, pc, ip */
- 0x00000000, /* dcd R_ARM_REL32(X) */
+ THUMB16_INSN(0xb401), /* push {r0} */
+ THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
+ THUMB16_INSN(0x46fc), /* mov ip, pc */
+ THUMB16_INSN(0x4484), /* add ip, r0 */
+ THUMB16_INSN(0xbc01), /* pop {r0} */
+ THUMB16_INSN(0x4760), /* bx ip */
+ DATA_WORD(0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X+4) */
};
/* Section name for stubs is the associated section name plus this
enum elf32_arm_stub_type
{
arm_stub_none,
- arm_stub_long_branch,
- arm_thumb_v4t_stub_long_branch,
- arm_thumb_thumb_stub_long_branch,
- arm_thumb_arm_v4t_stub_long_branch,
- arm_thumb_arm_v4t_stub_short_branch,
- arm_stub_pic_long_branch,
+ arm_stub_long_branch_any_any,
+ arm_stub_long_branch_v4t_arm_thumb,
+ arm_stub_long_branch_thumb_only,
+ arm_stub_long_branch_v4t_thumb_arm,
+ arm_stub_short_branch_v4t_thumb_arm,
+ arm_stub_long_branch_any_arm_pic,
+ arm_stub_long_branch_any_thumb_pic,
+ arm_stub_long_branch_v4t_arm_thumb_pic,
+ arm_stub_long_branch_v4t_thumb_arm_pic,
+ arm_stub_long_branch_thumb_only_pic,
};
struct elf32_arm_stub_hash_entry
bfd_vma target_value;
asection *target_section;
+ /* The stub type. */
enum elf32_arm_stub_type stub_type;
+ /* Its encoding size in bytes. */
+ int stub_size;
+ /* Its template. */
+ const insn_sequence *stub_template;
+ /* The size of the template (number of entries). */
+ int stub_template_size;
/* The symbol table entry, if any, that this was derived from. */
struct elf32_arm_link_hash_entry *h;
eh->target_value = 0;
eh->target_section = NULL;
eh->stub_type = arm_stub_none;
+ eh->stub_size = 0;
+ eh->stub_template = NULL;
+ eh->stub_template_size = 0;
eh->h = NULL;
eh->id_sec = NULL;
}
{
switch (stub_type)
{
- case arm_thumb_thumb_stub_long_branch:
- case arm_thumb_arm_v4t_stub_long_branch:
- case arm_thumb_arm_v4t_stub_short_branch:
+ case arm_stub_long_branch_thumb_only:
+ case arm_stub_long_branch_v4t_thumb_arm:
+ case arm_stub_short_branch_v4t_thumb_arm:
+ case arm_stub_long_branch_v4t_thumb_arm_pic:
+ case arm_stub_long_branch_thumb_only_pic:
return TRUE;
case arm_stub_none:
BFD_FAIL ();
if (!thumb_only)
{
stub_type = (info->shared | globals->pic_veneer)
+ /* PIC stubs. */
? ((globals->use_blx)
- ? arm_stub_pic_long_branch
- : arm_stub_none)
- : (globals->use_blx)
- ? arm_stub_long_branch
- : arm_stub_none;
+ /* V5T and above. */
+ ? arm_stub_long_branch_any_thumb_pic
+ /* On V4T, use Thumb code only. */
+ : arm_stub_long_branch_thumb_only_pic)
+
+ /* non-PIC stubs. */
+ : ((globals->use_blx)
+ /* V5T and above. */
+ ? arm_stub_long_branch_any_any
+ /* V4T. */
+ : arm_stub_long_branch_thumb_only);
}
else
{
stub_type = (info->shared | globals->pic_veneer)
- ? arm_stub_none
- : (globals->use_blx)
- ? arm_thumb_thumb_stub_long_branch
- : arm_stub_none;
+ /* PIC stub. */
+ ? arm_stub_long_branch_thumb_only_pic
+ /* non-PIC stub. */
+ : arm_stub_long_branch_thumb_only;
}
}
else
}
stub_type = (info->shared | globals->pic_veneer)
+ /* PIC stubs. */
? ((globals->use_blx)
- ? arm_stub_pic_long_branch
- : arm_stub_none)
- : (globals->use_blx)
- ? arm_stub_long_branch
- : arm_thumb_arm_v4t_stub_long_branch;
+ /* V5T and above. */
+ ? arm_stub_long_branch_any_arm_pic
+ /* V4T PIC stub. */
+ : arm_stub_long_branch_v4t_thumb_arm_pic)
+
+ /* non-PIC stubs. */
+ : ((globals->use_blx)
+ /* V5T and above. */
+ ? arm_stub_long_branch_any_any
+ /* V4T. */
+ : arm_stub_long_branch_v4t_thumb_arm);
/* Handle v4t short branches. */
- if ((stub_type == arm_thumb_arm_v4t_stub_long_branch)
+ if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
&& (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
&& (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
- stub_type = arm_thumb_arm_v4t_stub_short_branch;
+ stub_type = arm_stub_short_branch_v4t_thumb_arm;
}
}
}
{
(*_bfd_error_handler)
(_("%B(%s): warning: interworking not enabled.\n"
- " first occurrence: %B: Thumb call to ARM"),
+ " first occurrence: %B: ARM call to Thumb"),
sym_sec->owner, input_bfd, name);
}
|| !globals->use_blx)
{
stub_type = (info->shared | globals->pic_veneer)
- ? arm_stub_pic_long_branch
- : (globals->use_blx)
- ? arm_stub_long_branch
- : arm_thumb_v4t_stub_long_branch;
+ /* PIC stubs. */
+ ? ((globals->use_blx)
+ /* V5T and above. */
+ ? arm_stub_long_branch_any_thumb_pic
+ /* V4T stub. */
+ : arm_stub_long_branch_v4t_arm_thumb_pic)
+
+ /* non-PIC stubs. */
+ : ((globals->use_blx)
+ /* V5T and above. */
+ ? arm_stub_long_branch_any_any
+ /* V4T. */
+ : arm_stub_long_branch_v4t_arm_thumb);
}
}
else
|| (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
{
stub_type = (info->shared | globals->pic_veneer)
- ? arm_stub_pic_long_branch
- : arm_stub_long_branch;
+ /* PIC stubs. */
+ ? arm_stub_long_branch_any_arm_pic
+ /* non-PIC stubs. */
+ : arm_stub_long_branch_any_any;
}
}
}
bfd_vma sym_value;
int template_size;
int size;
- const bfd_vma *template;
+ const insn_sequence *template;
int i;
struct elf32_arm_link_hash_table * globals;
+ int stub_reloc_idx = -1;
+ int stub_reloc_offset = 0;
/* Massage our args to the form they really have. */
stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
+ stub_entry->target_section->output_offset
+ stub_entry->target_section->output_section->vma);
- switch (stub_entry->stub_type)
- {
- case arm_stub_long_branch:
- template = arm_long_branch_stub;
- template_size = (sizeof (arm_long_branch_stub) / sizeof (bfd_vma)) * 4;
- break;
- case arm_thumb_v4t_stub_long_branch:
- template = arm_thumb_v4t_long_branch_stub;
- template_size = (sizeof (arm_thumb_v4t_long_branch_stub) / sizeof (bfd_vma)) * 4;
- break;
- case arm_thumb_thumb_stub_long_branch:
- template = arm_thumb_thumb_long_branch_stub;
- template_size = (sizeof (arm_thumb_thumb_long_branch_stub) / sizeof (bfd_vma)) * 4;
- break;
- case arm_thumb_arm_v4t_stub_long_branch:
- template = arm_thumb_arm_v4t_long_branch_stub;
- template_size = (sizeof (arm_thumb_arm_v4t_long_branch_stub) / sizeof (bfd_vma)) * 4;
- break;
- case arm_thumb_arm_v4t_stub_short_branch:
- template = arm_thumb_arm_v4t_short_branch_stub;
- template_size = (sizeof(arm_thumb_arm_v4t_short_branch_stub) / sizeof (bfd_vma)) * 4;
- break;
- case arm_stub_pic_long_branch:
- template = arm_pic_long_branch_stub;
- template_size = (sizeof (arm_pic_long_branch_stub) / sizeof (bfd_vma)) * 4;
- break;
- default:
- BFD_FAIL ();
- return FALSE;
- }
+ template = stub_entry->stub_template;
+ template_size = stub_entry->stub_template_size;
size = 0;
- for (i = 0; i < (template_size / 4); i++)
+ for (i = 0; i < template_size; i++)
{
- /* A 0 pattern is a placeholder, every other pattern is an
- instruction. */
- if (template[i] != 0)
- put_arm_insn (globals, stub_bfd, template[i], loc + size);
- else
- bfd_put_32 (stub_bfd, template[i], loc + size);
+ switch (template[i].type)
+ {
+ case THUMB16_TYPE:
+ put_thumb_insn (globals, stub_bfd, template[i].data, loc + size);
+ size += 2;
+ break;
- size += 4;
+ case ARM_TYPE:
+ put_arm_insn (globals, stub_bfd, template[i].data, loc + size);
+ /* Handle cases where the target is encoded within the
+ instruction. */
+ if (template[i].r_type == R_ARM_JUMP24)
+ {
+ stub_reloc_idx = i;
+ stub_reloc_offset = size;
+ }
+ size += 4;
+ break;
+
+ case DATA_TYPE:
+ bfd_put_32 (stub_bfd, template[i].data, loc + size);
+ stub_reloc_idx = i;
+ stub_reloc_offset = size;
+ size += 4;
+ break;
+
+ default:
+ BFD_FAIL ();
+ return FALSE;
+ }
}
+
stub_sec->size += size;
+ /* Stub size has already been computed in arm_size_one_stub. Check
+ consistency. */
+ BFD_ASSERT (size == stub_entry->stub_size);
+
/* Destination is Thumb. Force bit 0 to 1 to reflect this. */
if (stub_entry->st_type == STT_ARM_TFUNC)
sym_value |= 1;
- switch (stub_entry->stub_type)
- {
- case arm_stub_long_branch:
- _bfd_final_link_relocate (elf32_arm_howto_from_type (R_ARM_ABS32),
- stub_bfd, stub_sec, stub_sec->contents,
- stub_entry->stub_offset + 4, sym_value, 0);
- break;
- case arm_thumb_v4t_stub_long_branch:
- _bfd_final_link_relocate (elf32_arm_howto_from_type (R_ARM_ABS32),
- stub_bfd, stub_sec, stub_sec->contents,
- stub_entry->stub_offset + 8, sym_value, 0);
- break;
- case arm_thumb_thumb_stub_long_branch:
- _bfd_final_link_relocate (elf32_arm_howto_from_type (R_ARM_ABS32),
- stub_bfd, stub_sec, stub_sec->contents,
- stub_entry->stub_offset + 12, sym_value, 0);
- break;
- case arm_thumb_arm_v4t_stub_long_branch:
- _bfd_final_link_relocate (elf32_arm_howto_from_type (R_ARM_ABS32),
- stub_bfd, stub_sec, stub_sec->contents,
- stub_entry->stub_offset + 16, sym_value, 0);
- break;
- case arm_thumb_arm_v4t_stub_short_branch:
- {
- long int rel_offset;
- static const insn32 t2a3_b_insn = 0xea000000;
+ /* Assume there is one and only one entry to relocate in each stub. */
+ BFD_ASSERT (stub_reloc_idx != -1);
- rel_offset = sym_value - (stub_addr + 8 + 4);
-
- put_arm_insn (globals, stub_bfd,
- (bfd_vma) t2a3_b_insn | ((rel_offset >> 2) & 0x00FFFFFF),
- loc + 4);
- }
- break;
-
- case arm_stub_pic_long_branch:
- /* We want the value relative to the address 8 bytes from the
- start of the stub. */
- _bfd_final_link_relocate (elf32_arm_howto_from_type (R_ARM_REL32),
- stub_bfd, stub_sec, stub_sec->contents,
- stub_entry->stub_offset + 8, sym_value, 0);
- break;
- default:
- break;
- }
+ _bfd_final_link_relocate (elf32_arm_howto_from_type (template[stub_reloc_idx].r_type),
+ stub_bfd, stub_sec, stub_sec->contents,
+ stub_entry->stub_offset + stub_reloc_offset,
+ sym_value, template[stub_reloc_idx].reloc_addend);
return TRUE;
}
{
struct elf32_arm_stub_hash_entry *stub_entry;
struct elf32_arm_link_hash_table *htab;
- const bfd_vma *template;
+ const insn_sequence *template;
int template_size;
int size;
int i;
switch (stub_entry->stub_type)
{
- case arm_stub_long_branch:
- template = arm_long_branch_stub;
- template_size = (sizeof (arm_long_branch_stub) / sizeof (bfd_vma)) * 4;
+ case arm_stub_long_branch_any_any:
+ template = elf32_arm_stub_long_branch_any_any;
+ template_size = ARRAY_SIZE (elf32_arm_stub_long_branch_any_any);
+ break;
+ case arm_stub_long_branch_v4t_arm_thumb:
+ template = elf32_arm_stub_long_branch_v4t_arm_thumb;
+ template_size = ARRAY_SIZE (elf32_arm_stub_long_branch_v4t_arm_thumb);
+ break;
+ case arm_stub_long_branch_thumb_only:
+ template = elf32_arm_stub_long_branch_thumb_only;
+ template_size = ARRAY_SIZE (elf32_arm_stub_long_branch_thumb_only);
+ break;
+ case arm_stub_long_branch_v4t_thumb_arm:
+ template = elf32_arm_stub_long_branch_v4t_thumb_arm;
+ template_size = ARRAY_SIZE (elf32_arm_stub_long_branch_v4t_thumb_arm);
break;
- case arm_thumb_v4t_stub_long_branch:
- template = arm_thumb_v4t_long_branch_stub;
- template_size = (sizeof (arm_thumb_v4t_long_branch_stub) / sizeof (bfd_vma)) * 4;
+ case arm_stub_short_branch_v4t_thumb_arm:
+ template = elf32_arm_stub_short_branch_v4t_thumb_arm;
+ template_size = ARRAY_SIZE (elf32_arm_stub_short_branch_v4t_thumb_arm);
break;
- case arm_thumb_thumb_stub_long_branch:
- template = arm_thumb_thumb_long_branch_stub;
- template_size = (sizeof (arm_thumb_thumb_long_branch_stub) / sizeof (bfd_vma)) * 4;
+ case arm_stub_long_branch_any_arm_pic:
+ template = elf32_arm_stub_long_branch_any_arm_pic;
+ template_size = ARRAY_SIZE (elf32_arm_stub_long_branch_any_arm_pic);
break;
- case arm_thumb_arm_v4t_stub_long_branch:
- template = arm_thumb_arm_v4t_long_branch_stub;
- template_size = (sizeof (arm_thumb_arm_v4t_long_branch_stub) / sizeof (bfd_vma)) * 4;
+ case arm_stub_long_branch_any_thumb_pic:
+ template = elf32_arm_stub_long_branch_any_thumb_pic;
+ template_size = ARRAY_SIZE (elf32_arm_stub_long_branch_any_thumb_pic);
break;
- case arm_thumb_arm_v4t_stub_short_branch:
- template = arm_thumb_arm_v4t_short_branch_stub;
- template_size = (sizeof(arm_thumb_arm_v4t_short_branch_stub) / sizeof (bfd_vma)) * 4;
+ case arm_stub_long_branch_v4t_arm_thumb_pic:
+ template = elf32_arm_stub_long_branch_v4t_arm_thumb_pic;
+ template_size = ARRAY_SIZE (elf32_arm_stub_long_branch_v4t_arm_thumb_pic);
break;
- case arm_stub_pic_long_branch:
- template = arm_pic_long_branch_stub;
- template_size = (sizeof (arm_pic_long_branch_stub) / sizeof (bfd_vma)) * 4;
+ case arm_stub_long_branch_v4t_thumb_arm_pic:
+ template = elf32_arm_stub_long_branch_v4t_thumb_arm_pic;
+ template_size = ARRAY_SIZE (elf32_arm_stub_long_branch_v4t_thumb_arm_pic);
+ break;
+ case arm_stub_long_branch_thumb_only_pic:
+ template = elf32_arm_stub_long_branch_thumb_only_pic;
+ template_size = ARRAY_SIZE (elf32_arm_stub_long_branch_thumb_only_pic);
break;
default:
BFD_FAIL ();
return FALSE;
- break;
}
size = 0;
- for (i = 0; i < (template_size / 4); i++)
- size += 4;
+ for (i = 0; i < template_size; i++)
+ {
+ switch (template[i].type)
+ {
+ case THUMB16_TYPE:
+ size += 2;
+ break;
+
+ case ARM_TYPE:
+ size += 4;
+ break;
+
+ case DATA_TYPE:
+ size += 4;
+ break;
+
+ default:
+ BFD_FAIL ();
+ return FALSE;
+ }
+ }
+
+ stub_entry->stub_size = size;
+ stub_entry->stub_template = template;
+ stub_entry->stub_template_size = template_size;
+
size = (size + 7) & ~7;
stub_entry->stub_sec->size += size;
+
return TRUE;
}
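(The final (size + 7) & ~7 rounds each stub up to an 8-byte boundary: the 12-byte V4T Thumb -> ARM stub, for instance, reserves 16 bytes of its stub section.)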
/* Steal the link_sec pointer for our list. */
#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
/* This happens to make the list in reverse order,
- which is what we want. */
+ which we reverse later. */
PREV_SEC (isec) = *list;
*list = isec;
}
/* See whether we can group stub sections together. Grouping stub
sections may result in fewer stubs. More importantly, we need to
- put all .init* and .fini* stubs at the beginning of the .init or
+ put all .init* and .fini* stubs at the end of the .init or
.fini output sections respectively, because glibc splits the
_init and _fini functions into multiple parts. Putting a stub in
the middle of a function is not a good idea. */
static void
group_sections (struct elf32_arm_link_hash_table *htab,
bfd_size_type stub_group_size,
- bfd_boolean stubs_always_before_branch)
+ bfd_boolean stubs_always_after_branch)
{
- asection **list = htab->input_list + htab->top_index;
+ asection **list = htab->input_list;
do
{
asection *tail = *list;
+ asection *head;
+ asection *tp;
if (tail == bfd_abs_section_ptr)
continue;
- while (tail != NULL)
+ /* Reverse the list: we must avoid placing stubs at the
+ beginning of the section because the beginning of the text
+ section may be required for an interrupt vector in bare metal
+ code. */
+#define NEXT_SEC PREV_SEC
+ head = tail;
+ tp = NULL;
+ for (;;)
+ {
+ asection *h = PREV_SEC (head);
+ NEXT_SEC (head) = tp;
+ if (h == NULL)
+ break;
+ tp = head;
+ head = h;
+ }
+
+ while (head != NULL)
{
asection *curr;
- asection *prev;
+ asection *next;
bfd_size_type total;
- curr = tail;
- total = tail->size;
- while ((prev = PREV_SEC (curr)) != NULL
- && ((total += curr->output_offset - prev->output_offset)
+ curr = head;
+ total = head->size;
+ while ((next = NEXT_SEC (curr)) != NULL
+ && ((total += next->output_offset - curr->output_offset)
< stub_group_size))
- curr = prev;
+ curr = next;
- /* OK, the size from the start of CURR to the end is less
+ /* OK, the size from the start to the start of CURR is less
than stub_group_size and thus can be handled by one stub
- section. (Or the tail section is itself larger than
+ section. (Or the head section is itself larger than
stub_group_size, in which case we may be toast.)
We should really be keeping track of the total size of
stubs added here, as stubs contribute to the final output
section size. */
do
{
- prev = PREV_SEC (tail);
+ next = NEXT_SEC (head);
/* Set up this stub group. */
- htab->stub_group[tail->id].link_sec = curr;
+ htab->stub_group[head->id].link_sec = curr;
}
- while (tail != curr && (tail = prev) != NULL);
+ while (head != curr && (head = next) != NULL);
/* But wait, there's more! Input sections up to stub_group_size
- bytes before the stub section can be handled by it too. */
- if (!stubs_always_before_branch)
+ bytes after the stub section can be handled by it too. */
+ if (!stubs_always_after_branch)
{
total = 0;
- while (prev != NULL
- && ((total += tail->output_offset - prev->output_offset)
+ while (next != NULL
+ && ((total += next->output_offset - head->output_offset)
< stub_group_size))
{
- tail = prev;
- prev = PREV_SEC (tail);
- htab->stub_group[tail->id].link_sec = curr;
+ head = next;
+ next = NEXT_SEC (head);
+ htab->stub_group[head->id].link_sec = curr;
}
}
- tail = prev;
+ head = next;
}
}
- while (list-- != htab->input_list);
+ while (list++ != htab->input_list + htab->top_index);
free (htab->input_list);
#undef PREV_SEC
+#undef NEXT_SEC
}
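The in-place reversal at the top of group_sections is the classic pointer flip, reusing the link_sec field through the PREV_SEC/NEXT_SEC aliases. The same idiom on a standalone list (hypothetical type, for illustration only):

struct node { struct node *next; };

/* Reverse a singly-linked list in place and return the new head.  */
static struct node *
reverse_list (struct node *head)
{
  struct node *prev = NULL;

  while (head != NULL)
    {
      struct node *next = head->next;  /* Save the remainder.  */
      head->next = prev;               /* Flip this link.  */
      prev = head;
      head = next;
    }
  return prev;
}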
/* Determine and set the size of the stub section for a final link.
void (*layout_sections_again) (void))
{
bfd_size_type stub_group_size;
- bfd_boolean stubs_always_before_branch;
+ bfd_boolean stubs_always_after_branch;
bfd_boolean stub_changed = 0;
struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
htab->stub_bfd = stub_bfd;
htab->add_stub_section = add_stub_section;
htab->layout_sections_again = layout_sections_again;
- stubs_always_before_branch = group_size < 0;
+ stubs_always_after_branch = group_size < 0;
if (group_size < 0)
stub_group_size = -group_size;
else
stub_group_size = 4170000;
}
- group_sections (htab, stub_group_size, stubs_always_before_branch);
+ group_sections (htab, stub_group_size, stubs_always_after_branch);
while (1)
{
/* This one is a call from arm code. We need to look up
the target of the call. If it is a thumb target, we
insert glue. */
- if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC
- && !(r_type == R_ARM_CALL && globals->use_blx))
+ if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
record_arm_to_thumb_glue (link_info, h);
break;
if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
return TRUE;
+ /* Skip this BFD if it corresponds to an executable or dynamic object. */
+ if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
+ return TRUE;
+
for (sec = abfd->sections; sec != NULL; sec = sec->next)
{
unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
if (elf_section_type (sec) != SHT_PROGBITS
|| (elf_section_flags (sec) & SHF_EXECINSTR) == 0
|| (sec->flags & SEC_EXCLUDE) != 0
+ || sec->sec_info_type == ELF_INFO_TYPE_JUST_SYMS
+ || sec->output_section == bfd_abs_section_ptr
|| strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
continue;
elf32_arm_obj_attrs_arg_type (int tag)
{
if (tag == Tag_compatibility)
- return 3;
- else if (tag == 4 || tag == 5)
- return 2;
+ return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
+ else if (tag == Tag_nodefaults)
+ return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
+ else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
+ return ATTR_TYPE_FLAG_STR_VAL;
else if (tag < 32)
- return 1;
+ return ATTR_TYPE_FLAG_INT_VAL;
else
- return (tag & 1) != 0 ? 2 : 1;
+ return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
+}
+
+/* The ABI defines that Tag_conformance should be emitted first, and that
+ Tag_nodefaults should be second (if either is defined). This sets those
+ two positions, and bumps up the position of all the remaining tags to
+ compensate. */
+static int
+elf32_arm_obj_attrs_order (int num)
+{
+ if (num == 4)
+ return Tag_conformance;
+ if (num == 5)
+ return Tag_nodefaults;
+ if ((num - 2) < Tag_nodefaults)
+ return num - 2;
+ if ((num - 1) < Tag_conformance)
+ return num - 1;
+ return num;
+}
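Assuming the usual EABI tag values (Tag_nodefaults = 64, Tag_conformance = 67) and that emission starts at position 4, this maps positions 4 and 5 to tags 67 and 64, positions 6..65 to tags 4..63, positions 66 and 67 to tags 65 and 66, and is the identity from 68 onwards.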
+
+/* Read the architecture from the Tag_also_compatible_with attribute, if any.
+ Returns -1 if no architecture could be read. */
+
+static int
+get_secondary_compatible_arch (bfd *abfd)
+{
+ obj_attribute *attr =
+ &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
+
+ /* Note: the tag and its argument below are uleb128 values, though
+ currently-defined values fit in one byte for each. */
+ if (attr->s
+ && attr->s[0] == Tag_CPU_arch
+ && (attr->s[1] & 128) != 128
+ && attr->s[2] == 0)
+ return attr->s[1];
+
+ /* This tag is "safely ignorable", so don't complain if it looks funny. */
+ return -1;
}
+/* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
+ The tag is removed if ARCH is -1. */
+
static void
-elf32_arm_copy_one_eabi_other_attribute (bfd *ibfd, bfd *obfd, obj_attribute_list *in_list)
+set_secondary_compatible_arch (bfd *abfd, int arch)
{
- switch (in_list->tag)
- {
- case Tag_VFP_HP_extension:
- case Tag_ABI_FP_16bit_format:
- bfd_elf_add_obj_attr_int (obfd, OBJ_ATTR_PROC, in_list->tag, in_list->attr.i);
- break;
+ obj_attribute *attr =
+ &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
- default:
- if ((in_list->tag & 127) < 64)
- {
- _bfd_error_handler
- (_("Warning: %B: Unknown EABI object attribute %d"), ibfd, in_list->tag);
- break;
- }
+ if (arch == -1)
+ {
+ attr->s = NULL;
+ return;
}
+
+ /* Note: the tag and its argument below are uleb128 values, though
+ currently-defined values fit in one byte for each. */
+ if (!attr->s)
+ attr->s = bfd_alloc (abfd, 3);
+ attr->s[0] = Tag_CPU_arch;
+ attr->s[1] = arch;
+ attr->s[2] = '\0';
}
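For example, recording V6_M (arch value 11) under the standard tag values stores the three bytes { 6, 11, 0 }: Tag_CPU_arch (6) as a one-byte uleb128, the architecture value, and a NUL terminator.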
-static void
-elf32_arm_copy_eabi_other_attribute_list (bfd *ibfd, bfd *obfd, obj_attribute_list *in_list)
-{
- for (; in_list; in_list = in_list->next )
- elf32_arm_copy_one_eabi_other_attribute (ibfd, obfd, in_list);
+/* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
+ into account. */
+
+static int
+tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
+ int newtag, int secondary_compat)
+{
+#define T(X) TAG_CPU_ARCH_##X
+ int tagl, tagh, result;
+ const int v6t2[] =
+ {
+ T(V6T2), /* PRE_V4. */
+ T(V6T2), /* V4. */
+ T(V6T2), /* V4T. */
+ T(V6T2), /* V5T. */
+ T(V6T2), /* V5TE. */
+ T(V6T2), /* V5TEJ. */
+ T(V6T2), /* V6. */
+ T(V7), /* V6KZ. */
+ T(V6T2) /* V6T2. */
+ };
+ const int v6k[] =
+ {
+ T(V6K), /* PRE_V4. */
+ T(V6K), /* V4. */
+ T(V6K), /* V4T. */
+ T(V6K), /* V5T. */
+ T(V6K), /* V5TE. */
+ T(V6K), /* V5TEJ. */
+ T(V6K), /* V6. */
+ T(V6KZ), /* V6KZ. */
+ T(V7), /* V6T2. */
+ T(V6K) /* V6K. */
+ };
+ const int v7[] =
+ {
+ T(V7), /* PRE_V4. */
+ T(V7), /* V4. */
+ T(V7), /* V4T. */
+ T(V7), /* V5T. */
+ T(V7), /* V5TE. */
+ T(V7), /* V5TEJ. */
+ T(V7), /* V6. */
+ T(V7), /* V6KZ. */
+ T(V7), /* V6T2. */
+ T(V7), /* V6K. */
+ T(V7) /* V7. */
+ };
+ const int v6_m[] =
+ {
+ -1, /* PRE_V4. */
+ -1, /* V4. */
+ T(V6K), /* V4T. */
+ T(V6K), /* V5T. */
+ T(V6K), /* V5TE. */
+ T(V6K), /* V5TEJ. */
+ T(V6K), /* V6. */
+ T(V6KZ), /* V6KZ. */
+ T(V7), /* V6T2. */
+ T(V6K), /* V6K. */
+ T(V7), /* V7. */
+ T(V6_M) /* V6_M. */
+ };
+ const int v6s_m[] =
+ {
+ -1, /* PRE_V4. */
+ -1, /* V4. */
+ T(V6K), /* V4T. */
+ T(V6K), /* V5T. */
+ T(V6K), /* V5TE. */
+ T(V6K), /* V5TEJ. */
+ T(V6K), /* V6. */
+ T(V6KZ), /* V6KZ. */
+ T(V7), /* V6T2. */
+ T(V6K), /* V6K. */
+ T(V7), /* V7. */
+ T(V6S_M), /* V6_M. */
+ T(V6S_M) /* V6S_M. */
+ };
+ const int v4t_plus_v6_m[] =
+ {
+ -1, /* PRE_V4. */
+ -1, /* V4. */
+ T(V4T), /* V4T. */
+ T(V5T), /* V5T. */
+ T(V5TE), /* V5TE. */
+ T(V5TEJ), /* V5TEJ. */
+ T(V6), /* V6. */
+ T(V6KZ), /* V6KZ. */
+ T(V6T2), /* V6T2. */
+ T(V6K), /* V6K. */
+ T(V7), /* V7. */
+ T(V6_M), /* V6_M. */
+ T(V6S_M), /* V6S_M. */
+ T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
+ };
+ const int *comb[] =
+ {
+ v6t2,
+ v6k,
+ v7,
+ v6_m,
+ v6s_m,
+ /* Pseudo-architecture. */
+ v4t_plus_v6_m
+ };
+
+ /* Check we've not got a higher architecture than we know about. */
+
+ if (oldtag >= MAX_TAG_CPU_ARCH || newtag >= MAX_TAG_CPU_ARCH)
+ {
+ _bfd_error_handler (_("ERROR: %B: Unknown CPU architecture"), ibfd);
+ return -1;
+ }
+
+ /* Override old tag if we have a Tag_also_compatible_with on the output. */
+
+ if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
+ || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
+ oldtag = T(V4T_PLUS_V6_M);
+
+ /* And override the new tag if we have a Tag_also_compatible_with on the
+ input. */
+
+ if ((newtag == T(V6_M) && secondary_compat == T(V4T))
+ || (newtag == T(V4T) && secondary_compat == T(V6_M)))
+ newtag = T(V4T_PLUS_V6_M);
+
+ tagl = (oldtag < newtag) ? oldtag : newtag;
+ result = tagh = (oldtag > newtag) ? oldtag : newtag;
+
+ /* Architectures before V6KZ add features monotonically. */
+ if (tagh <= TAG_CPU_ARCH_V6KZ)
+ return result;
+
+ result = comb[tagh - T(V6T2)][tagl];
+
+ /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
+ as the canonical version. */
+ if (result == T(V4T_PLUS_V6_M))
+ {
+ result = T(V4T);
+ *secondary_compat_out = T(V6_M);
+ }
+ else
+ *secondary_compat_out = -1;
+
+ if (result == -1)
+ {
+ _bfd_error_handler (_("ERROR: %B: Conflicting CPU architectures %d/%d"),
+ ibfd, oldtag, newtag);
+ return -1;
+ }
+
+ return result;
+#undef T
}
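A worked example, using the standard tag numbering (V6T2 = 8, V6K = 9): merging an object built for V6T2 with one built for V6K gives tagl = 8 and tagh = 9, so result = comb[9 - T(V6T2)][8] = v6k[8] = T(V7); V7 is the oldest architecture providing both Thumb-2 and the V6K extensions.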
/* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
obj_attribute *out_attr;
obj_attribute_list *in_list;
obj_attribute_list *out_list;
+ obj_attribute_list **out_listp;
/* Some tags have 0 = don't care, 1 = strong requirement,
2 = weak requirement. */
- static const int order_312[3] = {3, 1, 2};
+ static const int order_021[3] = {0, 2, 1};
/* For use with Tag_VFP_arch. */
static const int order_01243[5] = {0, 1, 2, 4, 3};
int i;
+ bfd_boolean result = TRUE;
if (!elf_known_obj_attributes_proc (obfd)[0].i)
{
_bfd_error_handler
(_("ERROR: %B uses VFP register arguments, %B does not"),
ibfd, obfd);
- return FALSE;
+ result = FALSE;
}
}
{
case Tag_CPU_raw_name:
case Tag_CPU_name:
- /* Use whichever has the greatest architecture requirements. We
- won't necessarily have both the above tags, so make sure input
- name is non-NULL. */
- if (in_attr[Tag_CPU_arch].i > out_attr[Tag_CPU_arch].i
- && in_attr[i].s)
- out_attr[i].s = _bfd_elf_attr_strdup (obfd, in_attr[i].s);
+ /* These are merged after Tag_CPU_arch. */
break;
case Tag_ABI_optimization_goals:
break;
case Tag_CPU_arch:
+ {
+ int secondary_compat = -1, secondary_compat_out = -1;
+ unsigned int saved_out_attr = out_attr[i].i;
+ static const char *name_table[] = {
+ /* These aren't real CPU names, but we can't guess
+ that from the architecture version alone. */
+ "Pre v4",
+ "ARM v4",
+ "ARM v4T",
+ "ARM v5T",
+ "ARM v5TE",
+ "ARM v5TEJ",
+ "ARM v6",
+ "ARM v6KZ",
+ "ARM v6T2",
+ "ARM v6K",
+ "ARM v7",
+ "ARM v6-M",
+ "ARM v6S-M"
+ };
+
+ /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
+ secondary_compat = get_secondary_compatible_arch (ibfd);
+ secondary_compat_out = get_secondary_compatible_arch (obfd);
+ out_attr[i].i = tag_cpu_arch_combine (ibfd, out_attr[i].i,
+ &secondary_compat_out,
+ in_attr[i].i,
+ secondary_compat);
+ set_secondary_compatible_arch (obfd, secondary_compat_out);
+
+ /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
+ if (out_attr[i].i == saved_out_attr)
+ ; /* Leave the names alone. */
+ else if (out_attr[i].i == in_attr[i].i)
+ {
+ /* The output architecture has been changed to match the
+ input architecture. Use the input names. */
+ out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
+ ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
+ : NULL;
+ out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
+ ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
+ : NULL;
+ }
+ else
+ {
+ out_attr[Tag_CPU_name].s = NULL;
+ out_attr[Tag_CPU_raw_name].s = NULL;
+ }
+
+ /* If we still don't have a value for Tag_CPU_name,
+ make one up now. Tag_CPU_raw_name remains blank. */
+ if (out_attr[Tag_CPU_name].s == NULL
+ && out_attr[i].i < ARRAY_SIZE (name_table))
+ out_attr[Tag_CPU_name].s =
+ _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
+ }
+ break;
+
case Tag_ARM_ISA_use:
case Tag_THUMB_ISA_use:
case Tag_WMMX_arch:
- case Tag_NEON_arch:
- /* ??? Do NEON and WMMX conflict? */
+ case Tag_Advanced_SIMD_arch:
+ /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
case Tag_ABI_FP_rounding:
- case Tag_ABI_FP_denormal:
case Tag_ABI_FP_exceptions:
case Tag_ABI_FP_user_exceptions:
case Tag_ABI_FP_number_model:
- case Tag_ABI_align8_preserved:
- case Tag_ABI_HardFP_use:
+ case Tag_VFP_HP_extension:
+ case Tag_CPU_unaligned_access:
+ case Tag_T2EE_use:
+ case Tag_Virtualization_use:
+ case Tag_MPextension_use:
/* Use the largest value specified. */
if (in_attr[i].i > out_attr[i].i)
out_attr[i].i = in_attr[i].i;
break;
- case Tag_CPU_arch_profile:
- /* Warn if conflicting architecture profiles used. */
- if (out_attr[i].i && in_attr[i].i && in_attr[i].i != out_attr[i].i)
+ case Tag_ABI_align8_preserved:
+ case Tag_ABI_PCS_RO_data:
+ /* Use the smallest value specified. */
+ if (in_attr[i].i < out_attr[i].i)
+ out_attr[i].i = in_attr[i].i;
+ break;
+
+ case Tag_ABI_align8_needed:
+ if ((in_attr[i].i > 0 || out_attr[i].i > 0)
+ && (in_attr[Tag_ABI_align8_preserved].i == 0
+ || out_attr[Tag_ABI_align8_preserved].i == 0))
{
+ /* This error message should be enabled once all non-conformant
+ binaries in the toolchain have had the attributes set
+ properly.
_bfd_error_handler
- (_("ERROR: %B: Conflicting architecture profiles %c/%c"),
- ibfd, in_attr[i].i, out_attr[i].i);
- return FALSE;
+ (_("ERROR: %B: 8-byte data alignment conflicts with %B"),
+ obfd, ibfd);
+ result = FALSE; */
}
- if (in_attr[i].i)
+ /* Fall through. */
+ case Tag_ABI_FP_denormal:
+ case Tag_ABI_PCS_GOT_use:
+ /* Use the "greatest" from the sequence 0, 2, 1, or the largest
+ value if greater than 2 (for future-proofing). */
+ if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
+ || (in_attr[i].i <= 2 && out_attr[i].i <= 2
+ && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
out_attr[i].i = in_attr[i].i;
break;
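(So merging a strong requirement (1) against a weak one (2) keeps 1, since order_021 ranks 1 above 2, while any value above 2, unknown to this linker, wins outright.)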
+
+ case Tag_CPU_arch_profile:
+ if (out_attr[i].i != in_attr[i].i)
+ {
+ /* 0 will merge with anything.
+ 'A' and 'S' merge to 'A'.
+ 'R' and 'S' merge to 'R'.
+ 'M' and 'A|R|S' is an error. */
+ if (out_attr[i].i == 0
+ || (out_attr[i].i == 'S'
+ && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
+ out_attr[i].i = in_attr[i].i;
+ else if (in_attr[i].i == 0
+ || (in_attr[i].i == 'S'
+ && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
+ ; /* Do nothing. */
+ else
+ {
+ _bfd_error_handler
+ (_("ERROR: %B: Conflicting architecture profiles %c/%c"),
+ ibfd,
+ in_attr[i].i ? in_attr[i].i : '0',
+ out_attr[i].i ? out_attr[i].i : '0');
+ result = FALSE;
+ }
+ }
+ break;
case Tag_VFP_arch:
- if (in_attr[i].i > 4 || out_attr[i].i > 4
- || order_01243[in_attr[i].i] > order_01243[out_attr[i].i])
+ /* Use the "greatest" from the sequence 0, 1, 2, 4, 3, or the
+ largest value if greater than 4 (for future-proofing). */
+ if ((in_attr[i].i > 4 && in_attr[i].i > out_attr[i].i)
+ || (in_attr[i].i <= 4 && out_attr[i].i <= 4
+ && order_01243[in_attr[i].i] > order_01243[out_attr[i].i]))
out_attr[i].i = in_attr[i].i;
break;
case Tag_PCS_config:
{
_bfd_error_handler
(_("ERROR: %B: Conflicting use of R9"), ibfd);
- return FALSE;
+ result = FALSE;
}
if (out_attr[i].i == AEABI_R9_unused)
out_attr[i].i = in_attr[i].i;
_bfd_error_handler
(_("ERROR: %B: SB relative addressing conflicts with use of R9"),
ibfd);
- return FALSE;
+ result = FALSE;
}
/* Use the smallest value specified. */
if (in_attr[i].i < out_attr[i].i)
out_attr[i].i = in_attr[i].i;
break;
- case Tag_ABI_PCS_RO_data:
- /* Use the smallest value specified. */
- if (in_attr[i].i < out_attr[i].i)
- out_attr[i].i = in_attr[i].i;
- break;
- case Tag_ABI_PCS_GOT_use:
- if (in_attr[i].i > 2 || out_attr[i].i > 2
- || order_312[in_attr[i].i] < order_312[out_attr[i].i])
- out_attr[i].i = in_attr[i].i;
- break;
case Tag_ABI_PCS_wchar_t:
if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
&& !elf_arm_tdata (obfd)->no_wchar_size_warning)
else if (in_attr[i].i && !out_attr[i].i)
out_attr[i].i = in_attr[i].i;
break;
- case Tag_ABI_align8_needed:
- /* ??? Check against Tag_ABI_align8_preserved. */
- if (in_attr[i].i > 2 || out_attr[i].i > 2
- || order_312[in_attr[i].i] < order_312[out_attr[i].i])
- out_attr[i].i = in_attr[i].i;
- break;
case Tag_ABI_enum_size:
if (in_attr[i].i != AEABI_enum_unused)
{
&& out_attr[i].i != in_attr[i].i
&& !elf_arm_tdata (obfd)->no_enum_size_warning)
{
- const char *aeabi_enum_names[] =
+ static const char *aeabi_enum_names[] =
{ "", "variable-size", "32-bit", "" };
+ const char *in_name =
+ in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
+ ? aeabi_enum_names[in_attr[i].i]
+ : "<unknown>";
+ const char *out_name =
+ out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
+ ? aeabi_enum_names[out_attr[i].i]
+ : "<unknown>";
_bfd_error_handler
(_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
- ibfd, aeabi_enum_names[in_attr[i].i],
- aeabi_enum_names[out_attr[i].i]);
+ ibfd, in_name, out_name);
}
}
break;
_bfd_error_handler
(_("ERROR: %B uses iWMMXt register arguments, %B does not"),
ibfd, obfd);
- return FALSE;
+ result = FALSE;
}
break;
-
case Tag_compatibility:
/* Merged in target-independent code. */
break;
+ case Tag_ABI_HardFP_use:
+ /* 1 (SP) and 2 (DP) conflict, so combine to 3 (SP & DP). */
+ if ((in_attr[i].i == 1 && out_attr[i].i == 2)
+ || (in_attr[i].i == 2 && out_attr[i].i == 1))
+ out_attr[i].i = 3;
+ else if (in_attr[i].i > out_attr[i].i)
+ out_attr[i].i = in_attr[i].i;
+ break;
+ case Tag_ABI_FP_16bit_format:
+ if (in_attr[i].i != 0 && out_attr[i].i != 0)
+ {
+ if (in_attr[i].i != out_attr[i].i)
+ {
+ _bfd_error_handler
+ (_("ERROR: fp16 format mismatch between %B and %B"),
+ ibfd, obfd);
+ result = FALSE;
+ }
+ }
+ if (in_attr[i].i != 0)
+ out_attr[i].i = in_attr[i].i;
+ break;
- default: /* All known attributes should be explicitly covered. */
- abort ();
- }
+ case Tag_nodefaults:
+ /* This tag is set if it exists, but the value is unused (and is
+ typically zero). We don't actually need to do anything here -
+ the merge happens automatically when the type flags are merged
+ below. */
+ break;
+ case Tag_also_compatible_with:
+ /* Already done in Tag_CPU_arch. */
+ break;
+ case Tag_conformance:
+ /* Keep the attribute if it matches. Throw it away otherwise.
+ No attribute means no claim to conform. */
+ if (!in_attr[i].s || !out_attr[i].s
+ || strcmp (in_attr[i].s, out_attr[i].s) != 0)
+ out_attr[i].s = NULL;
+ break;
- if (in_attr[i].type && !out_attr[i].type)
- switch (in_attr[i].type)
+ default:
{
- case 1:
- if (out_attr[i].i)
- out_attr[i].type = 1;
- break;
+ bfd *err_bfd = NULL;
- case 2:
- if (out_attr[i].s)
- out_attr[i].type = 2;
- break;
+ /* The "known_obj_attributes" table does contain some undefined
+ attributes. Ensure that there are unused. */
+ if (out_attr[i].i != 0 || out_attr[i].s != NULL)
+ err_bfd = obfd;
+ else if (in_attr[i].i != 0 || in_attr[i].s != NULL)
+ err_bfd = ibfd;
- default:
- abort ();
+ if (err_bfd != NULL)
+ {
+ /* Attribute numbers >=64 (mod 128) can be safely ignored. */
+ if ((i & 127) < 64)
+ {
+ _bfd_error_handler
+ (_("%B: Unknown mandatory EABI object attribute %d"),
+ err_bfd, i);
+ bfd_set_error (bfd_error_bad_value);
+ result = FALSE;
+ }
+ else
+ {
+ _bfd_error_handler
+ (_("Warning: %B: Unknown EABI object attribute %d"),
+ err_bfd, i);
+ }
+ }
+
+ /* Only pass on attributes that match in both inputs. */
+ if (in_attr[i].i != out_attr[i].i
+ || in_attr[i].s != out_attr[i].s
+ || (in_attr[i].s != NULL && out_attr[i].s != NULL
+ && strcmp (in_attr[i].s, out_attr[i].s) != 0))
+ {
+ out_attr[i].i = 0;
+ out_attr[i].s = NULL;
+ }
}
+ }
+
+ /* If out_attr was copied from in_attr then it won't have a type yet. */
+ if (in_attr[i].type && !out_attr[i].type)
+ out_attr[i].type = in_attr[i].type;
}
/* Merge Tag_compatibility attributes and any common GNU ones. */
/* Check for any attributes not known on ARM. */
in_list = elf_other_obj_attributes_proc (ibfd);
- out_list = elf_other_obj_attributes_proc (obfd);
+ out_listp = &elf_other_obj_attributes_proc (obfd);
+ out_list = *out_listp;
- for (; in_list != NULL; )
+ for (; in_list || out_list; )
{
- if (out_list == NULL)
- {
- elf32_arm_copy_eabi_other_attribute_list (ibfd, obfd, in_list);
- return TRUE;
- }
+ bfd *err_bfd = NULL;
+ int err_tag = 0;
/* The tags for each list are in numerical order. */
/* If the tags are equal, then merge. */
- if (in_list->tag == out_list->tag)
- {
- switch (in_list->tag)
- {
- case Tag_VFP_HP_extension:
- if (out_list->attr.i == 0)
- out_list->attr.i = in_list->attr.i;
- break;
-
- case Tag_ABI_FP_16bit_format:
- if (in_list->attr.i != 0 && out_list->attr.i != 0)
- {
- if (in_list->attr.i != out_list->attr.i)
- {
- _bfd_error_handler
- (_("ERROR: fp16 format mismatch between %B and %B"),
- ibfd, obfd);
- return FALSE;
- }
- }
- if (in_list->attr.i != 0)
- out_list->attr.i = in_list->attr.i;
- break;
-
- default:
- if ((in_list->tag & 127) < 64)
- {
- _bfd_error_handler
- (_("Warning: %B: Unknown EABI object attribute %d"), ibfd, in_list->tag);
- break;
- }
- }
- }
- else if (in_list->tag < out_list->tag)
+ if (out_list && (!in_list || in_list->tag > out_list->tag))
{
- /* This attribute is in ibfd, but not obfd. Copy to obfd and advance to
- next input attribute. */
- elf32_arm_copy_one_eabi_other_attribute (ibfd, obfd, in_list);
+ /* This attribute only exists in obfd. We can't merge, and we don't
+ know what the tag means, so delete it. */
+ err_bfd = obfd;
+ err_tag = out_list->tag;
+ *out_listp = out_list->next;
+ out_list = *out_listp;
}
- if (in_list->tag <= out_list->tag)
+ else if (in_list && (!out_list || in_list->tag < out_list->tag))
{
+ /* This attribute only exists in ibfd. We can't merge, and we don't
+ know what the tag means, so ignore it. */
+ err_bfd = ibfd;
+ err_tag = in_list->tag;
in_list = in_list->next;
- if (in_list == NULL)
- continue;
}
- while (out_list && out_list->tag < in_list->tag)
- out_list = out_list->next;
+ else /* The tags are equal. */
+ {
+ /* At present, all attributes in the list are unknown, and
+ therefore cannot be merged meaningfully. */
+ err_bfd = obfd;
+ err_tag = out_list->tag;
+
+ /* Only pass on attributes that match in both inputs. */
+ if (in_list->attr.i != out_list->attr.i
+ || in_list->attr.s != out_list->attr.s
+ || (in_list->attr.s && out_list->attr.s
+ && strcmp (in_list->attr.s, out_list->attr.s) != 0))
+ {
+ /* No match. Delete the attribute. */
+ *out_listp = out_list->next;
+ out_list = *out_listp;
+ }
+ else
+ {
+ /* Matched. Keep the attribute and move to the next. */
+ out_list = out_list->next;
+ in_list = in_list->next;
+ }
+ }
+
+ if (err_bfd)
+ {
+ /* Attribute numbers >=64 (mod 128) can be safely ignored. */
+ if ((err_tag & 127) < 64)
+ {
+ _bfd_error_handler
+ (_("%B: Unknown mandatory EABI object attribute %d"),
+ err_bfd, err_tag);
+ bfd_set_error (bfd_error_bad_value);
+ result = FALSE;
+ }
+ else
+ {
+ _bfd_error_handler
+ (_("Warning: %B: Unknown EABI object attribute %d"),
+ err_bfd, err_tag);
+ }
+ }
}
- return TRUE;
+ return result;
}
flagword flags;
flags = bfd_get_section_flags (dynobj, sreloc);
- flags &= ~(SEC_LOAD | SEC_ALLOC);
+ flags |= (SEC_LOAD | SEC_ALLOC);
bfd_set_section_flags (dynobj, sreloc, flags);
}
}
bfd_vma addr;
char *stub_name;
output_arch_syminfo *osi;
+ const insn_sequence *template;
+ enum stub_insn_type prev_type;
+ int size;
+ int i;
+ enum map_symbol_type sym_type;
/* Massage our args to the form they really have. */
stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
addr = (bfd_vma) stub_entry->stub_offset;
stub_name = stub_entry->output_name;
- switch (stub_entry->stub_type)
+ template = stub_entry->stub_template;
+ switch (template[0].type)
{
- case arm_stub_long_branch:
- if (!elf32_arm_output_stub_sym (osi, stub_name, addr, 8))
- return FALSE;
- if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
- return FALSE;
- if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
+ case ARM_TYPE:
+ if (!elf32_arm_output_stub_sym (osi, stub_name, addr, stub_entry->stub_size))
return FALSE;
break;
- case arm_thumb_v4t_stub_long_branch:
- if (!elf32_arm_output_stub_sym (osi, stub_name, addr, 12))
- return FALSE;
- if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
- return FALSE;
- if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
- return FALSE;
- break;
- case arm_thumb_thumb_stub_long_branch:
- if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1, 16))
- return FALSE;
- if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr))
- return FALSE;
- if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
- return FALSE;
- break;
- case arm_thumb_arm_v4t_stub_long_branch:
- if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1, 20))
- return FALSE;
- if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr))
- return FALSE;
- if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 8))
- return FALSE;
- if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 16))
- return FALSE;
- break;
- case arm_thumb_arm_v4t_stub_short_branch:
- if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1, 8))
- return FALSE;
- if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 4))
- return FALSE;
- break;
- case arm_stub_pic_long_branch:
- if (!elf32_arm_output_stub_sym (osi, stub_name, addr, 12))
- return FALSE;
- if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
- return FALSE;
- if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
+ case THUMB16_TYPE:
+ if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
+ stub_entry->stub_size))
return FALSE;
break;
default:
BFD_FAIL ();
+ return FALSE;
+ }
+
+ prev_type = DATA_TYPE;
+ size = 0;
+ for (i = 0; i < stub_entry->stub_template_size; i++)
+ {
+ switch (template[i].type)
+ {
+ case ARM_TYPE:
+ sym_type = ARM_MAP_ARM;
+ break;
+
+ case THUMB16_TYPE:
+ sym_type = ARM_MAP_THUMB;
+ break;
+
+ case DATA_TYPE:
+ sym_type = ARM_MAP_DATA;
+ break;
+
+ default:
+ BFD_FAIL ();
+ return FALSE;
+ }
+
+ if (template[i].type != prev_type)
+ {
+ prev_type = template[i].type;
+ if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
+ return FALSE;
+ }
+
+ switch (template[i].type)
+ {
+ case ARM_TYPE:
+ size += 4;
+ break;
+
+ case THUMB16_TYPE:
+ size += 2;
+ break;
+
+ case DATA_TYPE:
+ size += 4;
+ break;
+
+ default:
+ BFD_FAIL ();
+ return FALSE;
+ }
}
return TRUE;
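With this loop, the V4T Thumb -> ARM stub (two Thumb-16 instructions, one ARM instruction, one literal word) gets $t at offset 0, $a at offset 4 and $d at offset 8, replacing the per-stub-type cases deleted above.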
#define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
#undef elf_backend_obj_attrs_section_type
#define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
+#define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
#include "elf32-target.h"