/* 32-bit ELF support for ARM
- Copyright (C) 1998-2016 Free Software Foundation, Inc.
+ Copyright (C) 1998-2017 Free Software Foundation, Inc.
This file is part of BFD, the Binary File Descriptor library.
as per ARMv8-M Security Extensions. */
int cmse_implib;
+  /* The import library whose symbols' addresses must remain stable in
+     the generated import library. */
+ bfd *in_implib_bfd;
+
/* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt. */
bfd_vma next_tls_desc_index;
/* How many R_ARM_TLS_DESC relocations were generated so far. */
bfd_vma num_tls_desc;
- /* Short-cuts to get to dynamic linker sections. */
- asection *sdynbss;
- asection *srelbss;
-
/* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
asection *srelplt2;
/* Input stub section holding secure gateway veneers. */
asection *cmse_stub_sec;
+ /* Offset in cmse_stub_sec where new SG veneers (not in input import library)
+ start to be allocated. */
+ bfd_vma new_cmse_stub_offset;
+
/* Number of elements in stub_group. */
unsigned int top_id;
}
static inline int
-popcount (unsigned int mask)
+elf32_arm_popcount (unsigned int mask)
{
#if GCC_VERSION >= 3004
return __builtin_popcount (mask);
#else
- unsigned int i, sum = 0;
+ unsigned int i;
+ int sum = 0;
for (i = 0; i < 8 * sizeof (mask); i++)
{
/* Initialize the local fields. */
eh = (struct elf32_arm_stub_hash_entry *) entry;
eh->stub_sec = NULL;
- eh->stub_offset = 0;
+ eh->stub_offset = (bfd_vma) -1;
eh->source_value = 0;
eh->target_value = 0;
eh->target_section = NULL;
eh->stub_type = arm_stub_none;
eh->stub_size = 0;
eh->stub_template = NULL;
- eh->stub_template_size = 0;
+ eh->stub_template_size = -1;
eh->h = NULL;
eh->id_sec = NULL;
eh->output_name = NULL;
if (!_bfd_elf_create_dynamic_sections (dynobj, info))
return FALSE;
- htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
- if (!bfd_link_pic (info))
- htab->srelbss = bfd_get_linker_section (dynobj,
- RELOC_SECTION (htab, ".bss"));
-
if (htab->vxworks_p)
{
if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
if (!htab->root.splt
|| !htab->root.srelplt
- || !htab->sdynbss
- || (!bfd_link_pic (info) && !htab->srelbss))
+ || !htab->root.sdynbss
+ || (!bfd_link_pic (info) && !htab->root.srelbss))
abort ();
return TRUE;
/* Note when dealing with PLT entries: the main PLT stub is in
ARM mode, so if the branch is in Thumb mode, another
Thumb->ARM stub will be inserted later just before the ARM
- PLT stub. We don't take this extra distance into account
- here, because if a long branch stub is needed, we'll add a
- Thumb->Arm one and branch directly to the ARM PLT entry
- because it avoids spreading offset corrections in several
- places. */
+ PLT stub. If a long branch stub is needed, we'll add a
+ Thumb->Arm one and branch directly to the ARM PLT entry.
+ Here, we have to check if a pre-PLT Thumb->ARM stub
+ is needed and if it will be close enough. */
destination = (splt->output_section->vma
+ splt->output_offset
+ root_plt->offset);
st_type = STT_FUNC;
- branch_type = ST_BRANCH_TO_ARM;
+
+ /* Thumb branch/call to PLT: it can become a branch to ARM
+ or to Thumb. We must perform the same checks and
+ corrections as in elf32_arm_final_link_relocate. */
+ if ((r_type == R_ARM_THM_CALL)
+ || (r_type == R_ARM_THM_JUMP24))
+ {
+ if (globals->use_blx
+ && r_type == R_ARM_THM_CALL
+ && !thumb_only)
+ {
+ /* If the Thumb BLX instruction is available, convert
+ the BL to a BLX instruction to call the ARM-mode
+ PLT entry. */
+ branch_type = ST_BRANCH_TO_ARM;
+ }
+ else
+ {
+ if (!thumb_only)
+ /* Target the Thumb stub before the ARM PLT entry. */
+ destination -= PLT_THUMB_STUB_SIZE;
+ branch_type = ST_BRANCH_TO_THUMB;
+ }
+ }
+ else
+ {
+ branch_type = ST_BRANCH_TO_ARM;
+ }
}
}
/* Calls to STT_GNU_IFUNC symbols should go through a PLT. */
- it's a Thumb->Arm call and blx is not available, or it's a
Thumb->Arm branch (not bl). A stub is needed in this case,
but only if this call is not through a PLT entry. Indeed,
- PLT stubs handle mode switching already.
- */
+ PLT stubs handle mode switching already. */
if ((!thumb2_bl
&& (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
|| (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
|| (r_type == R_ARM_THM_JUMP19))
&& !use_plt))
{
+ /* If we need to insert a Thumb-Thumb long branch stub to a
+ PLT, use one that branches directly to the ARM PLT
+ stub. If we pretended we'd use the pre-PLT Thumb->ARM
+ stub, undo this now. */
+ if ((branch_type == ST_BRANCH_TO_THUMB) && use_plt && !thumb_only)
+ {
+ branch_type = ST_BRANCH_TO_ARM;
+ branch_offset += PLT_THUMB_STUB_SIZE;
+ }
+
if (branch_type == ST_BRANCH_TO_THUMB)
{
/* Thumb to thumb. */
if (!thumb_only)
{
if (input_sec->flags & SEC_ELF_PURECODE)
- (*_bfd_error_handler) (_("%B(%s): warning: long branch "
- " veneers used in section with "
- "SHF_ARM_PURECODE section "
- "attribute is only supported"
- " for M-profile targets that "
- "implement the movw "
- "instruction."));
+ _bfd_error_handler (_("\
+%B(%A): warning: long branch veneers used in section with SHF_ARM_PURECODE section \
+attribute is only supported for M-profile targets that implement the movw instruction."),
+ input_sec);
stub_type = (bfd_link_pic (info) | globals->pic_veneer)
/* PIC stubs. */
else
{
if (input_sec->flags & SEC_ELF_PURECODE)
- (*_bfd_error_handler) (_("%B(%s): warning: long branch "
- " veneers used in section with "
- "SHF_ARM_PURECODE section "
- "attribute is only supported"
- " for M-profile targets that "
- "implement the movw "
- "instruction."));
+ _bfd_error_handler (_("\
+%B(%A): warning: long branch veneers used in section with SHF_ARM_PURECODE section \
+attribute is only supported for M-profile targets that implement the movw instruction."),
+ input_sec);
stub_type = (bfd_link_pic (info) | globals->pic_veneer)
/* PIC stub. */
else
{
if (input_sec->flags & SEC_ELF_PURECODE)
- (*_bfd_error_handler) (_("%B(%s): warning: long branch "
- " veneers used in section with "
- "SHF_ARM_PURECODE section "
- "attribute is only supported"
- " for M-profile targets that "
- "implement the movw "
- "instruction."));
+ _bfd_error_handler (_("%B(%s): warning: long branch "
+ " veneers used in section with "
+ "SHF_ARM_PURECODE section "
+ "attribute is only supported"
+ " for M-profile targets that "
+ "implement the movw "
+ "instruction."));
/* Thumb to arm. */
if (sym_sec != NULL
&& sym_sec->owner != NULL
&& !INTERWORK_FLAG (sym_sec->owner))
{
- (*_bfd_error_handler)
+ _bfd_error_handler
(_("%B(%s): warning: interworking not enabled.\n"
" first occurrence: %B: Thumb call to ARM"),
sym_sec->owner, input_bfd, name);
|| r_type == R_ARM_TLS_CALL)
{
if (input_sec->flags & SEC_ELF_PURECODE)
- (*_bfd_error_handler) (_("%B(%s): warning: long branch "
- " veneers used in section with "
- "SHF_ARM_PURECODE section "
- "attribute is only supported"
- " for M-profile targets that "
- "implement the movw "
- "instruction."));
+ _bfd_error_handler (_("%B(%s): warning: long branch "
+ " veneers used in section with "
+ "SHF_ARM_PURECODE section "
+ "attribute is only supported"
+ " for M-profile targets that "
+ "implement the movw "
+ "instruction."));
if (branch_type == ST_BRANCH_TO_THUMB)
{
/* Arm to thumb. */
&& sym_sec->owner != NULL
&& !INTERWORK_FLAG (sym_sec->owner))
{
- (*_bfd_error_handler)
+ _bfd_error_handler
(_("%B(%s): warning: interworking not enabled.\n"
" first occurrence: %B: ARM call to Thumb"),
sym_sec->owner, input_bfd, name);
Stub names need to include a section id, as there may well be
more than one stub used to reach say, printf, and we need to
distinguish between them. */
+ BFD_ASSERT (input_section->id <= htab->top_id);
id_sec = htab->stub_group[input_section->id].link_sec;
if (h != NULL && h->stub_cache != NULL
out_sec = bfd_get_section_by_name (output_bfd, out_sec_name);
if (out_sec == NULL)
{
- (*_bfd_error_handler) (_("No address assigned to the veneers output "
- "section %s"), out_sec_name);
+ _bfd_error_handler (_("No address assigned to the veneers output "
+ "section %s"), out_sec_name);
return NULL;
}
}
else
{
+ BFD_ASSERT (section->id <= htab->top_id);
link_sec = htab->stub_group[section->id].link_sec;
BFD_ASSERT (link_sec != NULL);
stub_sec_p = &htab->stub_group[section->id].stub_sec;
{
if (section == NULL)
section = stub_sec;
- (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
- section->owner,
- stub_name);
+ _bfd_error_handler (_("%s: cannot create stub entry %s"),
+ section->owner, stub_name);
return NULL;
}
stub_entry->stub_sec = stub_sec;
- stub_entry->stub_offset = 0;
+ stub_entry->stub_offset = (bfd_vma) -1;
stub_entry->id_sec = link_sec;
return stub_entry;
abort (); /* Should be unreachable. */
}
+/* If veneers of type STUB_TYPE should go in a dedicated output section,
+ returns the address of the hash table field in HTAB holding the offset at
+   which new veneers should be laid out in the stub section. */
+
+static bfd_vma*
+arm_new_stubs_start_offset_ptr (struct elf32_arm_link_hash_table *htab,
+ enum elf32_arm_stub_type stub_type)
+{
+ switch (stub_type)
+ {
+ case arm_stub_cmse_branch_thumb_only:
+ return &htab->new_cmse_stub_offset;
+
+ default:
+ BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
+ return NULL;
+ }
+}
+
static bfd_boolean
arm_build_one_stub (struct bfd_hash_entry *gen_entry,
void * in_arg)
{
#define MAXRELOCS 3
+ bfd_boolean removed_sg_veneer;
struct elf32_arm_stub_hash_entry *stub_entry;
struct elf32_arm_link_hash_table *globals;
struct bfd_link_info *info;
int stub_reloc_idx[MAXRELOCS] = {-1, -1};
int stub_reloc_offset[MAXRELOCS] = {0, 0};
int nrelocs = 0;
+ int just_allocated = 0;
/* Massage our args to the form they really have. */
stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
/* We have to do less-strictly-aligned fixes last. */
return TRUE;
- /* Make a note of the offset within the stubs for this entry. */
- stub_entry->stub_offset = stub_sec->size;
+ /* Assign a slot at the end of section if none assigned yet. */
+ if (stub_entry->stub_offset == (bfd_vma) -1)
+ {
+ stub_entry->stub_offset = stub_sec->size;
+ just_allocated = 1;
+ }
loc = stub_sec->contents + stub_entry->stub_offset;
stub_bfd = stub_sec->owner;
}
}
- stub_sec->size += size;
+ if (just_allocated)
+ stub_sec->size += size;
/* Stub size has already been computed in arm_size_one_stub. Check
consistency. */
if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
sym_value |= 1;
- /* Assume there is at least one and at most MAXRELOCS entries to relocate
- in each stub. */
- BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);
+  /* Assume non-empty slots have at least one and at most MAXRELOCS entries
+     to relocate in each stub. */
+ removed_sg_veneer =
+ (size == 0 && stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
+ BFD_ASSERT (removed_sg_veneer || (nrelocs != 0 && nrelocs <= MAXRELOCS));
for (i = 0; i < nrelocs; i++)
{
size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
&template_size);
- stub_entry->stub_size = size;
- stub_entry->stub_template = template_sequence;
- stub_entry->stub_template_size = template_size;
+ /* Initialized to -1. Null size indicates an empty slot full of zeros. */
+ if (stub_entry->stub_template_size)
+ {
+ stub_entry->stub_size = size;
+ stub_entry->stub_template = template_sequence;
+ stub_entry->stub_template_size = template_size;
+ }
+
+ /* Already accounted for. */
+ if (stub_entry->stub_offset != (bfd_vma) -1)
+ return TRUE;
size = (size + 7) & ~7;
stub_entry->stub_sec->size += size;
and *NEW_STUB is set to FALSE. Otherwise, *NEW_STUB is set to
TRUE and the stub entry is initialized.
- Returns whether the stub could be successfully created or updated, or FALSE
- if an error occured. */
+ Returns the stub that was created or updated, or NULL if an error
+ occurred. */
-static bfd_boolean
+static struct elf32_arm_stub_hash_entry *
elf32_arm_create_stub (struct elf32_arm_link_hash_table *htab,
enum elf32_arm_stub_type stub_type, asection *section,
Elf_Internal_Rela *irela, asection *sym_sec,
{
BFD_ASSERT (irela);
BFD_ASSERT (section);
+ BFD_ASSERT (section->id <= htab->top_id);
/* Support for grouping stub sections. */
id_sec = htab->stub_group[section->id].link_sec;
stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash, irela,
stub_type);
if (!stub_name)
- return FALSE;
+ return NULL;
}
stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name, FALSE,
if (!sym_claimed)
free (stub_name);
stub_entry->target_value = sym_value;
- return TRUE;
+ return stub_entry;
}
stub_entry = elf32_arm_add_stub (stub_name, section, htab, stub_type);
{
if (!sym_claimed)
free (stub_name);
- return FALSE;
+ return NULL;
}
stub_entry->target_value = sym_value;
if (stub_entry->output_name == NULL)
{
free (stub_name);
- return FALSE;
+ return NULL;
}
/* For historical reasons, use the existing names for ARM-to-Thumb and
}
*new_stub = TRUE;
- return TRUE;
+ return stub_entry;
}
/* Scan symbols in INPUT_BFD to identify secure entry functions needing a
OUT_ATTR gives the output attributes, SYM_HASHES the symbol index to hash
entry mapping while HTAB gives the name to hash entry mapping.
+   *CMSE_STUB_CREATED is increased by the number of secure gateway veneers
+   created.
- If any secure gateway veneer is created, *STUB_CHANGED is set to TRUE. The
- return value gives whether a stub failed to be allocated. */
+ The return value gives whether a stub failed to be allocated. */
static bfd_boolean
cmse_scan (bfd *input_bfd, struct elf32_arm_link_hash_table *htab,
obj_attribute *out_attr, struct elf_link_hash_entry **sym_hashes,
- bfd_boolean *stub_changed)
+ int *cmse_stub_created)
{
const struct elf_backend_data *bed;
Elf_Internal_Shdr *symtab_hdr;
char *sym_name, *lsym_name;
bfd_vma sym_value;
asection *section;
- bfd_boolean is_v8m, new_stub, created_stub, cmse_invalid, ret = TRUE;
+ struct elf32_arm_stub_hash_entry *stub_entry;
+ bfd_boolean is_v8m, new_stub, cmse_invalid, ret = TRUE;
bed = get_elf_backend_data (input_bfd);
symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
if (!is_v8m)
{
- (*_bfd_error_handler) (_("%B: Special symbol `%s' only allowed for "
- "ARMv8-M architecture or later."),
- input_bfd, sym_name);
+ _bfd_error_handler (_("%B: Special symbol `%s' only allowed for "
+ "ARMv8-M architecture or later."),
+ input_bfd, sym_name);
is_v8m = TRUE; /* Avoid multiple warning. */
ret = FALSE;
}
if (cmse_invalid)
{
- (*_bfd_error_handler) (_("%B: invalid special symbol `%s'."),
- input_bfd, sym_name);
- (*_bfd_error_handler) (_("It must be a global or weak function "
- "symbol."));
+ _bfd_error_handler (_("%B: invalid special symbol `%s'."),
+ input_bfd, sym_name);
+ _bfd_error_handler (_("It must be a global or weak function "
+ "symbol."));
ret = FALSE;
if (i < ext_start)
continue;
if (hash || j < ext_start)
{
- (*_bfd_error_handler)
+ _bfd_error_handler
(_("%B: invalid standard symbol `%s'."), input_bfd, sym_name);
- (*_bfd_error_handler)
+ _bfd_error_handler
(_("It must be a global or weak function symbol."));
}
else
- (*_bfd_error_handler)
+ _bfd_error_handler
(_("%B: absent standard symbol `%s'."), input_bfd, sym_name);
ret = FALSE;
if (!hash)
if (cmse_hash->root.root.u.def.section != section)
{
- (*_bfd_error_handler)
+ _bfd_error_handler
(_("%B: `%s' and its special symbol are in different sections."),
input_bfd, sym_name);
ret = FALSE;
don't create any stubs. */
if (section->output_section == NULL)
{
- (*_bfd_error_handler)
+ _bfd_error_handler
(_("%B: entry function `%s' not output."), input_bfd, sym_name);
continue;
}
if (hash->root.size == 0)
{
- (*_bfd_error_handler)
+ _bfd_error_handler
(_("%B: entry function `%s' is empty."), input_bfd, sym_name);
ret = FALSE;
}
if (!ret)
continue;
branch_type = ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
- created_stub
+ stub_entry
= elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
NULL, NULL, section, hash, sym_name,
sym_value, branch_type, &new_stub);
- if (!created_stub)
+ if (stub_entry == NULL)
ret = FALSE;
else
{
BFD_ASSERT (new_stub);
- *stub_changed = TRUE;
+ (*cmse_stub_created)++;
}
}
return ret;
}
+/* Return TRUE iff a symbol identified by its linker HASH entry is a secure
+   code entry function, i.e. can be called from non-secure code without using
+   a veneer. */
+
+static bfd_boolean
+cmse_entry_fct_p (struct elf32_arm_link_hash_entry *hash)
+{
+ bfd_byte contents[4];
+ uint32_t first_insn;
+ asection *section;
+ file_ptr offset;
+ bfd *abfd;
+
+ /* Defined symbol of function type. */
+ if (hash->root.root.type != bfd_link_hash_defined
+ && hash->root.root.type != bfd_link_hash_defweak)
+ return FALSE;
+ if (hash->root.type != STT_FUNC)
+ return FALSE;
+
+ /* Read first instruction. */
+ section = hash->root.root.u.def.section;
+ abfd = section->owner;
+ offset = hash->root.root.u.def.value - section->vma;
+ if (!bfd_get_section_contents (abfd, section, contents, offset,
+ sizeof (contents)))
+ return FALSE;
+
+ first_insn = bfd_get_32 (abfd, contents);
+
+ /* Starts by SG instruction. */
+ return first_insn == 0xe97fe97f;
+}
+
+/* Output the name (in the symbol table) of the veneer GEN_ENTRY if it is a
+   new secure gateway veneer (i.e. the veneer was not in the input import
+   library) and there is no output import library (GEN_INFO->out_implib_bfd
+   is NULL). */
+
+static bfd_boolean
+arm_list_new_cmse_stub (struct bfd_hash_entry *gen_entry, void *gen_info)
+{
+ struct elf32_arm_stub_hash_entry *stub_entry;
+ struct bfd_link_info *info;
+
+ /* Massage our args to the form they really have. */
+ stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
+ info = (struct bfd_link_info *) gen_info;
+
+ if (info->out_implib_bfd)
+ return TRUE;
+
+ if (stub_entry->stub_type != arm_stub_cmse_branch_thumb_only)
+ return TRUE;
+
+ if (stub_entry->stub_offset == (bfd_vma) -1)
+ _bfd_error_handler (" %s", stub_entry->output_name);
+
+ return TRUE;
+}
+
+/* Set the offset of each secure gateway veneer so that its address remains
+   identical to the one in the input import library referred to by
+   HTAB->in_implib_bfd.  A warning is issued for veneers that disappeared
+   (present in the input import library but absent from the executable being
+   linked) or if new veneers appeared and there is no output import library
+   (INFO->out_implib_bfd is NULL and *CMSE_STUB_CREATED is bigger than the
+   number of secure gateway veneers found in the input import library).
+
+   The function returns whether an error occurred.  If no error occurred,
+   *CMSE_STUB_CREATED gives the number of SG veneers created by both cmse_scan
+   and this function and HTAB->new_cmse_stub_offset is set to the offset just
+   past the biggest veneer observed, for new veneers to be laid out after. */
+
+static bfd_boolean
+set_cmse_veneer_addr_from_implib (struct bfd_link_info *info,
+ struct elf32_arm_link_hash_table *htab,
+ int *cmse_stub_created)
+{
+ long symsize;
+ char *sym_name;
+ flagword flags;
+ long i, symcount;
+ bfd *in_implib_bfd;
+ asection *stub_out_sec;
+ bfd_boolean ret = TRUE;
+ Elf_Internal_Sym *intsym;
+ const char *out_sec_name;
+ bfd_size_type cmse_stub_size;
+ asymbol **sympp = NULL, *sym;
+ struct elf32_arm_link_hash_entry *hash;
+ const insn_sequence *cmse_stub_template;
+ struct elf32_arm_stub_hash_entry *stub_entry;
+ int cmse_stub_template_size, new_cmse_stubs_created = *cmse_stub_created;
+ bfd_vma veneer_value, stub_offset, next_cmse_stub_offset;
+ bfd_vma cmse_stub_array_start = (bfd_vma) -1, cmse_stub_sec_vma = 0;
+
+ /* No input secure gateway import library. */
+ if (!htab->in_implib_bfd)
+ return TRUE;
+
+ in_implib_bfd = htab->in_implib_bfd;
+ if (!htab->cmse_implib)
+ {
+ _bfd_error_handler (_("%B: --in-implib only supported for Secure "
+ "Gateway import libraries."), in_implib_bfd);
+ return FALSE;
+ }
+
+ /* Get symbol table size. */
+ symsize = bfd_get_symtab_upper_bound (in_implib_bfd);
+ if (symsize < 0)
+ return FALSE;
+
+ /* Read in the input secure gateway import library's symbol table. */
+ sympp = (asymbol **) xmalloc (symsize);
+ symcount = bfd_canonicalize_symtab (in_implib_bfd, sympp);
+ if (symcount < 0)
+ {
+ ret = FALSE;
+ goto free_sym_buf;
+ }
+
+ htab->new_cmse_stub_offset = 0;
+ cmse_stub_size =
+ find_stub_size_and_template (arm_stub_cmse_branch_thumb_only,
+ &cmse_stub_template,
+ &cmse_stub_template_size);
+ out_sec_name =
+ arm_dedicated_stub_output_section_name (arm_stub_cmse_branch_thumb_only);
+ stub_out_sec =
+ bfd_get_section_by_name (htab->obfd, out_sec_name);
+ if (stub_out_sec != NULL)
+ cmse_stub_sec_vma = stub_out_sec->vma;
+
+  /* Set addresses of veneers mentioned in the input secure gateway import
+     library's symbol table. */
+ for (i = 0; i < symcount; i++)
+ {
+ sym = sympp[i];
+ flags = sym->flags;
+ sym_name = (char *) bfd_asymbol_name (sym);
+ intsym = &((elf_symbol_type *) sym)->internal_elf_sym;
+
+ if (sym->section != bfd_abs_section_ptr
+ || !(flags & (BSF_GLOBAL | BSF_WEAK))
+ || (flags & BSF_FUNCTION) != BSF_FUNCTION
+ || (ARM_GET_SYM_BRANCH_TYPE (intsym->st_target_internal)
+ != ST_BRANCH_TO_THUMB))
+ {
+ _bfd_error_handler (_("%B: invalid import library entry: `%s'."),
+ in_implib_bfd, sym_name);
+ _bfd_error_handler (_("Symbol should be absolute, global and "
+ "refer to Thumb functions."));
+ ret = FALSE;
+ continue;
+ }
+
+ veneer_value = bfd_asymbol_value (sym);
+ stub_offset = veneer_value - cmse_stub_sec_vma;
+ stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, sym_name,
+ FALSE, FALSE);
+ hash = (struct elf32_arm_link_hash_entry *)
+ elf_link_hash_lookup (&(htab)->root, sym_name, FALSE, FALSE, TRUE);
+
+ /* Stub entry should have been created by cmse_scan or the symbol be of
+ a secure function callable from non secure code. */
+ if (!stub_entry && !hash)
+ {
+ bfd_boolean new_stub;
+
+ _bfd_error_handler
+ (_("Entry function `%s' disappeared from secure code."), sym_name);
+ hash = (struct elf32_arm_link_hash_entry *)
+ elf_link_hash_lookup (&(htab)->root, sym_name, TRUE, TRUE, TRUE);
+ stub_entry
+ = elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
+ NULL, NULL, bfd_abs_section_ptr, hash,
+ sym_name, veneer_value,
+ ST_BRANCH_TO_THUMB, &new_stub);
+ if (stub_entry == NULL)
+ ret = FALSE;
+ else
+ {
+ BFD_ASSERT (new_stub);
+ new_cmse_stubs_created++;
+ (*cmse_stub_created)++;
+ }
+ stub_entry->stub_template_size = stub_entry->stub_size = 0;
+ stub_entry->stub_offset = stub_offset;
+ }
+ /* Symbol found is not callable from non secure code. */
+ else if (!stub_entry)
+ {
+ if (!cmse_entry_fct_p (hash))
+ {
+ _bfd_error_handler (_("`%s' refers to a non entry function."),
+ sym_name);
+ ret = FALSE;
+ }
+ continue;
+ }
+ else
+ {
+ /* Only stubs for SG veneers should have been created. */
+ BFD_ASSERT (stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
+
+ /* Check visibility hasn't changed. */
+ if (!!(flags & BSF_GLOBAL)
+ != (hash->root.root.type == bfd_link_hash_defined))
+ _bfd_error_handler
+ (_("%B: visibility of symbol `%s' has changed."), in_implib_bfd,
+ sym_name);
+
+ stub_entry->stub_offset = stub_offset;
+ }
+
+ /* Size should match that of a SG veneer. */
+ if (intsym->st_size != cmse_stub_size)
+ {
+ _bfd_error_handler (_("%B: incorrect size for symbol `%s'."),
+ in_implib_bfd, sym_name);
+ ret = FALSE;
+ }
+
+ /* Previous veneer address is before current SG veneer section. */
+ if (veneer_value < cmse_stub_sec_vma)
+ {
+ /* Avoid offset underflow. */
+ if (stub_entry)
+ stub_entry->stub_offset = 0;
+ stub_offset = 0;
+ ret = FALSE;
+ }
+
+ /* Complain if stub offset not a multiple of stub size. */
+ if (stub_offset % cmse_stub_size)
+ {
+ _bfd_error_handler
+ (_("Offset of veneer for entry function `%s' not a multiple of "
+ "its size."), sym_name);
+ ret = FALSE;
+ }
+
+ if (!ret)
+ continue;
+
+ new_cmse_stubs_created--;
+ if (veneer_value < cmse_stub_array_start)
+ cmse_stub_array_start = veneer_value;
+ next_cmse_stub_offset = stub_offset + ((cmse_stub_size + 7) & ~7);
+ if (next_cmse_stub_offset > htab->new_cmse_stub_offset)
+ htab->new_cmse_stub_offset = next_cmse_stub_offset;
+ }
+
+ if (!info->out_implib_bfd && new_cmse_stubs_created != 0)
+ {
+ BFD_ASSERT (new_cmse_stubs_created > 0);
+ _bfd_error_handler
+ (_("new entry function(s) introduced but no output import library "
+ "specified:"));
+ bfd_hash_traverse (&htab->stub_hash_table, arm_list_new_cmse_stub, info);
+ }
+
+ if (cmse_stub_array_start != cmse_stub_sec_vma)
+ {
+ _bfd_error_handler
+ (_("Start address of `%s' is different from previous link."),
+ out_sec_name);
+ ret = FALSE;
+ }
+
+free_sym_buf:
+ free (sympp);
+ return ret;
+}
+
/* Determine and set the size of the stub section for a final link.
The basic idea here is to examine all the relocations looking for
unsigned int),
void (*layout_sections_again) (void))
{
+ bfd_boolean ret = TRUE;
obj_attribute *out_attr;
+ int cmse_stub_created = 0;
bfd_size_type stub_group_size;
bfd_boolean m_profile, stubs_always_after_branch, first_veneer_scan = TRUE;
struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
out_attr = elf_known_obj_attributes_proc (output_bfd);
m_profile = out_attr[Tag_CPU_arch_profile].i == 'M';
+
/* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
as the first half of a 32-bit branch straddling two 4K pages. This is a
crude way of enforcing that. */
sym_hashes = elf_sym_hashes (input_bfd);
if (!cmse_scan (input_bfd, htab, out_attr, sym_hashes,
- &stub_changed))
+ &cmse_stub_created))
goto error_ret_free_local;
+
+ if (cmse_stub_created != 0)
+ stub_changed = TRUE;
}
/* Walk over each section attached to the input bfd. */
do
{
bfd_boolean new_stub;
+ struct elf32_arm_stub_hash_entry *stub_entry;
/* Determine what (if any) linker stub is needed. */
stub_type = arm_type_of_stub (info, section, irela,
/* We've either created a stub for this reloc already,
or we are about to. */
- created_stub =
+ stub_entry =
elf32_arm_create_stub (htab, stub_type, section, irela,
sym_sec, hash,
(char *) sym_name, sym_value,
branch_type, &new_stub);
+ created_stub = stub_entry != NULL;
if (!created_stub)
goto error_ret_free_internal;
else if (!new_stub)
}
}
+ if (first_veneer_scan
+ && !set_cmse_veneer_addr_from_implib (info, htab,
+ &cmse_stub_created))
+ ret = FALSE;
+
if (prev_num_a8_fixes != num_a8_fixes)
stub_changed = TRUE;
stub_sec->size = 0;
}
+ /* Add new SG veneers after those already in the input import
+ library. */
+ for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
+ stub_type++)
+ {
+ bfd_vma *start_offset_p;
+ asection **stub_sec_p;
+
+ start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
+ stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
+ if (start_offset_p == NULL)
+ continue;
+
+ BFD_ASSERT (stub_sec_p != NULL);
+ if (*stub_sec_p != NULL)
+ (*stub_sec_p)->size = *start_offset_p;
+ }
+
/* Compute stub section size, considering padding. */
bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
TRUE, FALSE);
if (stub_entry == NULL)
{
- (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
- section->owner,
- stub_name);
+ _bfd_error_handler (_("%s: cannot create stub entry %s"),
+ section->owner, stub_name);
return FALSE;
}
stub_entry->stub_sec = stub_sec;
- stub_entry->stub_offset = 0;
+ stub_entry->stub_offset = (bfd_vma) -1;
stub_entry->id_sec = link_sec;
stub_entry->stub_type = a8_fixes[i].stub_type;
stub_entry->source_value = a8_fixes[i].offset;
htab->a8_erratum_fixes = NULL;
htab->num_a8_erratum_fixes = 0;
}
- return TRUE;
+ return ret;
}
/* Build all the stubs associated with the current output file. The
{
asection *stub_sec;
struct bfd_hash_table *table;
+ enum elf32_arm_stub_type stub_type;
struct elf32_arm_link_hash_table *htab;
htab = elf32_arm_hash_table (info);
continue;
/* Allocate memory to hold the linker stubs. Zeroing the stub sections
- must at least be done for stub section requiring padding. */
+ must at least be done for stub section requiring padding and for SG
+ veneers to ensure that a non secure code branching to a removed SG
+ veneer causes an error. */
size = stub_sec->size;
stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
if (stub_sec->contents == NULL && size != 0)
return FALSE;
+
stub_sec->size = 0;
}
+ /* Add new SG veneers after those already in the input import library. */
+ for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
+ {
+ bfd_vma *start_offset_p;
+ asection **stub_sec_p;
+
+ start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
+ stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
+ if (start_offset_p == NULL)
+ continue;
+
+ BFD_ASSERT (stub_sec_p != NULL);
+ if (*stub_sec_p != NULL)
+ (*stub_sec_p)->size = *start_offset_p;
+ }
+
/* Build the stubs as directed by the stub hash table. */
table = &htab->stub_hash_table;
bfd_hash_traverse (table, arm_build_one_stub, info);
default:
/* Give a warning, but do as the user requests anyway. */
- (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
+ _bfd_error_handler (_("%B: warning: selected VFP11 erratum "
"workaround is not necessary for target architecture"), obfd);
}
}
{
if (globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE)
/* Give a warning, but do as the user requests anyway. */
- (*_bfd_error_handler)
+ _bfd_error_handler
(_("%B: warning: selected STM32L4XX erratum "
"workaround is not necessary for target architecture"), obfd);
}
(&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
if (myh == NULL)
- (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
- "`%s'"), abfd, tmp_name);
+ _bfd_error_handler (_("%B: unable to find VFP11 veneer "
+ "`%s'"), abfd, tmp_name);
vma = myh->root.u.def.section->output_section->vma
+ myh->root.u.def.section->output_offset
(&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
if (myh == NULL)
- (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
- "`%s'"), abfd, tmp_name);
+ _bfd_error_handler (_("%B: unable to find VFP11 veneer "
+ "`%s'"), abfd, tmp_name);
vma = myh->root.u.def.section->output_section->vma
+ myh->root.u.def.section->output_offset
(&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
if (myh == NULL)
- (*_bfd_error_handler) (_("%B: unable to find STM32L4XX veneer "
- "`%s'"), abfd, tmp_name);
+ _bfd_error_handler (_("%B: unable to find STM32L4XX veneer "
+ "`%s'"), abfd, tmp_name);
vma = myh->root.u.def.section->output_section->vma
+ myh->root.u.def.section->output_offset
(&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
if (myh == NULL)
- (*_bfd_error_handler) (_("%B: unable to find STM32L4XX veneer "
- "`%s'"), abfd, tmp_name);
+ _bfd_error_handler (_("%B: unable to find STM32L4XX veneer "
+ "`%s'"), abfd, tmp_name);
vma = myh->root.u.def.section->output_section->vma
+ myh->root.u.def.section->output_offset
/* The field encoding the register list is the same for both LDMIA
and LDMDB encodings. */
if (is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn))
- nb_words = popcount (insn & 0x0000ffff);
+ nb_words = elf32_arm_popcount (insn & 0x0000ffff);
else if (is_thumb2_vldm (insn))
nb_words = (insn & 0xff);
if the instruction is not the last instruction of
an IT block, we cannot create a jump there, so we
bail out. */
- if ((is_ldm || is_vldm) &&
- stm32l4xx_need_create_replacing_stub
+ if ((is_ldm || is_vldm)
+ && stm32l4xx_need_create_replacing_stub
(insn, globals->stm32l4xx_fix))
{
if (is_not_last_in_it_block)
{
- (*_bfd_error_handler)
+ _bfd_error_handler
/* Note - overlong line used here to allow for translation. */
+ /* xgettext:c-format */
(_("\
%B(%A+0x%lx): error: multiple load detected in non-last IT block instruction : STM32L4XX veneer cannot be generated.\n"
"Use gcc option -mrestrict-it to generate only one instruction per IT block.\n"),
- abfd, sec, (long)i);
+ abfd, sec, (long) i);
}
else
{
There can be no nested IT blocks so an IT block
is naturally a new one for which it is worth
computing its size. */
- bfd_boolean is_newitblock = ((insn & 0xff00) == 0xbf00) &&
- ((insn & 0x000f) != 0x0000);
+ bfd_boolean is_newitblock = ((insn & 0xff00) == 0xbf00)
+ && ((insn & 0x000f) != 0x0000);
/* If we have a new IT block we compute its size. */
if (is_newitblock)
{
/* Set target relocation values needed during linking. */
void
-bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
+bfd_elf32_arm_set_target_params (struct bfd *output_bfd,
struct bfd_link_info *link_info,
- int target1_is_rel,
- char * target2_type,
- int fix_v4bx,
- int use_blx,
- bfd_arm_vfp11_fix vfp11_fix,
- bfd_arm_stm32l4xx_fix stm32l4xx_fix,
- int no_enum_warn, int no_wchar_warn,
- int pic_veneer, int fix_cortex_a8,
- int fix_arm1176, int cmse_implib)
+ struct elf32_arm_params *params)
{
struct elf32_arm_link_hash_table *globals;
if (globals == NULL)
return;
- globals->target1_is_rel = target1_is_rel;
- if (strcmp (target2_type, "rel") == 0)
+ globals->target1_is_rel = params->target1_is_rel;
+ if (strcmp (params->target2_type, "rel") == 0)
globals->target2_reloc = R_ARM_REL32;
- else if (strcmp (target2_type, "abs") == 0)
+ else if (strcmp (params->target2_type, "abs") == 0)
globals->target2_reloc = R_ARM_ABS32;
- else if (strcmp (target2_type, "got-rel") == 0)
+ else if (strcmp (params->target2_type, "got-rel") == 0)
globals->target2_reloc = R_ARM_GOT_PREL;
else
{
_bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
- target2_type);
+ params->target2_type);
}
- globals->fix_v4bx = fix_v4bx;
- globals->use_blx |= use_blx;
- globals->vfp11_fix = vfp11_fix;
- globals->stm32l4xx_fix = stm32l4xx_fix;
- globals->pic_veneer = pic_veneer;
- globals->fix_cortex_a8 = fix_cortex_a8;
- globals->fix_arm1176 = fix_arm1176;
- globals->cmse_implib = cmse_implib;
+ globals->fix_v4bx = params->fix_v4bx;
+ globals->use_blx |= params->use_blx;
+ globals->vfp11_fix = params->vfp11_denorm_fix;
+ globals->stm32l4xx_fix = params->stm32l4xx_fix;
+ globals->pic_veneer = params->pic_veneer;
+ globals->fix_cortex_a8 = params->fix_cortex_a8;
+ globals->fix_arm1176 = params->fix_arm1176;
+ globals->cmse_implib = params->cmse_implib;
+ globals->in_implib_bfd = params->in_implib_bfd;
BFD_ASSERT (is_arm_elf (output_bfd));
- elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
- elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
+ elf_arm_tdata (output_bfd)->no_enum_size_warning
+ = params->no_enum_size_warning;
+ elf_arm_tdata (output_bfd)->no_wchar_size_warning
+ = params->no_wchar_size_warning;
}
/* Replace the target offset of a Thumb bl or b.w instruction. */
&& sym_sec->owner != NULL
&& !INTERWORK_FLAG (sym_sec->owner))
{
- (*_bfd_error_handler)
+ _bfd_error_handler
(_("%B(%s): warning: interworking not enabled.\n"
" first occurrence: %B: Thumb call to ARM"),
sym_sec->owner, input_bfd, name);
&& sym_sec->owner != NULL
&& !INTERWORK_FLAG (sym_sec->owner))
{
- (*_bfd_error_handler)
+ _bfd_error_handler
(_("%B(%s): warning: interworking not enabled.\n"
" first occurrence: %B: arm call to thumb"),
sym_sec->owner, input_bfd, name);
error generation. */
insn = (insn << 16)
| bfd_get_16 (input_bfd, contents + rel->r_offset + 2);
- (*_bfd_error_handler)
- (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' in TLS trampoline"),
+ _bfd_error_handler
+ /* xgettext:c-format */
+ (_("%B(%A+0x%lx): unexpected Thumb instruction '0x%x' in TLS trampoline"),
input_bfd, input_sec, (unsigned long)rel->r_offset, insn);
return bfd_reloc_notsupported;
}
}
else
{
- (*_bfd_error_handler)
- (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' in TLS trampoline"),
+ _bfd_error_handler
+ /* xgettext:c-format */
+ (_("%B(%A+0x%lx): unexpected ARM instruction '0x%x' in TLS trampoline"),
input_bfd, input_sec, (unsigned long)rel->r_offset, insn);
return bfd_reloc_notsupported;
}
case R_ARM_ABS12:
if (!globals->vxworks_p)
return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
+ /* Fall through. */
case R_ARM_PC24:
case R_ARM_ABS32:
if (bfd_link_executable (info))
v = _("PIE executable");
- (*_bfd_error_handler)
+ _bfd_error_handler
(_("%B: relocation %s against external or undefined symbol `%s'"
" can not be used when making a %s; recompile with -fPIC"), input_bfd,
elf32_arm_howto_table_1[r_type].name, h->root.root.string, v);
else if (h != NULL
&& h->dynindx != -1
&& (!bfd_link_pic (info)
- || !SYMBOLIC_BIND (info, h)
+ || !(bfd_link_pie (info)
+ || SYMBOLIC_BIND (info, h))
|| !h->def_regular))
outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
else
/* FIXME: Should we translate the instruction into a BL
instruction instead ? */
if (branch_type != ST_BRANCH_TO_THUMB)
- (*_bfd_error_handler)
+ _bfd_error_handler
(_("\%B: Warning: Arm BLX instruction targets Arm function '%s'."),
input_bfd,
h ? h->root.root.string : "(local)");
/* FIXME: Should we translate the instruction into a BL
instruction instead ? */
if (branch_type == ST_BRANCH_TO_THUMB)
- (*_bfd_error_handler)
+ _bfd_error_handler
(_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
input_bfd,
h ? h->root.root.string : "(local)");
{
if (dynreloc_st_type == STT_GNU_IFUNC)
outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
- else if (bfd_link_pic (info) &&
- (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
- || h->root.type != bfd_link_hash_undefweak))
+ else if (bfd_link_pic (info)
+ && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
+ || h->root.type != bfd_link_hash_undefweak))
outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
else
outrel.r_info = 0;
{
bfd_vma off;
- BFD_ASSERT (local_got_offsets != NULL &&
- local_got_offsets[r_symndx] != (bfd_vma) -1);
+ BFD_ASSERT (local_got_offsets != NULL
+ && local_got_offsets[r_symndx] != (bfd_vma) -1);
off = local_got_offsets[r_symndx];
value = -5;
else
{
- (*_bfd_error_handler)
- (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' referenced by TLS_GOTDESC"),
+ _bfd_error_handler
+ /* xgettext:c-format */
+ (_("%B(%A+0x%lx): unexpected Thumb instruction '0x%x' referenced by TLS_GOTDESC"),
input_bfd, input_section,
(unsigned long)rel->r_offset, insn);
return bfd_reloc_notsupported;
break;
default:
- (*_bfd_error_handler)
- (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' referenced by TLS_GOTDESC"),
+ _bfd_error_handler
+ /* xgettext:c-format */
+ (_("%B(%A+0x%lx): unexpected ARM instruction '0x%x' referenced by TLS_GOTDESC"),
input_bfd, input_section,
(unsigned long)rel->r_offset, insn);
return bfd_reloc_notsupported;
case R_ARM_TLS_LE32:
if (bfd_link_dll (info))
{
- (*_bfd_error_handler)
+ _bfd_error_handler
+ /* xgettext:c-format */
(_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"),
input_bfd, input_section,
(long) rel->r_offset, howto->name);
negative = identify_add_or_sub (insn);
if (negative == 0)
{
- (*_bfd_error_handler)
+ _bfd_error_handler
+ /* xgettext:c-format */
(_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
input_bfd, input_section,
(long) rel->r_offset, howto->name);
|| r_type == R_ARM_ALU_SB_G1
|| r_type == R_ARM_ALU_SB_G2) && residual != 0)
{
- (*_bfd_error_handler)
+ _bfd_error_handler
+ /* xgettext:c-format */
(_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
input_bfd, input_section,
(long) rel->r_offset, signed_value < 0 ? - signed_value : signed_value,
/* Check for overflow. */
if (residual >= 0x1000)
{
- (*_bfd_error_handler)
+ _bfd_error_handler
+ /* xgettext:c-format */
(_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
input_bfd, input_section,
(long) rel->r_offset, labs (signed_value), howto->name);
/* Check for overflow. */
if (residual >= 0x100)
{
- (*_bfd_error_handler)
+ _bfd_error_handler
+ /* xgettext:c-format */
(_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
input_bfd, input_section,
(long) rel->r_offset, labs (signed_value), howto->name);
fit in eight bits.) */
if ((residual & 0x3) != 0 || residual >= 0x400)
{
- (*_bfd_error_handler)
+ _bfd_error_handler
+ /* xgettext:c-format */
(_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
input_bfd, input_section,
(long) rel->r_offset, labs (signed_value), howto->name);
if (howto->rightshift
|| (howto->src_mask & (howto->src_mask + 1)))
{
- (*_bfd_error_handler)
+ _bfd_error_handler
+ /* xgettext:c-format */
(_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
input_bfd, input_section,
(long) rel->r_offset, howto->name);
|| h->root.type == bfd_link_hash_defweak)
&& IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
{
- (*_bfd_error_handler)
+ _bfd_error_handler
((sym_type == STT_TLS
+ /* xgettext:c-format */
? _("%B(%A+0x%lx): %s used with TLS symbol %s")
+ /* xgettext:c-format */
: _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
input_bfd,
input_section,
&& _bfd_elf_section_offset (output_bfd, info, input_section,
rel->r_offset) != (bfd_vma) -1)
{
- (*_bfd_error_handler)
+ _bfd_error_handler
+ /* xgettext:c-format */
(_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
input_bfd,
input_section,
if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
{
if (flags & EF_ARM_INTERWORK)
- (*_bfd_error_handler)
+ _bfd_error_handler
(_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
abfd);
else
are conflicting attributes. */
static bfd_boolean
-elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
+elf32_arm_merge_eabi_attributes (bfd *ibfd, struct bfd_link_info *info)
{
+ bfd *obfd = info->output_bfd;
obj_attribute *in_attr;
obj_attribute *out_attr;
/* Some tags have 0 = don't care, 1 = strong requirement,
}
/* Merge Tag_compatibility attributes and any common GNU ones. */
- if (!_bfd_elf_merge_object_attributes (ibfd, obfd))
+ if (!_bfd_elf_merge_object_attributes (ibfd, info))
return FALSE;
/* Check for any attributes not known on ARM. */
object file when linking. */
static bfd_boolean
-elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd);
+elf32_arm_merge_private_bfd_data (bfd *, struct bfd_link_info *);
/* Display the flags field. */
/* Ignore init flag - it may not be set, despite the flags field
containing valid data. */
- /* xgettext:c-format */
fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
switch (EF_ARM_EABI_VERSION (flags))
object file containing relocations but no symbol table. */
&& (r_symndx > STN_UNDEF || nsyms > 0))
{
- (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
- r_symndx);
+ _bfd_error_handler (_("%B: bad symbol index: %d"), abfd,
+ r_symndx);
return FALSE;
}
break;
}
else goto jump_over;
-
+
/* Fall through. */
case R_ARM_MOVW_ABS_NC:
case R_ARM_THM_MOVT_ABS:
if (bfd_link_pic (info))
{
- (*_bfd_error_handler)
+ _bfd_error_handler
(_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
abfd, elf32_arm_howto_table_1[r_type].name,
(h) ? h->root.root.string : "a local symbol");
return TRUE;
}
+/* Rewrite the relocations of the SHT_ARM_EXIDX output section O according
+   to the unwind-table edit list recorded on each input section: relocations
+   belonging to deleted index entries are dropped, surviving relocation
+   offsets are rebased past the deletions, and a new R_ARM_PREL31 relocation
+   is synthesized for each CANTUNWIND terminator inserted at the end of an
+   input section.  RELDATA's count and its section header size are updated
+   to match.  */
+
+static void
+elf32_arm_update_relocs (asection *o,
+			 struct bfd_elf_section_reloc_data *reldata)
+{
+  void (*swap_in) (bfd *, const bfd_byte *, Elf_Internal_Rela *);
+  void (*swap_out) (bfd *, const Elf_Internal_Rela *, bfd_byte *);
+  const struct elf_backend_data *bed;
+  _arm_elf_section_data *eado;
+  struct bfd_link_order *p;
+  bfd_byte *erela_head, *erela;
+  Elf_Internal_Rela *irela_head, *irela;
+  Elf_Internal_Shdr *rel_hdr;
+  bfd *abfd;
+  unsigned int count;
+
+  /* Only the ARM exception-index output section needs this treatment.  */
+  eado = get_arm_elf_section_data (o);
+
+  if (!eado || eado->elf.this_hdr.sh_type != SHT_ARM_EXIDX)
+    return;
+
+  abfd = o->owner;
+  bed = get_elf_backend_data (abfd);
+  rel_hdr = reldata->hdr;
+
+  /* Pick the REL or RELA swap routines from the output entry size.  */
+  if (rel_hdr->sh_entsize == bed->s->sizeof_rel)
+    {
+      swap_in = bed->s->swap_reloc_in;
+      swap_out = bed->s->swap_reloc_out;
+    }
+  else if (rel_hdr->sh_entsize == bed->s->sizeof_rela)
+    {
+      swap_in = bed->s->swap_reloca_in;
+      swap_out = bed->s->swap_reloca_out;
+    }
+  else
+    abort ();
+
+  /* One extra slot for a possible CANTUNWIND terminator relocation.
+     NOTE(review): the bfd_zmalloc result is not checked; on allocation
+     failure the swap_in calls below would dereference NULL.  */
+  erela_head = rel_hdr->contents;
+  irela_head = (Elf_Internal_Rela *) bfd_zmalloc
+    ((NUM_SHDR_ENTRIES (rel_hdr) + 1) * sizeof (*irela_head));
+
+  erela = erela_head;
+  irela = irela_head;
+  count = 0;
+
+  for (p = o->map_head.link_order; p; p = p->next)
+    {
+      if (p->type == bfd_section_reloc_link_order
+	  || p->type == bfd_symbol_reloc_link_order)
+	{
+	  /* Script-generated relocations pass through unchanged.  */
+	  (*swap_in) (abfd, erela, irela);
+	  erela += rel_hdr->sh_entsize;
+	  irela++;
+	  count++;
+	}
+      else if (p->type == bfd_indirect_link_order)
+	{
+	  struct bfd_elf_section_reloc_data *input_reldata;
+	  arm_unwind_table_edit *edit_list, *edit_tail;
+	  _arm_elf_section_data *eadi;
+	  bfd_size_type j;
+	  bfd_vma offset;
+	  asection *i;
+
+	  i = p->u.indirect.section;
+
+	  eadi = get_arm_elf_section_data (i);
+	  edit_list = eadi->u.exidx.unwind_edit_list;
+	  edit_tail = eadi->u.exidx.unwind_edit_tail;
+	  offset = o->vma + i->output_offset;
+
+	  if (eadi->elf.rel.hdr
+	      && eadi->elf.rel.hdr->sh_entsize == rel_hdr->sh_entsize)
+	    input_reldata = &eadi->elf.rel;
+	  else if (eadi->elf.rela.hdr
+		   && eadi->elf.rela.hdr->sh_entsize == rel_hdr->sh_entsize)
+	    input_reldata = &eadi->elf.rela;
+	  else
+	    abort ();
+
+	  if (edit_list)
+	    {
+	      for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
+		{
+		  arm_unwind_table_edit *edit_node, *edit_next;
+		  bfd_vma bias;
+		  bfd_vma reloc_index;
+
+		  /* EXIDX entries are 8 bytes each.  */
+		  (*swap_in) (abfd, erela, irela);
+		  reloc_index = (irela->r_offset - offset) / 8;
+
+		  /* Walk the edit list to the node covering this entry,
+		     counting in BIAS how many nodes precede it.  */
+		  bias = 0;
+		  edit_node = edit_list;
+		  for (edit_next = edit_list;
+		       edit_next && edit_next->index <= reloc_index;
+		       edit_next = edit_node->next)
+		    {
+		      bias++;
+		      edit_node = edit_next;
+		    }
+
+		  /* Keep the relocation unless its entry was deleted,
+		     shifting the offset down past the deletions.  */
+		  if (edit_node->type != DELETE_EXIDX_ENTRY
+		      || edit_node->index != reloc_index)
+		    {
+		      irela->r_offset -= bias * 8;
+		      irela++;
+		      count++;
+		    }
+
+		  erela += rel_hdr->sh_entsize;
+		}
+
+	      if (edit_tail->type == INSERT_EXIDX_CANTUNWIND_AT_END)
+		{
+		  /* New relocation entity for the CANTUNWIND terminator
+		     occupying the last 8 bytes of this input section.  */
+		  asection *text_sec = edit_tail->linked_section;
+		  asection *text_out = text_sec->output_section;
+		  bfd_vma exidx_offset = offset + i->size - 8;
+
+		  irela->r_addend = 0;
+		  irela->r_offset = exidx_offset;
+		  irela->r_info = ELF32_R_INFO
+		    (text_out->target_index, R_ARM_PREL31);
+		  irela++;
+		  count++;
+		}
+	    }
+	  else
+	    {
+	      /* No edits for this input section: copy its relocations
+		 through unchanged.  */
+	      for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
+		{
+		  (*swap_in) (abfd, erela, irela);
+		  erela += rel_hdr->sh_entsize;
+		  irela++;
+		}
+
+	      count += NUM_SHDR_ENTRIES (input_reldata->hdr);
+	    }
+	}
+    }
+
+  reldata->count = count;
+  rel_hdr->sh_size = count * rel_hdr->sh_entsize;
+
+  /* Swap the edited relocations back out over the original contents.  */
+  erela = erela_head;
+  irela = irela_head;
+  while (count > 0)
+    {
+      (*swap_out) (abfd, irela, erela);
+      erela += rel_hdr->sh_entsize;
+      irela++;
+      count--;
+    }
+
+  free (irela_head);
+
+  /* Hashes are no longer valid.  */
+  free (reldata->hashes);
+  reldata->hashes = NULL;
+}
+
/* Unwinding tables are not referenced directly. This pass marks them as
required if the corresponding code section is marked. Similarly, ARMv8-M
secure entry functions can only be referenced by SG veneers which are
if (ARM_GET_SYM_CMSE_SPCL (cmse_hash->root.target_internal))
{
cmse_sec = cmse_hash->root.root.u.def.section;
- if (!_bfd_elf_gc_mark (info, cmse_sec, gc_mark_hook))
+ if (!cmse_sec->gc_mark
+ && !_bfd_elf_gc_mark (info, cmse_sec, gc_mark_hook))
return FALSE;
}
}
struct elf_link_hash_entry * h)
{
bfd * dynobj;
- asection * s;
+ asection *s, *srel;
struct elf32_arm_link_hash_entry * eh;
struct elf32_arm_link_hash_table *globals;
determine the address it must put in the global offset table, so
both the dynamic object and the regular object will refer to the
same memory location for the variable. */
- s = bfd_get_linker_section (dynobj, ".dynbss");
- BFD_ASSERT (s != NULL);
-
/* If allowed, we must generate a R_ARM_COPY reloc to tell the dynamic
linker to copy the initial value out of the dynamic object and into
the runtime process image. We need to remember the offset into the
.rel(a).bss section we are going to use. */
+ if ((h->root.u.def.section->flags & SEC_READONLY) != 0)
+ {
+ s = globals->root.sdynrelro;
+ srel = globals->root.sreldynrelro;
+ }
+ else
+ {
+ s = globals->root.sdynbss;
+ srel = globals->root.srelbss;
+ }
if (info->nocopyreloc == 0
&& (h->root.u.def.section->flags & SEC_ALLOC) != 0
&& h->size != 0)
{
- asection *srel;
-
- srel = bfd_get_linker_section (dynobj, RELOC_SECTION (globals, ".bss"));
elf32_arm_allocate_dynrelocs (info, srel, 1);
h->needs_copy = 1;
}
if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
|| !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info)
|| !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd, info))
- /* xgettext:c-format */
_bfd_error_handler (_("Errors encountered processing file %s"),
ibfd->filename);
}
&& s != htab->root.sgotplt
&& s != htab->root.iplt
&& s != htab->root.igotplt
- && s != htab->sdynbss)
+ && s != htab->root.sdynbss
+ && s != htab->root.sdynrelro)
{
/* It's not one of our sections, so don't allocate space. */
continue;
|| !add_dynamic_entry (DT_JMPREL, 0))
return FALSE;
- if (htab->dt_tlsdesc_plt &&
- (!add_dynamic_entry (DT_TLSDESC_PLT,0)
- || !add_dynamic_entry (DT_TLSDESC_GOT,0)))
+ if (htab->dt_tlsdesc_plt
+ && (!add_dynamic_entry (DT_TLSDESC_PLT,0)
+ || !add_dynamic_entry (DT_TLSDESC_GOT,0)))
return FALSE;
}
&& (h->root.type == bfd_link_hash_defined
|| h->root.type == bfd_link_hash_defweak));
- s = htab->srelbss;
- BFD_ASSERT (s != NULL);
-
rel.r_addend = 0;
rel.r_offset = (h->root.u.def.value
+ h->root.u.def.section->output_section->vma
+ h->root.u.def.section->output_offset);
rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
+ if (h->root.u.def.section == htab->root.sdynrelro)
+ s = htab->root.sreldynrelro;
+ else
+ s = htab->root.srelbss;
elf32_arm_add_dynreloc (output_bfd, info, s, &rel);
}
s = bfd_get_linker_section (dynobj, name);
if (s == NULL)
{
- (*_bfd_error_handler)
+ _bfd_error_handler
(_("could not find section %s"), name);
bfd_set_error (bfd_error_invalid_operation);
return FALSE;
case DT_RELSZ:
case DT_RELASZ:
- if (!htab->symbian_p)
- {
- /* My reading of the SVR4 ABI indicates that the
- procedure linkage table relocs (DT_JMPREL) should be
- included in the overall relocs (DT_REL). This is
- what Solaris does. However, UnixWare can not handle
- that case. Therefore, we override the DT_RELSZ entry
- here to make it not include the JMPREL relocs. Since
- the linker script arranges for .rel(a).plt to follow all
- other relocation sections, we don't have to worry
- about changing the DT_REL entry. */
- s = htab->root.srelplt;
- if (s != NULL)
- dyn.d_un.d_val -= s->size;
- bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
- break;
- }
- /* Fall through. */
-
case DT_REL:
case DT_RELA:
/* In the BPABI, the DT_REL tag must point at the file
offset, not the VMA, of the first relocation
section. So, we use code similar to that in
elflink.c, but do not check for SHF_ALLOC on the
- relcoation section, since relocations sections are
- never allocated under the BPABI. The comments above
- about Unixware notwithstanding, we include all of the
- relocations here. */
+ relocation section, since relocation sections are
+ never allocated under the BPABI. PLT relocs are also
+ included. */
if (htab->symbian_p)
{
unsigned int i;
This check is just to be on the safe side... */
if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
{
- (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
- "allocated in unsafe location"), abfd);
+ _bfd_error_handler (_("%B: error: Cortex-A8 erratum stub is "
+ "allocated in unsafe location"), abfd);
return FALSE;
}
{
/* There's not much we can do apart from complain if this
happens. */
- (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
- "of range (input file too large)"), abfd);
+ _bfd_error_handler (_("%B: error: Cortex-A8 erratum stub out "
+ "of range (input file too large)"), abfd);
return FALSE;
}
int insn_all_registers = initial_insn & 0x0000ffff;
int insn_low_registers, insn_high_registers;
int usable_register_mask;
- int nb_registers = popcount (insn_all_registers);
+ int nb_registers = elf32_arm_popcount (insn_all_registers);
int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
bfd_byte *current_stub_contents = base_stub_contents;
push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
create_instruction_branch_absolute
(initial_insn_addr - current_stub_contents));
-
/* Fill the remaining of the stub with deterministic contents. */
current_stub_contents =
BFD_ASSERT (!wback || !restore_rn);
/* - nb_registers > 8. */
- BFD_ASSERT (popcount (insn_all_registers) > 8);
+ BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);
/* At this point, LDMxx initial insn loads between 9 and 14 registers. */
int usable_register_mask;
int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
- int nb_registers = popcount (insn_all_registers);
+ int nb_registers = elf32_arm_popcount (insn_all_registers);
bfd_byte *current_stub_contents = base_stub_contents;
BFD_ASSERT (is_thumb2_ldmdb (initial_insn));
BFD_ASSERT (!wback || !restore_rn);
/* - nb_registers > 8. */
- BFD_ASSERT (popcount (insn_all_registers) > 8);
+ BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);
/* At this point, LDMxx initial insn loads between 9 and 14 registers. */
}
else
{
- bfd_boolean is_dp = /* DP encoding. */
+ bfd_boolean is_dp = /* DP encoding. */
(initial_insn & 0xfe100f00) == 0xec100b00;
bfd_boolean is_ia_nobang = /* (IA without !). */
(((initial_insn << 7) >> 28) & 0xd) == 0x4;
/* End of stm32l4xx work-around. */
-static void
-elf32_arm_add_relocation (bfd *output_bfd, struct bfd_link_info *info,
- asection *output_sec, Elf_Internal_Rela *rel)
-{
- BFD_ASSERT (output_sec && rel);
- struct bfd_elf_section_reloc_data *output_reldata;
- struct elf32_arm_link_hash_table *htab;
- struct bfd_elf_section_data *oesd = elf_section_data (output_sec);
- Elf_Internal_Shdr *rel_hdr;
-
-
- if (oesd->rel.hdr)
- {
- rel_hdr = oesd->rel.hdr;
- output_reldata = &(oesd->rel);
- }
- else if (oesd->rela.hdr)
- {
- rel_hdr = oesd->rela.hdr;
- output_reldata = &(oesd->rela);
- }
- else
- {
- abort ();
- }
-
- bfd_byte *erel = rel_hdr->contents;
- erel += output_reldata->count * rel_hdr->sh_entsize;
- htab = elf32_arm_hash_table (info);
- SWAP_RELOC_OUT (htab) (output_bfd, rel, erel);
- output_reldata->count++;
-}
-
/* Do code byteswapping. Return FALSE afterwards so that the section is
written out as normal. */
if ((signed) branch_to_veneer < -(1 << 25)
|| (signed) branch_to_veneer >= (1 << 25))
- (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
- "range"), output_bfd);
+ _bfd_error_handler (_("%B: error: VFP11 veneer out of "
+ "range"), output_bfd);
insn |= (branch_to_veneer >> 2) & 0xffffff;
contents[endianflip ^ target] = insn & 0xff;
if ((signed) branch_from_veneer < -(1 << 25)
|| (signed) branch_from_veneer >= (1 << 25))
- (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
- "range"), output_bfd);
+ _bfd_error_handler (_("%B: error: VFP11 veneer out of "
+ "range"), output_bfd);
/* Original instruction. */
insn = errnode->u.v.branch->u.b.vfp_insn;
((signed) branch_to_veneer >= (1 << 24)) ?
branch_to_veneer - (1 << 24) : 0;
- (*_bfd_error_handler)
+ _bfd_error_handler
(_("%B(%#x): error: Cannot create STM32L4XX veneer. "
"Jump out of range by %ld bytes. "
"Cannot encode branch instruction. "),
STM32L4XX_ERRATUM_LDM_VENEER_SIZE) < -(1 << 24)
|| (signed) (veneer_r - veneer) >= (1 << 24))
{
- (*_bfd_error_handler) (_("%B: error: Cannot create STM32L4XX "
- "veneer."), output_bfd);
+ _bfd_error_handler (_("%B: error: Cannot create STM32L4XX "
+ "veneer."), output_bfd);
continue;
}
adjust offset by hand. */
prel31_offset = text_sec->output_offset
+ text_sec->size;
-
- /* New relocation entity. */
- asection *text_out = text_sec->output_section;
- Elf_Internal_Rela rel;
- rel.r_addend = 0;
- rel.r_offset = exidx_offset;
- rel.r_info = ELF32_R_INFO (text_out->target_index,
- R_ARM_PREL31);
-
- elf32_arm_add_relocation (output_bfd, link_info,
- sec->output_section,
- &rel);
}
/* First address we can't unwind. */
{
struct _arm_elf_section_data *arm_data;
arm_data = get_arm_elf_section_data (sec);
- return arm_data->additional_reloc_count;
+
+ return arm_data == NULL ? 0 : arm_data->additional_reloc_count;
}
/* Called to set the sh_flags, sh_link and sh_info fields of OSECTION which
- has a type >= SHT_LOOS. Returns TRUE if these fields were initialised
+ has a type >= SHT_LOOS. Returns TRUE if these fields were initialised
FALSE otherwise. ISECTION is the best guess matching section from the
input bfd IBFD, but it might be NULL. */
== iheaders[isection->sh_link]->bfd_section->output_section)
break;
}
-
+
if (i == 0)
{
/* Failing that we have to find a matching section ourselves. If
#define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
#define elf_backend_gc_sweep_hook elf32_arm_gc_sweep_hook
#define elf_backend_check_relocs elf32_arm_check_relocs
+#define elf_backend_update_relocs elf32_arm_update_relocs
#define elf_backend_relocate_section elf32_arm_relocate_section
#define elf_backend_write_section elf32_arm_write_section
#define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
#define elf_backend_plt_readonly 1
#define elf_backend_want_got_plt 1
#define elf_backend_want_plt_sym 0
+#define elf_backend_want_dynrelro 1
#define elf_backend_may_use_rel_p 1
#define elf_backend_may_use_rela_p 0
#define elf_backend_default_use_rela_p 0
+#define elf_backend_dtrel_excludes_plt 1
#define elf_backend_got_header_size 12
#define elf_backend_extern_protected_data 1
#define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
#define elf_backend_obj_attrs_handle_unknown elf32_arm_obj_attrs_handle_unknown
-#undef elf_backend_section_flags
+#undef elf_backend_section_flags
#define elf_backend_section_flags elf32_arm_section_flags
-#undef elf_backend_lookup_section_flags_hook
+#undef elf_backend_lookup_section_flags_hook
#define elf_backend_lookup_section_flags_hook elf32_arm_lookup_section_flags
#include "elf32-target.h"
object file when linking. */
static bfd_boolean
-elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
+elf32_arm_merge_private_bfd_data (bfd *ibfd, struct bfd_link_info *info)
{
+ bfd *obfd = info->output_bfd;
flagword out_flags;
flagword in_flags;
bfd_boolean flags_compatible = TRUE;
asection *sec;
/* Check if we have the same endianness. */
- if (! _bfd_generic_verify_endian_match (ibfd, obfd))
+ if (! _bfd_generic_verify_endian_match (ibfd, info))
return FALSE;
if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
return TRUE;
- if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
+ if (!elf32_arm_merge_eabi_attributes (ibfd, info))
return FALSE;
/* The input BFD must have had its flags initialised. */
#define elf_backend_default_use_rela_p 0
#undef elf_backend_want_plt_sym
#define elf_backend_want_plt_sym 0
+#undef elf_backend_dtrel_excludes_plt
+#define elf_backend_dtrel_excludes_plt 0
#undef ELF_MAXPAGESIZE
#define ELF_MAXPAGESIZE 0x8000