/* 32-bit ELF support for ARM
- Copyright (C) 1998-2018 Free Software Foundation, Inc.
+ Copyright (C) 1998-2019 Free Software Foundation, Inc.
This file is part of BFD, the Binary File Descriptor library.
#include <limits.h>
#include "bfd.h"
-#include "bfd_stdint.h"
#include "libiberty.h"
#include "libbfd.h"
#include "elf-bfd.h"
0x00000000, /* src_mask. */
0x00000000, /* dst_mask. */
FALSE), /* pcrel_offset. */
+ /* Relocations for Armv8.1-M Mainline. */
+ HOWTO (R_ARM_THM_BF16, /* type. */
+ 0, /* rightshift. */
+ 1, /* size (0 = byte, 1 = short, 2 = long). */
+ 16, /* bitsize. */
+ TRUE, /* pc_relative. */
+ 0, /* bitpos. */
+ complain_overflow_dont,/* do not complain_on_overflow. */
+ bfd_elf_generic_reloc, /* special_function. */
+ "R_ARM_THM_BF16", /* name. */
+ FALSE, /* partial_inplace. */
+ 0x001f0ffe, /* src_mask. */
+ 0x001f0ffe, /* dst_mask. */
+ TRUE), /* pcrel_offset. */
+ HOWTO (R_ARM_THM_BF12, /* type. */
+ 0, /* rightshift. */
+ 1, /* size (0 = byte, 1 = short, 2 = long). */
+ 12, /* bitsize. */
+ TRUE, /* pc_relative. */
+ 0, /* bitpos. */
+ complain_overflow_dont,/* do not complain_on_overflow. */
+ bfd_elf_generic_reloc, /* special_function. */
+ "R_ARM_THM_BF12", /* name. */
+ FALSE, /* partial_inplace. */
+ 0x00010ffe, /* src_mask. */
+ 0x00010ffe, /* dst_mask. */
+ TRUE), /* pcrel_offset. */
+ HOWTO (R_ARM_THM_BF18, /* type. */
+ 0, /* rightshift. */
+ 1, /* size (0 = byte, 1 = short, 2 = long). */
+ 18, /* bitsize. */
+ TRUE, /* pc_relative. */
+ 0, /* bitpos. */
+ complain_overflow_dont,/* do not complain_on_overflow. */
+ bfd_elf_generic_reloc, /* special_function. */
+ "R_ARM_THM_BF18", /* name. */
+ FALSE, /* partial_inplace. */
+ 0x007f0ffe, /* src_mask. */
+ 0x007f0ffe, /* dst_mask. */
+ TRUE), /* pcrel_offset. */
};
/* 160 onwards: */
-static reloc_howto_type elf32_arm_howto_table_2[1] =
+static reloc_howto_type elf32_arm_howto_table_2[8] =
{
HOWTO (R_ARM_IRELATIVE, /* type */
0, /* rightshift */
TRUE, /* partial_inplace */
0xffffffff, /* src_mask */
0xffffffff, /* dst_mask */
- FALSE) /* pcrel_offset */
+ FALSE), /* pcrel_offset */
+ HOWTO (R_ARM_GOTFUNCDESC, /* type */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 32, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield,/* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "R_ARM_GOTFUNCDESC", /* name */
+ FALSE, /* partial_inplace */
+ 0, /* src_mask */
+ 0xffffffff, /* dst_mask */
+ FALSE), /* pcrel_offset */
+ HOWTO (R_ARM_GOTOFFFUNCDESC, /* type */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 32, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield,/* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "R_ARM_GOTOFFFUNCDESC",/* name */
+ FALSE, /* partial_inplace */
+ 0, /* src_mask */
+ 0xffffffff, /* dst_mask */
+ FALSE), /* pcrel_offset */
+ HOWTO (R_ARM_FUNCDESC, /* type */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 32, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield,/* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "R_ARM_FUNCDESC", /* name */
+ FALSE, /* partial_inplace */
+ 0, /* src_mask */
+ 0xffffffff, /* dst_mask */
+ FALSE), /* pcrel_offset */
+ HOWTO (R_ARM_FUNCDESC_VALUE, /* type */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 64, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield,/* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "R_ARM_FUNCDESC_VALUE",/* name */
+ FALSE, /* partial_inplace */
+ 0, /* src_mask */
+ 0xffffffff, /* dst_mask */
+ FALSE), /* pcrel_offset */
+ HOWTO (R_ARM_TLS_GD32_FDPIC, /* type */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 32, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield,/* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "R_ARM_TLS_GD32_FDPIC",/* name */
+ FALSE, /* partial_inplace */
+ 0, /* src_mask */
+ 0xffffffff, /* dst_mask */
+ FALSE), /* pcrel_offset */
+ HOWTO (R_ARM_TLS_LDM32_FDPIC, /* type */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 32, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield,/* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "R_ARM_TLS_LDM32_FDPIC",/* name */
+ FALSE, /* partial_inplace */
+ 0, /* src_mask */
+ 0xffffffff, /* dst_mask */
+ FALSE), /* pcrel_offset */
+ HOWTO (R_ARM_TLS_IE32_FDPIC, /* type */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 32, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield,/* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "R_ARM_TLS_IE32_FDPIC",/* name */
+ FALSE, /* partial_inplace */
+ 0, /* src_mask */
+ 0xffffffff, /* dst_mask */
+ FALSE), /* pcrel_offset */
};
/* 249-255 extended, currently unused, relocations: */
if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
return &elf32_arm_howto_table_1[r_type];
- if (r_type == R_ARM_IRELATIVE)
+ if (r_type >= R_ARM_IRELATIVE
+ && r_type < R_ARM_IRELATIVE + ARRAY_SIZE (elf32_arm_howto_table_2))
return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];
if (r_type >= R_ARM_RREL32
{BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
{BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
{BFD_RELOC_ARM_IRELATIVE, R_ARM_IRELATIVE},
+ {BFD_RELOC_ARM_GOTFUNCDESC, R_ARM_GOTFUNCDESC},
+ {BFD_RELOC_ARM_GOTOFFFUNCDESC, R_ARM_GOTOFFFUNCDESC},
+ {BFD_RELOC_ARM_FUNCDESC, R_ARM_FUNCDESC},
+ {BFD_RELOC_ARM_FUNCDESC_VALUE, R_ARM_FUNCDESC_VALUE},
+ {BFD_RELOC_ARM_TLS_GD32_FDPIC, R_ARM_TLS_GD32_FDPIC},
+ {BFD_RELOC_ARM_TLS_LDM32_FDPIC, R_ARM_TLS_LDM32_FDPIC},
+ {BFD_RELOC_ARM_TLS_IE32_FDPIC, R_ARM_TLS_IE32_FDPIC},
{BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
{BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
{BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
{BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC, R_ARM_THM_ALU_ABS_G3_NC},
{BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC, R_ARM_THM_ALU_ABS_G2_NC},
{BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC, R_ARM_THM_ALU_ABS_G1_NC},
- {BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC, R_ARM_THM_ALU_ABS_G0_NC}
+ {BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC, R_ARM_THM_ALU_ABS_G0_NC},
+ {BFD_RELOC_ARM_THUMB_BF17, R_ARM_THM_BF16},
+ {BFD_RELOC_ARM_THUMB_BF13, R_ARM_THM_BF12},
+ {BFD_RELOC_ARM_THUMB_BF19, R_ARM_THM_BF18}
};
static reloc_howto_type *
case NT_PRPSINFO:
{
- char data[124];
+ char data[124] ATTRIBUTE_NONSTRING;
va_list ap;
va_start (ap, note_type);
memset (data, 0, sizeof (data));
strncpy (data + 28, va_arg (ap, const char *), 16);
+#if GCC_VERSION == 8000 || GCC_VERSION == 8001
+ DIAGNOSTIC_PUSH;
+      /* GCC 8.0 and 8.1 warn with -Wstringop-truncation because the
+	 copy length (80) equals the size of the remaining destination
+	 buffer: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85643
+      */
+ DIAGNOSTIC_IGNORE_STRINGOP_TRUNCATION;
+#endif
strncpy (data + 44, va_arg (ap, const char *), 80);
+#if GCC_VERSION == 8000 || GCC_VERSION == 8001
+ DIAGNOSTIC_POP;
+#endif
va_end (ap);
return elfcore_write_note (abfd, buf, bufsiz,
section. */
#define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
+/* FDPIC default stack size. */
+#define DEFAULT_STACK_SIZE 0x8000
+
static const unsigned long tls_trampoline [] =
{
0xe08e0000, /* add r0, lr, r0 */
0x00000018, /* 4: .word _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
};
+/* ARM FDPIC PLT entry. */
+/* The last 5 words contain PLT lazy fragment code and data.
+   NOTE(review): r9 appears to serve as the FDPIC GOT base register here
+   (it is added to the loaded offset and then reloaded from the
+   descriptor) -- confirm against the ARM FDPIC ABI.  */
+static const bfd_vma elf32_arm_fdpic_plt_entry [] =
+  {
+    0xe59fc008,    /* ldr r12, .L1 */
+    0xe08cc009,    /* add r12, r12, r9 */
+    0xe59c9004,    /* ldr r9, [r12, #4] */
+    0xe59cf000,    /* ldr pc, [r12] */
+    0x00000000,    /* L1. .word foo(GOTOFFFUNCDESC) */
+    0x00000000,    /* L2. .word foo(funcdesc_value_reloc_offset) */
+    0xe51fc00c,    /* ldr r12, [pc, #-12] */
+    0xe92d1000,    /* push {r12} */
+    0xe599c004,    /* ldr r12, [r9, #4] */
+    0xe599f000,    /* ldr pc, [r9] */
+  };
+
+/* Thumb FDPIC PLT entry. */
+/* The last 5 words contain PLT lazy fragment code and data.
+   These are all 32-bit Thumb-2 encodings stored as words, which is why
+   they are emitted with put_arm_insn rather than put_thumb_insn (see
+   the comment at the point of emission).  */
+static const bfd_vma elf32_arm_fdpic_thumb_plt_entry [] =
+  {
+    0xc00cf8df,    /* ldr.w r12, .L1 */
+    0x0c09eb0c,    /* add.w r12, r12, r9 */
+    0x9004f8dc,    /* ldr.w r9, [r12, #4] */
+    0xf000f8dc,    /* ldr.w pc, [r12] */
+    0x00000000,    /* .L1 .word foo(GOTOFFFUNCDESC) */
+    0x00000000,    /* .L2 .word foo(funcdesc_value_reloc_offset) */
+    0xc008f85f,    /* ldr.w r12, .L2 */
+    0xcd04f84d,    /* push {r12} */
+    0xc004f8d9,    /* ldr.w r12, [r9, #4] */
+    0xf000f8d9,    /* ldr.w pc, [r9] */
+  };
+
#ifdef FOUR_WORD_PLT
/* The first entry in a procedure linkage table looks like
struct elf_dyn_relocs *dyn_relocs;
};
+/* Structure to handle FDPIC support for local functions.
+   One instance per local symbol; zero-allocated alongside the other
+   per-symbol linker data (see the bfd_zalloc of local symbol info).  */
+struct fdpic_local {
+  /* Reference counter -- presumably counts R_ARM_FUNCDESC-style uses;
+     maintained outside this view (check_relocs).  */
+  unsigned int funcdesc_cnt;
+  /* Likewise, presumably for R_ARM_GOTOFFFUNCDESC uses.  */
+  unsigned int gotofffuncdesc_cnt;
+  /* GOT offset of the function descriptor; the low bit marks the
+     descriptor as already written (see arm_elf_fill_funcdesc).  */
+  int funcdesc_offset;
+};
+
struct elf_arm_obj_tdata
{
struct elf_obj_tdata root;
/* Zero to warn when linking objects with incompatible wchar_t sizes. */
int no_wchar_size_warning;
+
+ /* Maintains FDPIC counters and funcdesc info. */
+ struct fdpic_local *local_fdpic_cnts;
};
#define elf_arm_tdata(bfd) \
#define elf32_arm_local_iplt(bfd) \
(elf_arm_tdata (bfd)->local_iplt)
+#define elf32_arm_local_fdpic_cnts(bfd) \
+ (elf_arm_tdata (bfd)->local_fdpic_cnts)
+
#define is_arm_elf(bfd) \
(bfd_get_flavour (bfd) == bfd_target_elf_flavour \
&& elf_tdata (bfd) != NULL \
#define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
+/* Structure to handle FDPIC support for extern functions.
+   Embedded in each ARM ELF link hash entry; the counters of an indirect
+   symbol are merged into its target when symbols are combined.  */
+struct fdpic_global {
+  /* Reference counters, presumably one per FDPIC relocation kind as
+     named -- maintained outside this view (check_relocs).  */
+  unsigned int gotofffuncdesc_cnt;
+  unsigned int gotfuncdesc_cnt;
+  unsigned int funcdesc_cnt;
+  /* GOT offset of the function descriptor itself; low bit set once the
+     descriptor value has been written (see arm_elf_fill_funcdesc).
+     Initialized to -1.  */
+  int funcdesc_offset;
+  /* GOT offset of the slot holding the descriptor's address; low bit
+     set once its dynamic reloc/rofixup has been emitted.  Initialized
+     to -1.  */
+  int gotfuncdesc_offset;
+};
+
/* Arm ELF linker hash entry. */
struct elf32_arm_link_hash_entry
{
/* A pointer to the most recently used stub hash entry against this
symbol. */
struct elf32_arm_stub_hash_entry *stub_cache;
+
+ /* Counter for FDPIC relocations against this symbol. */
+ struct fdpic_global fdpic_cnts;
};
/* Traverse an arm ELF linker hash table. */
/* Offset in .plt section of tls_arm_trampoline. */
bfd_vma tls_trampoline;
- /* Data for R_ARM_TLS_LDM32 relocations. */
+ /* Data for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations. */
union
{
bfd_signed_vma refcount;
/* True if the target system uses FDPIC. */
int fdpic_p;
+
+ /* Fixup section. Used for FDPIC. */
+ asection *srofixup;
};
+/* Add an FDPIC read-only fixup: append the address OFFSET to the
+   .rofixup section SROFIXUP of OUTPUT_BFD.  SROFIXUP's reloc_count is
+   (ab)used as the running count of fixups already emitted.  */
+static void
+arm_elf_add_rofixup (bfd *output_bfd, asection *srofixup, bfd_vma offset)
+{
+  bfd_vma fixup_offset;
+
+  /* Each fixup occupies one 32-bit word.  */
+  fixup_offset = srofixup->reloc_count++ * 4;
+  /* The section must have been sized in advance to hold every fixup.  */
+  BFD_ASSERT (fixup_offset < srofixup->size);
+  bfd_put_32 (output_bfd, offset, srofixup->contents + fixup_offset);
+}
+
static inline int
ctz (unsigned int mask)
{
#endif
}
+static void elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
+                                    asection *sreloc, Elf_Internal_Rela *rel);
+
+/* Write a function descriptor (two GOT words) into the GOT of
+   OUTPUT_BFD at OFFSET, once only: the low bit of *FUNCDESC_OFFSET
+   records whether the descriptor has already been written, and is set
+   here after writing.  When linking PIC, emit an R_ARM_FUNCDESC_VALUE
+   dynamic relocation against DYNINDX and store ADDR and SEG; otherwise
+   store DYNRELOC_VALUE and the GOT base value, registering a read-only
+   fixup for each of the two words.  */
+static void
+arm_elf_fill_funcdesc(bfd *output_bfd,
+                      struct bfd_link_info *info,
+                      int *funcdesc_offset,
+                      int dynindx,
+                      int offset,
+                      bfd_vma addr,
+                      bfd_vma dynreloc_value,
+                      bfd_vma seg)
+{
+  /* Low bit clear means the descriptor has not been emitted yet.  */
+  if ((*funcdesc_offset & 1) == 0)
+    {
+      struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
+      asection *sgot = globals->root.sgot;
+
+      if (bfd_link_pic(info))
+	{
+	  asection *srelgot = globals->root.srelgot;
+	  Elf_Internal_Rela outrel;
+
+	  /* Let the dynamic linker relocate the descriptor.  */
+	  outrel.r_info = ELF32_R_INFO (dynindx, R_ARM_FUNCDESC_VALUE);
+	  outrel.r_offset = sgot->output_section->vma + sgot->output_offset + offset;
+	  outrel.r_addend = 0;
+
+	  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
+	  bfd_put_32 (output_bfd, addr, sgot->contents + offset);
+	  bfd_put_32 (output_bfd, seg, sgot->contents + offset + 4);
+	}
+      else
+	{
+	  /* Static link: resolve the descriptor now, against the GOT
+	     base symbol's final value, and emit load-time rofixups so
+	     the loader can relocate both words.  */
+	  struct elf_link_hash_entry *hgot = globals->root.hgot;
+	  bfd_vma got_value = hgot->root.u.def.value
+	    + hgot->root.u.def.section->output_section->vma
+	    + hgot->root.u.def.section->output_offset;
+
+	  arm_elf_add_rofixup(output_bfd, globals->srofixup,
+			      sgot->output_section->vma + sgot->output_offset
+			      + offset);
+	  arm_elf_add_rofixup(output_bfd, globals->srofixup,
+			      sgot->output_section->vma + sgot->output_offset
+			      + offset + 4);
+	  bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + offset);
+	  bfd_put_32 (output_bfd, got_value, sgot->contents + offset + 4);
+	}
+      /* Mark the descriptor as written.  */
+      *funcdesc_offset |= 1;
+    }
+}
+
/* Create an entry in an ARM ELF linker hash table. */
static struct bfd_hash_entry *
ret->export_glue = NULL;
ret->stub_cache = NULL;
+
+ ret->fdpic_cnts.gotofffuncdesc_cnt = 0;
+ ret->fdpic_cnts.gotfuncdesc_cnt = 0;
+ ret->fdpic_cnts.funcdesc_cnt = 0;
+ ret->fdpic_cnts.funcdesc_offset = -1;
+ ret->fdpic_cnts.gotfuncdesc_offset = -1;
}
return (struct bfd_hash_entry *) ret;
size = num_syms * (sizeof (bfd_signed_vma)
+ sizeof (struct arm_local_iplt_info *)
+ sizeof (bfd_vma)
- + sizeof (char));
+ + sizeof (char)
+ + sizeof (struct fdpic_local));
data = bfd_zalloc (abfd, size);
if (data == NULL)
return FALSE;
+ elf32_arm_local_fdpic_cnts (abfd) = (struct fdpic_local *) data;
+ data += num_syms * sizeof (struct fdpic_local);
+
elf_local_got_refcounts (abfd) = (bfd_signed_vma *) data;
data += num_syms * sizeof (bfd_signed_vma);
return TRUE;
}
+static bfd_boolean using_thumb_only (struct elf32_arm_link_hash_table *globals);
+
/* Return true if the PLT described by ARM_PLT requires a Thumb stub
before it. */
struct elf32_arm_link_hash_table *htab;
htab = elf32_arm_hash_table (info);
- return (arm_plt->thumb_refcount != 0
- || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0));
+
+ return (!using_thumb_only(htab) && (arm_plt->thumb_refcount != 0
+ || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0)));
}
/* Return a pointer to the head of the dynamic reloc list that should
if (! _bfd_elf_create_got_section (dynobj, info))
return FALSE;
+ /* Also create .rofixup. */
+ if (htab->fdpic_p)
+ {
+ htab->srofixup = bfd_make_section_with_flags (dynobj, ".rofixup",
+ (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS
+ | SEC_IN_MEMORY | SEC_LINKER_CREATED | SEC_READONLY));
+ if (htab->srofixup == NULL || ! bfd_set_section_alignment (dynobj, htab->srofixup, 2))
+ return FALSE;
+ }
+
return TRUE;
}
arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
/* Force return logic to be reviewed for each new architecture. */
- BFD_ASSERT (arch <= TAG_CPU_ARCH_V8M_MAIN);
+ BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
if (arch == TAG_CPU_ARCH_V6_M
|| arch == TAG_CPU_ARCH_V6S_M
|| arch == TAG_CPU_ARCH_V7E_M
|| arch == TAG_CPU_ARCH_V8M_BASE
- || arch == TAG_CPU_ARCH_V8M_MAIN)
+ || arch == TAG_CPU_ARCH_V8M_MAIN
+ || arch == TAG_CPU_ARCH_V8_1M_MAIN)
return TRUE;
return FALSE;
arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
/* Force return logic to be reviewed for each new architecture. */
- BFD_ASSERT (arch <= TAG_CPU_ARCH_V8M_MAIN);
+ BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
return (arch == TAG_CPU_ARCH_V6T2
|| arch == TAG_CPU_ARCH_V7
|| arch == TAG_CPU_ARCH_V7E_M
|| arch == TAG_CPU_ARCH_V8
|| arch == TAG_CPU_ARCH_V8R
- || arch == TAG_CPU_ARCH_V8M_MAIN);
+ || arch == TAG_CPU_ARCH_V8M_MAIN
+ || arch == TAG_CPU_ARCH_V8_1M_MAIN);
}
/* Determine whether Thumb-2 BL instruction is available. */
bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
/* Force return logic to be reviewed for each new architecture. */
- BFD_ASSERT (arch <= TAG_CPU_ARCH_V8M_MAIN);
+ BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
/* Architecture was introduced after ARMv6T2 (eg. ARMv6-M). */
return (arch == TAG_CPU_ARCH_V6T2
htab->obfd = saved_obfd;
}
+ if (htab->fdpic_p) {
+ htab->plt_header_size = 0;
+ if (info->flags & DF_BIND_NOW)
+ htab->plt_entry_size = 4 * (ARRAY_SIZE(elf32_arm_fdpic_plt_entry) - 5);
+ else
+ htab->plt_entry_size = 4 * ARRAY_SIZE(elf32_arm_fdpic_plt_entry);
+ }
+
if (!htab->root.splt
|| !htab->root.srelplt
|| !htab->root.sdynbss
edir->plt.noncall_refcount += eind->plt.noncall_refcount;
eind->plt.noncall_refcount = 0;
+ /* Copy FDPIC counters. */
+ edir->fdpic_cnts.gotofffuncdesc_cnt += eind->fdpic_cnts.gotofffuncdesc_cnt;
+ edir->fdpic_cnts.gotfuncdesc_cnt += eind->fdpic_cnts.gotfuncdesc_cnt;
+ edir->fdpic_cnts.funcdesc_cnt += eind->fdpic_cnts.funcdesc_cnt;
+
/* We should only allocate a function to .iplt once the final
symbol information is known. */
BFD_ASSERT (!eind->is_iplt);
Tag_CPU_arch);
/* Force return logic to be reviewed for each new architecture. */
- BFD_ASSERT (arch <= TAG_CPU_ARCH_V8M_MAIN);
+ BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
return (arch == TAG_CPU_ARCH_V6T2
|| arch == TAG_CPU_ARCH_V6K
asection *section;
Elf_Internal_Sym *local_syms = NULL;
- if (!is_arm_elf (input_bfd))
+ if (!is_arm_elf (input_bfd)
+ || (elf_dyn_lib_class (input_bfd) & DYN_AS_NEEDED) != 0)
continue;
num_a8_relocs = 0;
s = bfd_get_linker_section (abfd, name);
BFD_ASSERT (s != NULL);
- contents = (bfd_byte *) bfd_alloc (abfd, size);
+ contents = (bfd_byte *) bfd_zalloc (abfd, size);
BFD_ASSERT (s->size == size);
s->contents = contents;
return;
globals->target1_is_rel = params->target1_is_rel;
- if (strcmp (params->target2_type, "rel") == 0)
+ if (globals->fdpic_p)
+ globals->target2_reloc = R_ARM_GOT32;
+ else if (strcmp (params->target2_type, "rel") == 0)
globals->target2_reloc = R_ARM_REL32;
else if (strcmp (params->target2_type, "abs") == 0)
globals->target2_reloc = R_ARM_ABS32;
globals->use_blx |= params->use_blx;
globals->vfp11_fix = params->vfp11_denorm_fix;
globals->stm32l4xx_fix = params->stm32l4xx_fix;
- globals->pic_veneer = params->pic_veneer;
+ if (globals->fdpic_p)
+ globals->pic_veneer = 1;
+ else
+ globals->pic_veneer = params->pic_veneer;
globals->fix_cortex_a8 = params->fix_cortex_a8;
globals->fix_arm1176 = params->fix_arm1176;
globals->cmse_implib = params->cmse_implib;
splt = htab->root.splt;
sgotplt = htab->root.sgotplt;
- /* Allocate room for an R_JUMP_SLOT relocation in .rel.plt. */
- elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
+ if (htab->fdpic_p)
+ {
+ /* Allocate room for R_ARM_FUNCDESC_VALUE. */
+ /* For lazy binding, relocations will be put into .rel.plt, in
+ .rel.got otherwise. */
+ /* FIXME: today we don't support lazy binding so put it in .rel.got */
+ if (info->flags & DF_BIND_NOW)
+ elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
+ else
+ elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
+ }
+ else
+ {
+ /* Allocate room for an R_JUMP_SLOT relocation in .rel.plt. */
+ elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
+ }
/* If this is the first .plt entry, make room for the special
first entry. */
arm_plt->got_offset = sgotplt->size;
else
arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc;
- sgotplt->size += 4;
+ if (htab->fdpic_p)
+ /* Function descriptor takes 64 bits in GOT. */
+ sgotplt->size += 8;
+ else
+ sgotplt->size += 4;
}
}
in all the symbols for which we are making plt entries.
After the reserved .got.plt entries, all symbols appear in
the same order as in .plt. */
- plt_index = (got_offset - got_header_size) / 4;
+ if (htab->fdpic_p)
+ /* Function descriptor takes 8 bytes. */
+ plt_index = (got_offset - got_header_size) / 8;
+ else
+ plt_index = (got_offset - got_header_size) / 4;
/* Calculate the address of the GOT entry. */
got_address = (sgot->output_section->vma
| (tail_displacement & 0x00ffffff),
ptr + 12);
}
+ else if (htab->fdpic_p)
+ {
+ const bfd_vma *plt_entry = using_thumb_only(htab)
+ ? elf32_arm_fdpic_thumb_plt_entry
+ : elf32_arm_fdpic_plt_entry;
+
+	  /* Fill in the Thumb stub if needed. */
+ if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
+ {
+ put_thumb_insn (htab, output_bfd,
+ elf32_arm_plt_thumb_stub[0], ptr - 4);
+ put_thumb_insn (htab, output_bfd,
+ elf32_arm_plt_thumb_stub[1], ptr - 2);
+ }
+ /* As we are using 32 bit instructions even for the Thumb
+ version, we have to use 'put_arm_insn' instead of
+ 'put_thumb_insn'. */
+ put_arm_insn(htab, output_bfd, plt_entry[0], ptr + 0);
+ put_arm_insn(htab, output_bfd, plt_entry[1], ptr + 4);
+ put_arm_insn(htab, output_bfd, plt_entry[2], ptr + 8);
+ put_arm_insn(htab, output_bfd, plt_entry[3], ptr + 12);
+ bfd_put_32 (output_bfd, got_offset, ptr + 16);
+
+ if (!(info->flags & DF_BIND_NOW))
+ {
+ /* funcdesc_value_reloc_offset. */
+ bfd_put_32 (output_bfd,
+ htab->root.srelplt->reloc_count * RELOC_SIZE (htab),
+ ptr + 20);
+ put_arm_insn(htab, output_bfd, plt_entry[6], ptr + 24);
+ put_arm_insn(htab, output_bfd, plt_entry[7], ptr + 28);
+ put_arm_insn(htab, output_bfd, plt_entry[8], ptr + 32);
+ put_arm_insn(htab, output_bfd, plt_entry[9], ptr + 36);
+ }
+ }
else if (using_thumb_only (htab))
{
/* PR ld/16017: Generate thumb only PLT entries. */
}
else
{
- rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT);
- initial_got_entry = (splt->output_section->vma
- + splt->output_offset);
+ /* For FDPIC we will have to resolve a R_ARM_FUNCDESC_VALUE
+ used by PLT entry. */
+ if (htab->fdpic_p)
+ {
+ rel.r_info = ELF32_R_INFO (dynindx, R_ARM_FUNCDESC_VALUE);
+ initial_got_entry = 0;
+ }
+ else
+ {
+ rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT);
+ initial_got_entry = (splt->output_section->vma
+ + splt->output_offset);
+ }
}
/* Fill in the entry in the global offset table. */
bfd_put_32 (output_bfd, initial_got_entry,
sgot->contents + got_offset);
+
+ if (htab->fdpic_p && !(info->flags & DF_BIND_NOW))
+ {
+ /* Setup initial funcdesc value. */
+ /* FIXME: we don't support lazy binding because there is a
+ race condition between both words getting written and
+ some other thread attempting to read them. The ARM
+ architecture does not have an atomic 64 bit load/store
+ instruction that could be used to prevent it; it is
+ recommended that threaded FDPIC applications run with the
+ LD_BIND_NOW environment variable set. */
+ bfd_put_32(output_bfd, plt_address + 0x18,
+ sgot->contents + got_offset);
+ bfd_put_32(output_bfd, -1 /*TODO*/,
+ sgot->contents + got_offset + 4);
+ }
}
if (dynindx == -1)
elf32_arm_add_dynreloc (output_bfd, info, srel, &rel);
else
{
- loc = srel->contents + plt_index * RELOC_SIZE (htab);
- SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
+ if (htab->fdpic_p)
+ {
+	  /* For FDPIC we put PLT relocations into .rel.got when not
+ lazy binding otherwise we put them in .rel.plt. For now,
+ we don't support lazy binding so put it in .rel.got. */
+ if (info->flags & DF_BIND_NOW)
+ elf32_arm_add_dynreloc(output_bfd, info, htab->root.srelgot, &rel);
+ else
+ elf32_arm_add_dynreloc(output_bfd, info, htab->root.srelplt, &rel);
+ }
+ else
+ {
+ loc = srel->contents + plt_index * RELOC_SIZE (htab);
+ SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
+ }
}
return TRUE;
relocations are copied into the output file to be resolved at
run time. */
if ((bfd_link_pic (info)
- || globals->root.is_relocatable_executable)
+ || globals->root.is_relocatable_executable
+ || globals->fdpic_p)
&& (input_section->flags & SEC_ALLOC)
&& !(globals->vxworks_p
&& strcmp (input_section->output_section->name,
{
Elf_Internal_Rela outrel;
bfd_boolean skip, relocate;
+ int isrofixup = 0;
if ((r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
&& !h->def_regular)
int symbol;
/* This symbol is local, or marked to become local. */
- BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI);
+ BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI
+ || (globals->fdpic_p && !bfd_link_pic(info)));
if (globals->symbian_p)
{
asection *osec;
must use an R_ARM_IRELATIVE relocation to obtain the
correct run-time address. */
outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE);
+ else if (globals->fdpic_p && !bfd_link_pic(info))
+ isrofixup = 1;
else
outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
if (globals->use_rel)
outrel.r_addend += dynreloc_value;
}
- elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel);
+ if (isrofixup)
+ arm_elf_add_rofixup(output_bfd, globals->srofixup, outrel.r_offset);
+ else
+ elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel);
/* If this reloc is against an external symbol, we do not want to
fiddle with the addend. Otherwise, we need to include the symbol
/* PR 21523: Use an absolute value. The user of this reloc will
have already selected an ADD or SUB insn appropriately. */
- value = labs (relocation);
+ value = llabs (relocation);
if (value >= 0x1000)
return bfd_reloc_overflow;
else
{
Elf_Internal_Rela outrel;
+ int isrofixup = 0;
- if (h->dynindx != -1 && !SYMBOL_REFERENCES_LOCAL (info, h))
+ if (((h->dynindx != -1) || globals->fdpic_p)
+ && !SYMBOL_REFERENCES_LOCAL (info, h))
{
/* If the symbol doesn't resolve locally in a static
object, we have an undefined reference. If the
|| h->root.type != bfd_link_hash_undefweak))
outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
else
- outrel.r_info = 0;
+ {
+ outrel.r_info = 0;
+ if (globals->fdpic_p)
+ isrofixup = 1;
+ }
outrel.r_addend = dynreloc_value;
}
/* The GOT entry is initialized to zero by default.
See if we should install a different value. */
if (outrel.r_addend != 0
- && (outrel.r_info == 0 || globals->use_rel))
+ && (globals->use_rel || outrel.r_info == 0))
{
bfd_put_32 (output_bfd, outrel.r_addend,
sgot->contents + off);
outrel.r_addend = 0;
}
- if (outrel.r_info != 0)
+ if (isrofixup)
+ arm_elf_add_rofixup (output_bfd,
+ elf32_arm_hash_table(info)->srofixup,
+ sgot->output_section->vma
+ + sgot->output_offset + off);
+
+ else if (outrel.r_info != 0)
{
outrel.r_offset = (sgot->output_section->vma
+ sgot->output_offset
+ off);
elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
}
+
h->got.offset |= 1;
}
value = sgot->output_offset + off;
off &= ~1;
else
{
- if (globals->use_rel)
- bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off);
+ Elf_Internal_Rela outrel;
+ int isrofixup = 0;
- if (bfd_link_pic (info) || dynreloc_st_type == STT_GNU_IFUNC)
+ if (dynreloc_st_type == STT_GNU_IFUNC)
+ outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
+ else if (bfd_link_pic (info))
+ outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
+ else
{
- Elf_Internal_Rela outrel;
+ outrel.r_info = 0;
+ if (globals->fdpic_p)
+ isrofixup = 1;
+ }
+ /* The GOT entry is initialized to zero by default.
+ See if we should install a different value. */
+ if (globals->use_rel || outrel.r_info == 0)
+ bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off);
+
+ if (isrofixup)
+ arm_elf_add_rofixup (output_bfd,
+ globals->srofixup,
+ sgot->output_section->vma
+ + sgot->output_offset + off);
+
+ else if (outrel.r_info != 0)
+ {
outrel.r_addend = addend + dynreloc_value;
outrel.r_offset = (sgot->output_section->vma
+ sgot->output_offset
+ off);
- if (dynreloc_st_type == STT_GNU_IFUNC)
- outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
- else
- outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
}
rel->r_addend);
case R_ARM_TLS_LDM32:
+ case R_ARM_TLS_LDM32_FDPIC:
{
bfd_vma off;
globals->tls_ldm_got.offset |= 1;
}
- value = sgot->output_section->vma + sgot->output_offset + off
- - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
+ if (r_type == R_ARM_TLS_LDM32_FDPIC)
+ {
+ bfd_put_32(output_bfd,
+ globals->root.sgot->output_offset + off,
+ contents + rel->r_offset);
+
+ return bfd_reloc_ok;
+ }
+ else
+ {
+ value = sgot->output_section->vma + sgot->output_offset + off
+ - (input_section->output_section->vma
+ + input_section->output_offset + rel->r_offset);
- return _bfd_final_link_relocate (howto, input_bfd, input_section,
- contents, rel->r_offset, value,
- rel->r_addend);
+ return _bfd_final_link_relocate (howto, input_bfd, input_section,
+ contents, rel->r_offset, value,
+ rel->r_addend);
+ }
}
case R_ARM_TLS_CALL:
case R_ARM_THM_TLS_CALL:
case R_ARM_TLS_GD32:
+ case R_ARM_TLS_GD32_FDPIC:
case R_ARM_TLS_IE32:
+ case R_ARM_TLS_IE32_FDPIC:
case R_ARM_TLS_GOTDESC:
case R_ARM_TLS_DESCSEQ:
case R_ARM_THM_TLS_DESCSEQ:
local_got_offsets[r_symndx] |= 1;
}
- if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
+ if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32 && r_type != R_ARM_TLS_GD32_FDPIC)
off += 8;
else if (tls_type & GOT_TLS_GDESC)
off = offplt;
- (input_section->output_section->vma
+ input_section->output_offset + rel->r_offset));
- return _bfd_final_link_relocate (howto, input_bfd, input_section,
- contents, rel->r_offset, value,
- rel->r_addend);
+ if (globals->fdpic_p && (r_type == R_ARM_TLS_GD32_FDPIC ||
+ r_type == R_ARM_TLS_IE32_FDPIC))
+ {
+ /* For FDPIC relocations, resolve to the offset of the GOT
+ entry from the start of GOT. */
+ bfd_put_32(output_bfd,
+ globals->root.sgot->output_offset + off,
+ contents + rel->r_offset);
+
+ return bfd_reloc_ok;
+ }
+ else
+ {
+ return _bfd_final_link_relocate (howto, input_bfd, input_section,
+ contents, rel->r_offset, value,
+ rel->r_addend);
+ }
}
case R_ARM_TLS_LE32:
*unresolved_reloc_p = FALSE;
return bfd_reloc_ok;
- default:
- return bfd_reloc_notsupported;
- }
-}
+ case R_ARM_GOTOFFFUNCDESC:
+ {
+ if (h == NULL)
+ {
+ struct fdpic_local *local_fdpic_cnts = elf32_arm_local_fdpic_cnts(input_bfd);
+ int dynindx = elf_section_data (sym_sec->output_section)->dynindx;
+ int offset = local_fdpic_cnts[r_symndx].funcdesc_offset & ~1;
+ bfd_vma addr = dynreloc_value - sym_sec->output_section->vma;
+ bfd_vma seg = -1;
+
+ if (bfd_link_pic(info) && dynindx == 0)
+ abort();
+
+ /* Resolve relocation. */
+ bfd_put_32(output_bfd, (offset + sgot->output_offset)
+ , contents + rel->r_offset);
+ /* Emit R_ARM_FUNCDESC_VALUE or two fixups on funcdesc if
+ not done yet. */
+ arm_elf_fill_funcdesc(output_bfd, info,
+ &local_fdpic_cnts[r_symndx].funcdesc_offset,
+ dynindx, offset, addr, dynreloc_value, seg);
+ }
+ else
+ {
+ int dynindx;
+ int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
+ bfd_vma addr;
+ bfd_vma seg = -1;
-/* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
-static void
-arm_add_to_rel (bfd * abfd,
- bfd_byte * address,
- reloc_howto_type * howto,
- bfd_signed_vma increment)
-{
- bfd_signed_vma addend;
+ /* For static binaries, sym_sec can be null. */
+ if (sym_sec)
+ {
+ dynindx = elf_section_data (sym_sec->output_section)->dynindx;
+ addr = dynreloc_value - sym_sec->output_section->vma;
+ }
+ else
+ {
+ dynindx = 0;
+ addr = 0;
+ }
- if (howto->type == R_ARM_THM_CALL
- || howto->type == R_ARM_THM_JUMP24)
- {
- int upper_insn, lower_insn;
- int upper, lower;
+ if (bfd_link_pic(info) && dynindx == 0)
+ abort();
+
+ /* This case cannot occur since funcdesc is allocated by
+ the dynamic loader so we cannot resolve the relocation. */
+ if (h->dynindx != -1)
+ abort();
+
+ /* Resolve relocation. */
+ bfd_put_32(output_bfd, (offset + sgot->output_offset),
+ contents + rel->r_offset);
+ /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
+ arm_elf_fill_funcdesc(output_bfd, info,
+ &eh->fdpic_cnts.funcdesc_offset,
+ dynindx, offset, addr, dynreloc_value, seg);
+ }
+ }
+ *unresolved_reloc_p = FALSE;
+ return bfd_reloc_ok;
- upper_insn = bfd_get_16 (abfd, address);
- lower_insn = bfd_get_16 (abfd, address + 2);
- upper = upper_insn & 0x7ff;
- lower = lower_insn & 0x7ff;
+ case R_ARM_GOTFUNCDESC:
+ {
+ if (h != NULL)
+ {
+ Elf_Internal_Rela outrel;
- addend = (upper << 12) | (lower << 1);
+ /* Resolve relocation. */
+ bfd_put_32(output_bfd, ((eh->fdpic_cnts.gotfuncdesc_offset & ~1)
+ + sgot->output_offset),
+ contents + rel->r_offset);
+ /* Add funcdesc and associated R_ARM_FUNCDESC_VALUE. */
+ if(h->dynindx == -1)
+ {
+ int dynindx;
+ int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
+ bfd_vma addr;
+ bfd_vma seg = -1;
+
+ /* For static binaries sym_sec can be null. */
+ if (sym_sec)
+ {
+ dynindx = elf_section_data (sym_sec->output_section)->dynindx;
+ addr = dynreloc_value - sym_sec->output_section->vma;
+ }
+ else
+ {
+ dynindx = 0;
+ addr = 0;
+ }
+
+ /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
+ arm_elf_fill_funcdesc(output_bfd, info,
+ &eh->fdpic_cnts.funcdesc_offset,
+ dynindx, offset, addr, dynreloc_value, seg);
+ }
+
+ /* Add a dynamic relocation on GOT entry if not already done. */
+ if ((eh->fdpic_cnts.gotfuncdesc_offset & 1) == 0)
+ {
+ if (h->dynindx == -1)
+ {
+ outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
+ if (h->root.type == bfd_link_hash_undefweak)
+ bfd_put_32(output_bfd, 0, sgot->contents
+ + (eh->fdpic_cnts.gotfuncdesc_offset & ~1));
+ else
+ bfd_put_32(output_bfd, sgot->output_section->vma
+ + sgot->output_offset
+ + (eh->fdpic_cnts.funcdesc_offset & ~1),
+ sgot->contents
+ + (eh->fdpic_cnts.gotfuncdesc_offset & ~1));
+ }
+ else
+ {
+ outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_FUNCDESC);
+ }
+ outrel.r_offset = sgot->output_section->vma
+ + sgot->output_offset
+ + (eh->fdpic_cnts.gotfuncdesc_offset & ~1);
+ outrel.r_addend = 0;
+ if (h->dynindx == -1 && !bfd_link_pic(info))
+ if (h->root.type == bfd_link_hash_undefweak)
+ arm_elf_add_rofixup(output_bfd, globals->srofixup, -1);
+ else
+ arm_elf_add_rofixup(output_bfd, globals->srofixup,
+ outrel.r_offset);
+ else
+ elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
+ eh->fdpic_cnts.gotfuncdesc_offset |= 1;
+ }
+ }
+ else
+ {
+ /* Such a relocation on a static function should not have
+ been emitted by the compiler. */
+ abort();
+ }
+ }
+ *unresolved_reloc_p = FALSE;
+ return bfd_reloc_ok;
+
+ case R_ARM_FUNCDESC:
+ {
+ if (h == NULL)
+ {
+ struct fdpic_local *local_fdpic_cnts = elf32_arm_local_fdpic_cnts(input_bfd);
+ Elf_Internal_Rela outrel;
+ int dynindx = elf_section_data (sym_sec->output_section)->dynindx;
+ int offset = local_fdpic_cnts[r_symndx].funcdesc_offset & ~1;
+ bfd_vma addr = dynreloc_value - sym_sec->output_section->vma;
+ bfd_vma seg = -1;
+
+ if (bfd_link_pic(info) && dynindx == 0)
+ abort();
+
+ /* Replace static FUNCDESC relocation with a
+ R_ARM_RELATIVE dynamic relocation or with a rofixup for
+ executable. */
+ outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
+ outrel.r_offset = input_section->output_section->vma
+ + input_section->output_offset + rel->r_offset;
+ outrel.r_addend = 0;
+ if (bfd_link_pic(info))
+ elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
+ else
+ arm_elf_add_rofixup(output_bfd, globals->srofixup, outrel.r_offset);
+
+ bfd_put_32 (input_bfd, sgot->output_section->vma
+ + sgot->output_offset + offset, hit_data);
+
+ /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
+ arm_elf_fill_funcdesc(output_bfd, info,
+ &local_fdpic_cnts[r_symndx].funcdesc_offset,
+ dynindx, offset, addr, dynreloc_value, seg);
+ }
+ else
+ {
+ if (h->dynindx == -1)
+ {
+ int dynindx;
+ int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
+ bfd_vma addr;
+ bfd_vma seg = -1;
+ Elf_Internal_Rela outrel;
+
+ /* For static binaries sym_sec can be null. */
+ if (sym_sec)
+ {
+ dynindx = elf_section_data (sym_sec->output_section)->dynindx;
+ addr = dynreloc_value - sym_sec->output_section->vma;
+ }
+ else
+ {
+ dynindx = 0;
+ addr = 0;
+ }
+
+ if (bfd_link_pic(info) && dynindx == 0)
+ abort();
+
+ /* Replace static FUNCDESC relocation with a
+ R_ARM_RELATIVE dynamic relocation. */
+ outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
+ outrel.r_offset = input_section->output_section->vma
+ + input_section->output_offset + rel->r_offset;
+ outrel.r_addend = 0;
+ if (bfd_link_pic(info))
+ elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
+ else
+ arm_elf_add_rofixup(output_bfd, globals->srofixup, outrel.r_offset);
+
+ bfd_put_32 (input_bfd, sgot->output_section->vma
+ + sgot->output_offset + offset, hit_data);
+
+ /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
+ arm_elf_fill_funcdesc(output_bfd, info,
+ &eh->fdpic_cnts.funcdesc_offset,
+ dynindx, offset, addr, dynreloc_value, seg);
+ }
+ else
+ {
+ Elf_Internal_Rela outrel;
+
+ /* Add a dynamic relocation. */
+ outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_FUNCDESC);
+ outrel.r_offset = input_section->output_section->vma
+ + input_section->output_offset + rel->r_offset;
+ outrel.r_addend = 0;
+ elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
+ }
+ }
+ }
+ *unresolved_reloc_p = FALSE;
+ return bfd_reloc_ok;
+
+ case R_ARM_THM_BF16:
+ {
+ bfd_vma relocation;
+ bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
+ bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
+
+ if (globals->use_rel)
+ {
+ bfd_vma immA = (upper_insn & 0x001f);
+ bfd_vma immB = (lower_insn & 0x07fe) >> 1;
+ bfd_vma immC = (lower_insn & 0x0800) >> 11;
+ addend = (immA << 12);
+ addend |= (immB << 2);
+ addend |= (immC << 1);
+ addend |= 1;
+ /* Sign extend. */
+ signed_addend = (addend & 0x10000) ? addend - (1 << 17) : addend;
+ }
+
+ relocation = value + signed_addend;
+ relocation -= (input_section->output_section->vma
+ + input_section->output_offset
+ + rel->r_offset);
+
+ /* Put RELOCATION back into the insn. */
+ {
+ bfd_vma immA = (relocation & 0x0001f000) >> 12;
+ bfd_vma immB = (relocation & 0x00000ffc) >> 2;
+ bfd_vma immC = (relocation & 0x00000002) >> 1;
+
+ upper_insn = (upper_insn & 0xffe0) | immA;
+ lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
+ }
+
+ /* Put the relocated value back in the object file: */
+ bfd_put_16 (input_bfd, upper_insn, hit_data);
+ bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
+
+ return bfd_reloc_ok;
+ }
+
+ case R_ARM_THM_BF12:
+ {
+ bfd_vma relocation;
+ bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
+ bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
+
+ if (globals->use_rel)
+ {
+ bfd_vma immA = (upper_insn & 0x0001);
+ bfd_vma immB = (lower_insn & 0x07fe) >> 1;
+ bfd_vma immC = (lower_insn & 0x0800) >> 11;
+ addend = (immA << 12);
+ addend |= (immB << 2);
+ addend |= (immC << 1);
+ addend |= 1;
+ /* Sign extend. */
+ addend = (addend & 0x1000) ? addend - (1 << 13) : addend;
+ signed_addend = addend;
+ }
+
+ relocation = value + signed_addend;
+ relocation -= (input_section->output_section->vma
+ + input_section->output_offset
+ + rel->r_offset);
+
+ /* Put RELOCATION back into the insn. */
+ {
+ bfd_vma immA = (relocation & 0x00001000) >> 12;
+ bfd_vma immB = (relocation & 0x00000ffc) >> 2;
+ bfd_vma immC = (relocation & 0x00000002) >> 1;
+
+ upper_insn = (upper_insn & 0xfffe) | immA;
+ lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
+ }
+
+ /* Put the relocated value back in the object file: */
+ bfd_put_16 (input_bfd, upper_insn, hit_data);
+ bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
+
+ return bfd_reloc_ok;
+ }
+
+ case R_ARM_THM_BF18:
+ {
+ bfd_vma relocation;
+ bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
+ bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
+
+ if (globals->use_rel)
+ {
+ bfd_vma immA = (upper_insn & 0x007f);
+ bfd_vma immB = (lower_insn & 0x07fe) >> 1;
+ bfd_vma immC = (lower_insn & 0x0800) >> 11;
+ addend = (immA << 12);
+ addend |= (immB << 2);
+ addend |= (immC << 1);
+ addend |= 1;
+ /* Sign extend. */
+ addend = (addend & 0x40000) ? addend - (1 << 19) : addend;
+ signed_addend = addend;
+ }
+
+ relocation = value + signed_addend;
+ relocation -= (input_section->output_section->vma
+ + input_section->output_offset
+ + rel->r_offset);
+
+ /* Put RELOCATION back into the insn. */
+ {
+ bfd_vma immA = (relocation & 0x0007f000) >> 12;
+ bfd_vma immB = (relocation & 0x00000ffc) >> 2;
+ bfd_vma immC = (relocation & 0x00000002) >> 1;
+
+ upper_insn = (upper_insn & 0xff80) | immA;
+ lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
+ }
+
+ /* Put the relocated value back in the object file: */
+ bfd_put_16 (input_bfd, upper_insn, hit_data);
+ bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
+
+ return bfd_reloc_ok;
+ }
+
+ default:
+ return bfd_reloc_notsupported;
+ }
+}
+
+/* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
+static void
+arm_add_to_rel (bfd * abfd,
+ bfd_byte * address,
+ reloc_howto_type * howto,
+ bfd_signed_vma increment)
+{
+ bfd_signed_vma addend;
+
+ if (howto->type == R_ARM_THM_CALL
+ || howto->type == R_ARM_THM_JUMP24)
+ {
+ int upper_insn, lower_insn;
+ int upper, lower;
+
+ upper_insn = bfd_get_16 (abfd, address);
+ lower_insn = bfd_get_16 (abfd, address + 2);
+ upper = upper_insn & 0x7ff;
+ lower = lower_insn & 0x7ff;
+
+ addend = (upper << 12) | (lower << 1);
addend += increment;
addend >>= 1;
#define IS_ARM_TLS_RELOC(R_TYPE) \
((R_TYPE) == R_ARM_TLS_GD32 \
+ || (R_TYPE) == R_ARM_TLS_GD32_FDPIC \
|| (R_TYPE) == R_ARM_TLS_LDO32 \
|| (R_TYPE) == R_ARM_TLS_LDM32 \
+ || (R_TYPE) == R_ARM_TLS_LDM32_FDPIC \
|| (R_TYPE) == R_ARM_TLS_DTPOFF32 \
|| (R_TYPE) == R_ARM_TLS_DTPMOD32 \
|| (R_TYPE) == R_ARM_TLS_TPOFF32 \
|| (R_TYPE) == R_ARM_TLS_LE32 \
|| (R_TYPE) == R_ARM_TLS_IE32 \
+ || (R_TYPE) == R_ARM_TLS_IE32_FDPIC \
|| IS_ARM_TLS_GNU_RELOC (R_TYPE))
/* Specific set of relocations for the gnu tls dialect. */
switch (arch)
{
+ case TAG_CPU_ARCH_PRE_V4: return bfd_mach_arm_3M;
case TAG_CPU_ARCH_V4: return bfd_mach_arm_4;
case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T;
case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T;
return bfd_mach_arm_5TE;
}
+ case TAG_CPU_ARCH_V5TEJ:
+ return bfd_mach_arm_5TEJ;
+ case TAG_CPU_ARCH_V6:
+ return bfd_mach_arm_6;
+ case TAG_CPU_ARCH_V6KZ:
+ return bfd_mach_arm_6KZ;
+ case TAG_CPU_ARCH_V6T2:
+ return bfd_mach_arm_6T2;
+ case TAG_CPU_ARCH_V6K:
+ return bfd_mach_arm_6K;
+ case TAG_CPU_ARCH_V7:
+ return bfd_mach_arm_7;
+ case TAG_CPU_ARCH_V6_M:
+ return bfd_mach_arm_6M;
+ case TAG_CPU_ARCH_V6S_M:
+ return bfd_mach_arm_6SM;
+ case TAG_CPU_ARCH_V7E_M:
+ return bfd_mach_arm_7EM;
+ case TAG_CPU_ARCH_V8:
+ return bfd_mach_arm_8;
+ case TAG_CPU_ARCH_V8R:
+ return bfd_mach_arm_8R;
+ case TAG_CPU_ARCH_V8M_BASE:
+ return bfd_mach_arm_8M_BASE;
+ case TAG_CPU_ARCH_V8M_MAIN:
+ return bfd_mach_arm_8M_MAIN;
+ case TAG_CPU_ARCH_V8_1M_MAIN:
+ return bfd_mach_arm_8_1M_MAIN;
+
default:
+ /* Force entry to be added for any new known Tag_CPU_arch value. */
+ BFD_ASSERT (arch > MAX_TAG_CPU_ARCH);
+
+ /* Unknown Tag_CPU_arch value. */
return bfd_mach_arm_unknown;
}
}
T(V8M_MAIN), /* V8-M BASELINE. */
T(V8M_MAIN) /* V8-M MAINLINE. */
};
+ const int v8_1m_mainline[] =
+ {
+ -1, /* PRE_V4. */
+ -1, /* V4. */
+ -1, /* V4T. */
+ -1, /* V5T. */
+ -1, /* V5TE. */
+ -1, /* V5TEJ. */
+ -1, /* V6. */
+ -1, /* V6KZ. */
+ -1, /* V6T2. */
+ -1, /* V6K. */
+ T(V8_1M_MAIN), /* V7. */
+ T(V8_1M_MAIN), /* V6_M. */
+ T(V8_1M_MAIN), /* V6S_M. */
+ T(V8_1M_MAIN), /* V7E_M. */
+ -1, /* V8. */
+ -1, /* V8R. */
+ T(V8_1M_MAIN), /* V8-M BASELINE. */
+ T(V8_1M_MAIN), /* V8-M MAINLINE. */
+ -1, /* Unused (18). */
+ -1, /* Unused (19). */
+ -1, /* Unused (20). */
+ T(V8_1M_MAIN) /* V8.1-M MAINLINE. */
+ };
const int v4t_plus_v6_m[] =
{
-1, /* PRE_V4. */
-1, /* V8R. */
T(V8M_BASE), /* V8-M BASELINE. */
T(V8M_MAIN), /* V8-M MAINLINE. */
+ -1, /* Unused (18). */
+ -1, /* Unused (19). */
+ -1, /* Unused (20). */
+ T(V8_1M_MAIN), /* V8.1-M MAINLINE. */
T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
};
const int *comb[] =
v8r,
v8m_baseline,
v8m_mainline,
+ NULL,
+ NULL,
+ NULL,
+ v8_1m_mainline,
/* Pseudo-architecture. */
v4t_plus_v6_m
};
case Tag_CPU_unaligned_access:
case Tag_T2EE_use:
case Tag_MPextension_use:
+ case Tag_MVE_arch:
/* Use the largest value specified. */
if (in_attr[i].i > out_attr[i].i)
out_attr[i].i = in_attr[i].i;
r_type = elf32_arm_tls_transition (info, r_type, h);
switch (r_type)
{
+ case R_ARM_GOTOFFFUNCDESC:
+ {
+ if (h == NULL)
+ {
+ if (!elf32_arm_allocate_local_sym_info (abfd))
+ return FALSE;
+ elf32_arm_local_fdpic_cnts(abfd)[r_symndx].gotofffuncdesc_cnt += 1;
+ elf32_arm_local_fdpic_cnts(abfd)[r_symndx].funcdesc_offset = -1;
+ }
+ else
+ {
+ eh->fdpic_cnts.gotofffuncdesc_cnt++;
+ }
+ }
+ break;
+
+ case R_ARM_GOTFUNCDESC:
+ {
+ if (h == NULL)
+ {
+ /* The compiler is not supposed to generate such a relocation
+ against a static function; if it ever proves necessary,
+ support could be added here. */
+ abort();
+ }
+ else
+ {
+ eh->fdpic_cnts.gotfuncdesc_cnt++;
+ }
+ }
+ break;
+
+ case R_ARM_FUNCDESC:
+ {
+ if (h == NULL)
+ {
+ if (!elf32_arm_allocate_local_sym_info (abfd))
+ return FALSE;
+ elf32_arm_local_fdpic_cnts(abfd)[r_symndx].funcdesc_cnt += 1;
+ elf32_arm_local_fdpic_cnts(abfd)[r_symndx].funcdesc_offset = -1;
+ }
+ else
+ {
+ eh->fdpic_cnts.funcdesc_cnt++;
+ }
+ }
+ break;
+
case R_ARM_GOT32:
case R_ARM_GOT_PREL:
case R_ARM_TLS_GD32:
+ case R_ARM_TLS_GD32_FDPIC:
case R_ARM_TLS_IE32:
+ case R_ARM_TLS_IE32_FDPIC:
case R_ARM_TLS_GOTDESC:
case R_ARM_TLS_DESCSEQ:
case R_ARM_THM_TLS_DESCSEQ:
switch (r_type)
{
case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
+ case R_ARM_TLS_GD32_FDPIC: tls_type = GOT_TLS_GD; break;
case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
+ case R_ARM_TLS_IE32_FDPIC: tls_type = GOT_TLS_IE; break;
case R_ARM_TLS_GOTDESC:
case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL:
/* Fall through. */
case R_ARM_TLS_LDM32:
- if (r_type == R_ARM_TLS_LDM32)
+ case R_ARM_TLS_LDM32_FDPIC:
+ if (r_type == R_ARM_TLS_LDM32 || r_type == R_ARM_TLS_LDM32_FDPIC)
htab->tls_ldm_got.refcount++;
/* Fall through. */
case R_ARM_THM_MOVT_PREL:
/* Should the interworking branches be listed here? */
- if ((bfd_link_pic (info) || htab->root.is_relocatable_executable)
+ if ((bfd_link_pic (info) || htab->root.is_relocatable_executable
+ || htab->fdpic_p)
&& (sec->flags & SEC_ALLOC) != 0)
{
if (h == NULL
/* This relocation describes which C++ vtable entries are actually
used. Record for later use during GC. */
case R_ARM_GNU_VTENTRY:
- BFD_ASSERT (h != NULL);
- if (h != NULL
- && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
+ if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
return FALSE;
break;
}
if (elf32_arm_howto_from_type (r_type)->pc_relative)
p->pc_count += 1;
p->count += 1;
+ if (h == NULL && htab->fdpic_p && !bfd_link_pic(info)
+ && r_type != R_ARM_ABS32 && r_type != R_ARM_ABS32_NOI) {
+ /* Only R_ARM_ABS32 and R_ARM_ABS32_NOI are supported here,
+ because we assume that every such relocation will become
+ a rofixup. */
+ fprintf(stderr, "FDPIC does not yet support %d relocation to become dynamic for executable\n", r_type);
+ _bfd_error_handler
+ (_("FDPIC does not yet support %s relocation"
+ " to become dynamic for executable"),
+ elf32_arm_howto_table_1[r_type].name);
+ abort();
+ }
}
}
eadi = get_arm_elf_section_data (i);
edit_list = eadi->u.exidx.unwind_edit_list;
edit_tail = eadi->u.exidx.unwind_edit_tail;
- offset = o->vma + i->output_offset;
+ offset = i->output_offset;
if (eadi->elf.rel.hdr &&
eadi->elf.rel.hdr->sh_entsize == rel_hdr->sh_entsize)
BFD_ARM_SPECIAL_SYM_TYPE_ANY);
}
-/* This is a copy of elf_find_function() from elf.c except that
+/* This is a version of _bfd_elf_find_function() from dwarf2.c except that
ARM mapping symbols are ignored when looking for function names
and STT_ARM_TFUNC is considered to a function type. */
static bfd_boolean
-arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
+arm_elf_find_function (bfd * abfd,
asymbol ** symbols,
asection * section,
bfd_vma offset,
bfd_vma low_func = 0;
asymbol ** p;
+ if (symbols == NULL)
+ return FALSE;
+
+ if (bfd_get_flavour (abfd) != bfd_target_elf_flavour)
+ return FALSE;
+
for (p = symbols; *p != NULL; p++)
{
elf_symbol_type *q;
/* Make sure this symbol is output as a dynamic symbol.
Undefined weak syms won't yet be marked as dynamic. */
- if (h->dynindx == -1 && !h->forced_local
+ if (htab->root.dynamic_sections_created && h->dynindx == -1 && !h->forced_local
&& h->root.type == bfd_link_hash_undefweak)
{
if (! bfd_elf_link_record_dynamic_symbol (info, h))
if (tls_type & GOT_TLS_GD)
{
- /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots. If
- the symbol is both GD and GDESC, got.offset may
- have been overwritten. */
+ /* R_ARM_TLS_GD32 and R_ARM_TLS_GD32_FDPIC need two
+ consecutive GOT slots. If the symbol is both GD
+ and GDESC, got.offset may have been
+ overwritten. */
h->got.offset = s->size;
s->size += 8;
}
if (tls_type & GOT_TLS_IE)
- /* R_ARM_TLS_IE32 needs one GOT slot. */
+ /* R_ARM_TLS_IE32/R_ARM_TLS_IE32_FDPIC need one GOT
+ slot. */
s->size += 4;
}
if ((tls_type & GOT_TLS_GD) && indx != 0)
elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
}
- else if (indx != -1 && !SYMBOL_REFERENCES_LOCAL (info, h))
+ else if (((indx != -1) || htab->fdpic_p)
+ && !SYMBOL_REFERENCES_LOCAL (info, h))
{
if (htab->root.dynamic_sections_created)
/* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation. */
|| h->root.type != bfd_link_hash_undefweak))
/* Reserve room for the GOT entry's R_ARM_RELATIVE relocation. */
elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
+ else if (htab->fdpic_p && tls_type == GOT_NORMAL)
+ /* Reserve room for rofixup for FDPIC executable. */
+ /* TLS relocs do not need space since they are completely
+ resolved. */
+ htab->srofixup->size += 4;
}
}
else
h->got.offset = (bfd_vma) -1;
+ /* FDPIC support. */
+ if (eh->fdpic_cnts.gotofffuncdesc_cnt > 0)
+ {
+ /* The symbol mustn't be exported. */
+ if (h->dynindx != -1)
+ abort();
+
+ /* We only allocate one function descriptor with its associated relocation. */
+ if (eh->fdpic_cnts.funcdesc_offset == -1)
+ {
+ asection *s = htab->root.sgot;
+
+ eh->fdpic_cnts.funcdesc_offset = s->size;
+ s->size += 8;
+ /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
+ if (bfd_link_pic(info))
+ elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
+ else
+ htab->srofixup->size += 8;
+ }
+ }
+
+ if (eh->fdpic_cnts.gotfuncdesc_cnt > 0)
+ {
+ asection *s = htab->root.sgot;
+
+ if (htab->root.dynamic_sections_created && h->dynindx == -1
+ && !h->forced_local)
+ if (! bfd_elf_link_record_dynamic_symbol (info, h))
+ return FALSE;
+
+ if (h->dynindx == -1)
+ {
+ /* We only allocate one function descriptor with its associated relocation. */
+ if (eh->fdpic_cnts.funcdesc_offset == -1)
+ {
+
+ eh->fdpic_cnts.funcdesc_offset = s->size;
+ s->size += 8;
+ /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
+ if (bfd_link_pic(info))
+ elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
+ else
+ htab->srofixup->size += 8;
+ }
+ }
+
+ /* Add one entry into the GOT and a R_ARM_FUNCDESC or
+ R_ARM_RELATIVE/rofixup relocation on it. */
+ eh->fdpic_cnts.gotfuncdesc_offset = s->size;
+ s->size += 4;
+ if (h->dynindx == -1 && !bfd_link_pic(info))
+ htab->srofixup->size += 4;
+ else
+ elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
+ }
+
+ if (eh->fdpic_cnts.funcdesc_cnt > 0)
+ {
+ if (htab->root.dynamic_sections_created && h->dynindx == -1
+ && !h->forced_local)
+ if (! bfd_elf_link_record_dynamic_symbol (info, h))
+ return FALSE;
+
+ if (h->dynindx == -1)
+ {
+ /* We only allocate one function descriptor with its associated relocation. */
+ if (eh->fdpic_cnts.funcdesc_offset == -1)
+ {
+ asection *s = htab->root.sgot;
+
+ eh->fdpic_cnts.funcdesc_offset = s->size;
+ s->size += 8;
+ /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
+ if (bfd_link_pic(info))
+ elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
+ else
+ htab->srofixup->size += 8;
+ }
+ }
+ if (h->dynindx == -1 && !bfd_link_pic(info))
+ {
+ /* For FDPIC executable we replace R_ARM_RELATIVE with a rofixup. */
+ htab->srofixup->size += 4 * eh->fdpic_cnts.funcdesc_cnt;
+ }
+ else
+ {
+ /* We will need one dynamic reloc per reference: either
+ R_ARM_FUNCDESC or R_ARM_RELATIVE for hidden symbols. */
+ elf32_arm_allocate_dynrelocs (info, htab->root.srelgot,
+ eh->fdpic_cnts.funcdesc_cnt);
+ }
+ }
+
/* Allocate stubs for exported Thumb functions on v4t. */
if (!htab->use_blx && h->dynindx != -1
&& h->def_regular
space for pc-relative relocs that have become local due to symbol
visibility changes. */
- if (bfd_link_pic (info) || htab->root.is_relocatable_executable)
+ if (bfd_link_pic (info) || htab->root.is_relocatable_executable || htab->fdpic_p)
{
/* Relocs that use pc_count are PC-relative forms, which will appear
on something like ".long foo - ." or "movw REG, foo - .". We want
/* Make sure undefined weak symbols are output as a dynamic
symbol in PIEs. */
- else if (h->dynindx == -1
+ else if (htab->root.dynamic_sections_created && h->dynindx == -1
&& !h->forced_local)
{
if (! bfd_elf_link_record_dynamic_symbol (info, h))
for (p = eh->dyn_relocs; p != NULL; p = p->next)
{
asection *sreloc = elf_section_data (p->sec)->sreloc;
+
if (h->type == STT_GNU_IFUNC
&& eh->plt.noncall_refcount == 0
&& SYMBOL_REFERENCES_LOCAL (info, h))
elf32_arm_allocate_irelocs (info, sreloc, p->count);
+ else if (h->dynindx != -1 && (!bfd_link_pic(info) || !info->symbolic || !h->def_regular))
+ elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
+ else if (htab->fdpic_p && !bfd_link_pic(info))
+ htab->srofixup->size += 4 * p->count;
else
elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
}
/* Not an error, just cut short the traversal. */
return FALSE;
}
+
return TRUE;
}
asection *srel;
bfd_boolean is_vxworks = htab->vxworks_p;
unsigned int symndx;
+ struct fdpic_local *local_fdpic_cnts;
if (! is_arm_elf (ibfd))
continue;
else if (p->count != 0)
{
srel = elf_section_data (p->sec)->sreloc;
- elf32_arm_allocate_dynrelocs (info, srel, p->count);
+ if (htab->fdpic_p && !bfd_link_pic(info))
+ htab->srofixup->size += 4 * p->count;
+ else
+ elf32_arm_allocate_dynrelocs (info, srel, p->count);
if ((p->sec->output_section->flags & SEC_READONLY) != 0)
info->flags |= DF_TEXTREL;
}
local_iplt_ptr = elf32_arm_local_iplt (ibfd);
local_tls_type = elf32_arm_local_got_tls_type (ibfd);
local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd);
+ local_fdpic_cnts = elf32_arm_local_fdpic_cnts (ibfd);
symndx = 0;
s = htab->root.sgot;
srel = htab->root.srelgot;
for (; local_got < end_local_got;
++local_got, ++local_iplt_ptr, ++local_tls_type,
- ++local_tlsdesc_gotent, ++symndx)
+ ++local_tlsdesc_gotent, ++symndx, ++local_fdpic_cnts)
{
*local_tlsdesc_gotent = (bfd_vma) -1;
local_iplt = *local_iplt_ptr;
+
+ /* FDPIC support. */
+ if (local_fdpic_cnts->gotofffuncdesc_cnt > 0)
+ {
+ if (local_fdpic_cnts->funcdesc_offset == -1)
+ {
+ local_fdpic_cnts->funcdesc_offset = s->size;
+ s->size += 8;
+
+ /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
+ if (bfd_link_pic(info))
+ elf32_arm_allocate_dynrelocs (info, srel, 1);
+ else
+ htab->srofixup->size += 8;
+ }
+ }
+
+ if (local_fdpic_cnts->funcdesc_cnt > 0)
+ {
+ if (local_fdpic_cnts->funcdesc_offset == -1)
+ {
+ local_fdpic_cnts->funcdesc_offset = s->size;
+ s->size += 8;
+
+ /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
+ if (bfd_link_pic(info))
+ elf32_arm_allocate_dynrelocs (info, srel, 1);
+ else
+ htab->srofixup->size += 8;
+ }
+
+ /* We will add n R_ARM_RELATIVE relocations or n rofixups. */
+ if (bfd_link_pic(info))
+ elf32_arm_allocate_dynrelocs (info, srel, local_fdpic_cnts->funcdesc_cnt);
+ else
+ htab->srofixup->size += 4 * local_fdpic_cnts->funcdesc_cnt;
+ }
+
if (local_iplt != NULL)
{
struct elf_dyn_relocs *p;
&& (local_iplt == NULL
|| local_iplt->arm.noncall_refcount == 0))
elf32_arm_allocate_irelocs (info, srel, 1);
- else if (bfd_link_pic (info) || output_bfd->flags & DYNAMIC)
+ else if (bfd_link_pic (info) || output_bfd->flags & DYNAMIC || htab->fdpic_p)
{
- if ((bfd_link_pic (info) && !(*local_tls_type & GOT_TLS_GDESC))
- || *local_tls_type & GOT_TLS_GD)
+ if ((bfd_link_pic (info) && !(*local_tls_type & GOT_TLS_GDESC)))
elf32_arm_allocate_dynrelocs (info, srel, 1);
+ else if (htab->fdpic_p && *local_tls_type & GOT_NORMAL)
+ htab->srofixup->size += 4;
- if (bfd_link_pic (info) && *local_tls_type & GOT_TLS_GDESC)
+ if ((bfd_link_pic (info) || htab->fdpic_p)
+ && *local_tls_type & GOT_TLS_GDESC)
{
elf32_arm_allocate_dynrelocs (info,
htab->root.srelplt, 1);
if (htab->tls_ldm_got.refcount > 0)
{
/* Allocate two GOT entries and one dynamic relocation (if necessary)
- for R_ARM_TLS_LDM32 relocations. */
+ for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations. */
htab->tls_ldm_got.offset = htab->root.sgot->size;
htab->root.sgot->size += 8;
if (bfd_link_pic (info))
else
htab->tls_ldm_got.offset = -1;
+ /* At the very end of the .rofixup section is a pointer to the GOT,
+ reserve space for it. */
+ if (htab->fdpic_p && htab->srofixup != NULL)
+ htab->srofixup->size += 4;
+
/* Allocate global sym .plt and .got entries, and space for global
sym dynamic relocs. */
elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info);
&& s != htab->root.iplt
&& s != htab->root.igotplt
&& s != htab->root.sdynbss
- && s != htab->root.sdynrelro)
+ && s != htab->root.sdynrelro
+ && s != htab->srofixup)
{
/* It's not one of our sections, so don't allocate space. */
continue;
struct bfd_link_info *info)
{
asection *tls_sec;
+ struct elf32_arm_link_hash_table *htab;
+
+ htab = elf32_arm_hash_table (info);
if (bfd_link_relocatable (info))
return TRUE;
(*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
}
}
+
+ if (htab->fdpic_p && !bfd_link_relocatable (info)
+ && !bfd_elf_stack_segment_size (output_bfd, info,
+ "__stacksize", DEFAULT_STACK_SIZE))
+ return FALSE;
+
return TRUE;
}
}
/* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
- the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
- to the ".got" section. */
+ and for FDPIC, the _GLOBAL_OFFSET_TABLE_ symbol is not absolute:
+ it is relative to the ".got" section. */
if (h == htab->root.hdynamic
- || (!htab->vxworks_p && h == htab->root.hgot))
+ || (!htab->fdpic_p && !htab->vxworks_p && h == htab->root.hgot))
sym->st_shndx = SHN_ABS;
return TRUE;
elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
}
+ /* At the very end of the .rofixup section is a pointer to the GOT. */
+ if (htab->fdpic_p && htab->srofixup != NULL)
+ {
+ struct elf_link_hash_entry *hgot = htab->root.hgot;
+
+ bfd_vma got_value = hgot->root.u.def.value
+ + hgot->root.u.def.section->output_section->vma
+ + hgot->root.u.def.section->output_offset;
+
+ arm_elf_add_rofixup(output_bfd, htab->srofixup, got_value);
+
+ /* Make sure we allocated and generated the same number of fixups. */
+ BFD_ASSERT (htab->srofixup->reloc_count * 4 == htab->srofixup->size);
+ }
+
return TRUE;
}
if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
return FALSE;
}
+ else if (htab->fdpic_p)
+ {
+ enum map_symbol_type type = using_thumb_only(htab)
+ ? ARM_MAP_THUMB
+ : ARM_MAP_ARM;
+
+ if (elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt))
+ if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
+ return FALSE;
+ if (!elf32_arm_output_map_sym (osi, type, addr))
+ return FALSE;
+ if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 16))
+ return FALSE;
+ if (htab->plt_entry_size == 4 * ARRAY_SIZE(elf32_arm_fdpic_plt_entry))
+ if (!elf32_arm_output_map_sym (osi, type, addr + 24))
+ return FALSE;
+ }
else if (using_thumb_only (htab))
{
if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr))
if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
return FALSE;
}
- else if (using_thumb_only (htab))
+ else if (using_thumb_only (htab) && !htab->fdpic_p)
{
if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 0))
return FALSE;
if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 16))
return FALSE;
}
- else if (!htab->symbian_p)
+ else if (!htab->symbian_p && !htab->fdpic_p)
{
if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
return FALSE;
Elf_Internal_Sym *sym, const char **namep,
flagword *flagsp, asection **secp, bfd_vma *valp)
{
- if (ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
- && (abfd->flags & DYNAMIC) == 0
- && bfd_get_flavour (info->output_bfd) == bfd_target_elf_flavour)
- elf_tdata (info->output_bfd)->has_gnu_symbols |= elf_gnu_symbol_ifunc;
-
if (elf32_arm_hash_table (info) == NULL)
return FALSE;
return ret;
}
+/* Decide whether the section symbol for P may be omitted from the
+   dynamic symbol table.  Under FDPIC, segments can be relocated
+   independently, so we need dynamic symbols for every section that
+   section-relative relocations may refer to.  Return FALSE (keep the
+   symbol) for data/code-bearing section types, TRUE otherwise.  */
+static bfd_boolean
+elf32_arm_fdpic_omit_section_dynsym (bfd *output_bfd ATTRIBUTE_UNUSED,
+				     struct bfd_link_info *info
+				     ATTRIBUTE_UNUSED,
+				     asection *p)
+{
+  switch (elf_section_data (p)->this_hdr.sh_type)
+    {
+    case SHT_PROGBITS:
+    case SHT_NOBITS:
+      /* If sh_type is yet undecided, assume it could be
+	 SHT_PROGBITS/SHT_NOBITS.  */
+    case SHT_NULL:
+      return FALSE;
+
+      /* There shouldn't be section relative relocations
+	 against any other section.  */
+    default:
+      return TRUE;
+    }
+}
+
#undef elf32_bed
#define elf32_bed elf32_arm_fdpic_bed
#undef bfd_elf32_bfd_link_hash_table_create
-#define bfd_elf32_bfd_link_hash_table_create elf32_arm_fdpic_link_hash_table_create
+#define bfd_elf32_bfd_link_hash_table_create elf32_arm_fdpic_link_hash_table_create
+
+#undef elf_backend_omit_section_dynsym
+#define elf_backend_omit_section_dynsym elf32_arm_fdpic_omit_section_dynsym
#include "elf32-target.h"
+
#undef elf_match_priority
#undef ELF_OSABI
+#undef elf_backend_omit_section_dynsym
/* VxWorks Targets. */