diff --git a/bfd/elf32-arm.c b/bfd/elf32-arm.c
index e9c0d20aaf219f907663e6fa7c0299eeb112bf5a..a2402ba383ddaf837a567725a9721ea799723643 100644 (file)
@@ -1,5 +1,5 @@
 /* 32-bit ELF support for ARM
-   Copyright 1998-2013 Free Software Foundation, Inc.
+   Copyright (C) 1998-2016 Free Software Foundation, Inc.
 
    This file is part of BFD, the Binary File Descriptor library.
 
@@ -79,7 +79,7 @@ static reloc_howto_type elf32_arm_howto_table_1[] =
   /* No relocation.  */
   HOWTO (R_ARM_NONE,           /* type */
         0,                     /* rightshift */
-        0,                     /* size (0 = byte, 1 = short, 2 = long) */
+        3,                     /* size (0 = byte, 1 = short, 2 = long) */
         0,                     /* bitsize */
         FALSE,                 /* pc_relative */
         0,                     /* bitpos */
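Note on the size value above: 3 is not an oversight. In BFD's howto encoding a size of 3 denotes a relocation that occupies zero bytes, which is exactly right for R_ARM_NONE. A minimal sketch of the decoding, mirroring what bfd_get_reloc_size does (simplified; the real function also handles negative and 8-byte sizes):

    static unsigned int
    howto_size_in_bytes (int size)
    {
      switch (size)
        {
        case 0: return 1;       /* byte */
        case 1: return 2;       /* short */
        case 2: return 4;       /* long */
        case 3: return 0;       /* nothing */
        default: abort ();
        }
    }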
@@ -1606,7 +1606,7 @@ static reloc_howto_type elf32_arm_howto_table_1[] =
         FALSE,                 /* pc_relative */
         0,                     /* bitpos */
         complain_overflow_bitfield,/* complain_on_overflow */
-        bfd_elf_generic_reloc, /* special_function */
+        NULL,                  /* special_function */
         "R_ARM_TLS_LE32",      /* name */
         TRUE,                  /* partial_inplace */
         0xffffffff,            /* src_mask */
@@ -1689,6 +1689,60 @@ static reloc_howto_type elf32_arm_howto_table_1[] =
         0x00000000,            /* src_mask */
         0x00000000,            /* dst_mask */
         FALSE),                /* pcrel_offset */
+  EMPTY_HOWTO (130),
+  EMPTY_HOWTO (131),
+  HOWTO (R_ARM_THM_ALU_ABS_G0_NC,/* type.  */
+        0,                     /* rightshift.  */
+        1,                     /* size (0 = byte, 1 = short, 2 = long).  */
+        16,                    /* bitsize.  */
+        FALSE,                 /* pc_relative.  */
+        0,                     /* bitpos.  */
+        complain_overflow_bitfield,/* complain_on_overflow.  */
+        bfd_elf_generic_reloc, /* special_function.  */
+        "R_ARM_THM_ALU_ABS_G0_NC",/* name.  */
+        FALSE,                 /* partial_inplace.  */
+        0x00000000,            /* src_mask.  */
+        0x00000000,            /* dst_mask.  */
+        FALSE),                /* pcrel_offset.  */
+  HOWTO (R_ARM_THM_ALU_ABS_G1_NC,/* type.  */
+        0,                     /* rightshift.  */
+        1,                     /* size (0 = byte, 1 = short, 2 = long).  */
+        16,                    /* bitsize.  */
+        FALSE,                 /* pc_relative.  */
+        0,                     /* bitpos.  */
+        complain_overflow_bitfield,/* complain_on_overflow.  */
+        bfd_elf_generic_reloc, /* special_function.  */
+        "R_ARM_THM_ALU_ABS_G1_NC",/* name.  */
+        FALSE,                 /* partial_inplace.  */
+        0x00000000,            /* src_mask.  */
+        0x00000000,            /* dst_mask.  */
+        FALSE),                /* pcrel_offset.  */
+  HOWTO (R_ARM_THM_ALU_ABS_G2_NC,/* type.  */
+        0,                     /* rightshift.  */
+        1,                     /* size (0 = byte, 1 = short, 2 = long).  */
+        16,                    /* bitsize.  */
+        FALSE,                 /* pc_relative.  */
+        0,                     /* bitpos.  */
+        complain_overflow_bitfield,/* complain_on_overflow.  */
+        bfd_elf_generic_reloc, /* special_function.  */
+        "R_ARM_THM_ALU_ABS_G2_NC",/* name.  */
+        FALSE,                 /* partial_inplace.  */
+        0x00000000,            /* src_mask.  */
+        0x00000000,            /* dst_mask.  */
+        FALSE),                /* pcrel_offset.  */
+  HOWTO (R_ARM_THM_ALU_ABS_G3_NC,/* type.  */
+        0,                     /* rightshift.  */
+        1,                     /* size (0 = byte, 1 = short, 2 = long).  */
+        16,                    /* bitsize.  */
+        FALSE,                 /* pc_relative.  */
+        0,                     /* bitpos.  */
+        complain_overflow_bitfield,/* complain_on_overflow.  */
+        bfd_elf_generic_reloc, /* special_function.  */
+        "R_ARM_THM_ALU_ABS_G3_NC",/* name.  */
+        FALSE,                 /* partial_inplace.  */
+        0x00000000,            /* src_mask.  */
+        0x00000000,            /* dst_mask.  */
+        FALSE),                /* pcrel_offset.  */
 };
 
 /* 160 onwards: */
@@ -1889,7 +1943,11 @@ static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
     {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
     {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
     {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
-    {BFD_RELOC_ARM_V4BX,            R_ARM_V4BX}
+    {BFD_RELOC_ARM_V4BX,            R_ARM_V4BX},
+    {BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC, R_ARM_THM_ALU_ABS_G3_NC},
+    {BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC, R_ARM_THM_ALU_ABS_G2_NC},
+    {BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC, R_ARM_THM_ALU_ABS_G1_NC},
+    {BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC, R_ARM_THM_ALU_ABS_G0_NC}
   };
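For context, the map above is searched linearly when converting a BFD_RELOC_* code into an ELF relocation type. A simplified sketch of that lookup (the real one lives in elf32_arm_reloc_type_lookup):

    static reloc_howto_type *
    reloc_map_lookup_sketch (bfd_reloc_code_real_type code)
    {
      unsigned int i;

      for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i++)
        if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
          return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);

      return NULL;
    }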
 
 static reloc_howto_type *
@@ -2040,9 +2098,9 @@ elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz,
     }
 }
 
-#define TARGET_LITTLE_SYM               bfd_elf32_littlearm_vec
+#define TARGET_LITTLE_SYM               arm_elf32_le_vec
 #define TARGET_LITTLE_NAME              "elf32-littlearm"
-#define TARGET_BIG_SYM                  bfd_elf32_bigarm_vec
+#define TARGET_BIG_SYM                  arm_elf32_be_vec
 #define TARGET_BIG_NAME                 "elf32-bigarm"
 
 #define elf_backend_grok_prstatus      elf32_arm_nabi_grok_prstatus
@@ -2072,11 +2130,16 @@ typedef unsigned short int insn16;
 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
 #define VFP11_ERRATUM_VENEER_ENTRY_NAME   "__vfp11_veneer_%x"
 
+#define STM32L4XX_ERRATUM_VENEER_SECTION_NAME ".text.stm32l4xx_veneer"
+#define STM32L4XX_ERRATUM_VENEER_ENTRY_NAME   "__stm32l4xx_veneer_%x"
+
 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
 #define ARM_BX_GLUE_ENTRY_NAME   "__bx_r%d"
 
 #define STUB_ENTRY_NAME   "__%s_veneer"
 
+#define CMSE_PREFIX "__acle_se_"
+
 /* The name of the dynamic interpreter.  This is put in the .interp
    section.  */
 #define ELF_DYNAMIC_INTERPRETER     "/usr/lib/ld.so.1"
@@ -2125,7 +2188,7 @@ static const bfd_vma elf32_arm_plt_entry [] =
   0x00000000,          /* unused               */
 };
 
-#else
+#else /* not FOUR_WORD_PLT */
 
 /* The first entry in a procedure linkage table looks like
    this.  It is set up so that any shared library function that is
@@ -2140,16 +2203,55 @@ static const bfd_vma elf32_arm_plt0_entry [] =
   0x00000000,          /* &GOT[0] - .          */
 };
 
-/* Subsequent entries in a procedure linkage table look like
-   this.  */
-static const bfd_vma elf32_arm_plt_entry [] =
+/* By default, subsequent entries in a procedure linkage table look like
+   this.  Offsets that do not fit into 28 bits will cause a link error.  */
+static const bfd_vma elf32_arm_plt_entry_short [] =
 {
   0xe28fc600,          /* add   ip, pc, #0xNN00000 */
   0xe28cca00,          /* add   ip, ip, #0xNN000   */
   0xe5bcf000,          /* ldr   pc, [ip, #0xNNN]!  */
 };
 
-#endif
+/* When explicitly asked, we'll use this "long" entry format
+   which can cope with arbitrary displacements.  */
+static const bfd_vma elf32_arm_plt_entry_long [] =
+{
+  0xe28fc200,           /* add   ip, pc, #0xN0000000 */
+  0xe28cc600,          /* add   ip, ip, #0xNN00000  */
+  0xe28cca00,          /* add   ip, ip, #0xNN000    */
+  0xe5bcf000,          /* ldr   pc, [ip, #0xNNN]!   */
+};
+
+static bfd_boolean elf32_arm_use_long_plt_entry = FALSE;
+
+#endif /* not FOUR_WORD_PLT */
+
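The 28-bit limit mentioned above comes from the immediate fields of the three instructions: 8 bits placed at bit 20, 8 bits at bit 12 and the low 12 bits. A hypothetical helper (not part of the patch) showing the split:

    static void
    split_plt_displacement (bfd_vma disp, unsigned int *hi,
                            unsigned int *mid, unsigned int *lo)
    {
      *hi  = (disp >> 20) & 0xff;       /* add ip, pc, #0xNN00000 */
      *mid = (disp >> 12) & 0xff;       /* add ip, ip, #0xNN000   */
      *lo  = disp & 0xfff;              /* ldr pc, [ip, #0xNNN]!  */
    }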
+/* The first entry in a procedure linkage table looks like this.
+   It is set up so that any shared library function that is called before the
+   relocation has been set up calls the dynamic linker first.  */
+static const bfd_vma elf32_thumb2_plt0_entry [] =
+{
+  /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
+     an instruction may be encoded in one or two array elements.  */
+  0xf8dfb500,          /* push    {lr}          */
+  0x44fee008,          /* ldr.w   lr, [pc, #8]  */
+                       /* add     lr, pc        */
+  0xff08f85e,          /* ldr.w   pc, [lr, #8]! */
+  0x00000000,          /* &GOT[0] - .           */
+};
+
+/* Subsequent entries in a procedure linkage table for a Thumb-only target
+   look like this.  */
+static const bfd_vma elf32_thumb2_plt_entry [] =
+{
+  /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
+     an instruction may be encoded in one or two array elements.  */
+  0x0c00f240,          /* movw    ip, #0xNNNN    */
+  0x0c00f2c0,          /* movt    ip, #0xNNNN    */
+  0xf8dc44fc,           /* add     ip, pc         */
+  0xbf00f000            /* ldr.w   pc, [ip]       */
+                       /* nop                    */
+};
 
 /* The format of the first entry in the procedure linkage table
    for a VxWorks executable.  */
@@ -2244,6 +2346,8 @@ static const bfd_vma elf32_arm_nacl_plt_entry [] =
 #define THM_MAX_BWD_BRANCH_OFFSET  (-(1 << 22) + 4)
 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
+#define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) - 2) + 4)
+#define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4)
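These two limits feed the R_ARM_THM_JUMP19 handling added to arm_type_of_stub further down: a Thumb-2 conditional branch only reaches about +/-1 MiB, so anything farther needs a long-branch veneer. An illustrative range test, not from the patch:

    static bfd_boolean
    thm2_cond_branch_needs_stub (bfd_signed_vma branch_offset)
    {
      return branch_offset > THM2_MAX_FWD_COND_BRANCH_OFFSET
             || branch_offset < THM2_MAX_BWD_COND_BRANCH_OFFSET;
    }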
 
 enum stub_insn_type
 {
@@ -2258,6 +2362,8 @@ enum stub_insn_type
    is inserted in arm_build_one_stub().  */
 #define THUMB16_BCOND_INSN(X)  {(X), THUMB16_TYPE, R_ARM_NONE, 1}
 #define THUMB32_INSN(X)                {(X), THUMB32_TYPE, R_ARM_NONE, 0}
+#define THUMB32_MOVT(X)                {(X), THUMB32_TYPE, R_ARM_THM_MOVT_ABS, 0}
+#define THUMB32_MOVW(X)                {(X), THUMB32_TYPE, R_ARM_THM_MOVW_ABS_NC, 0}
 #define THUMB32_B_INSN(X, Z)   {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
 #define ARM_INSN(X)            {(X), ARM_TYPE, R_ARM_NONE, 0}
 #define ARM_REL_INSN(X, Z)     {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
@@ -2300,6 +2406,22 @@ static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
   DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd  R_ARM_ABS32(X) */
 };
 
+/* Thumb -> Thumb long branch stub in Thumb-2 encoding.  Used on ARMv7.  */
+static const insn_sequence elf32_arm_stub_long_branch_thumb2_only[] =
+{
+  THUMB32_INSN (0xf85ff000),         /* ldr.w  pc, [pc, #-0] */
+  DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd  R_ARM_ABS32(x) */
+};
+
+/* Thumb -> Thumb long branch stub.  Used for PureCode sections on Thumb-2
+   M-profile architectures.  */
+static const insn_sequence elf32_arm_stub_long_branch_thumb2_only_pure[] =
+{
+  THUMB32_MOVW (0xf2400c00),        /* mov.w ip, R_ARM_MOVW_ABS_NC */
+  THUMB32_MOVT (0xf2c00c00),        /* movt  ip, R_ARM_MOVT_ABS << 16 */
+  THUMB16_INSN (0x4760),             /* bx   ip */
+};
+
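The pure-code stub above avoids loading the target address from the stub itself (SHF_ARM_PURECODE sections must not be read as data), so the address is materialised in two 16-bit halves. A hypothetical illustration (not part of the patch) of the split the MOVW/MOVT relocations perform:

    static void
    split_movw_movt (bfd_vma target, unsigned int *lower16,
                     unsigned int *upper16)
    {
      *lower16 = target & 0xffff;           /* movw ip, #:lower16:target */
      *upper16 = (target >> 16) & 0xffff;   /* movt ip, #:upper16:target */
    }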
 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
    allowed.  */
 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
@@ -2441,6 +2563,13 @@ static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic[] =
   DATA_WORD (0, R_ARM_NONE, 0),         /* .word 0 */
 };
 
+/* Stub used for transition to secure state (aka SG veneer).  */
+static const insn_sequence elf32_arm_stub_cmse_branch_thumb_only[] =
+{
+  THUMB32_INSN (0xe97fe97f),           /* sg.  */
+  THUMB32_B_INSN (0xf000b800, -4),     /* b.w original_branch_dest.  */
+};
+
 
 /* Cortex-A8 erratum-workaround stubs.  */
 
@@ -2520,21 +2649,26 @@ static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
   DEF_STUB(long_branch_v4t_thumb_tls_pic) \
   DEF_STUB(long_branch_arm_nacl) \
   DEF_STUB(long_branch_arm_nacl_pic) \
+  DEF_STUB(cmse_branch_thumb_only) \
   DEF_STUB(a8_veneer_b_cond) \
   DEF_STUB(a8_veneer_b) \
   DEF_STUB(a8_veneer_bl) \
-  DEF_STUB(a8_veneer_blx)
+  DEF_STUB(a8_veneer_blx) \
+  DEF_STUB(long_branch_thumb2_only) \
+  DEF_STUB(long_branch_thumb2_only_pure)
 
 #define DEF_STUB(x) arm_stub_##x,
 enum elf32_arm_stub_type
 {
   arm_stub_none,
   DEF_STUBS
-  /* Note the first a8_veneer type */
-  arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond
+  max_stub_type
 };
 #undef DEF_STUB
 
+/* Note the first a8_veneer type.  */
+const unsigned arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond;
+
 typedef struct
 {
   const insn_sequence* template_sequence;
@@ -2564,8 +2698,12 @@ struct elf32_arm_stub_hash_entry
   bfd_vma target_value;
   asection *target_section;
 
-  /* Offset to apply to relocation referencing target_value.  */
-  bfd_vma target_addend;
+  /* Same as above but for the source of the branch to the stub.  Used for
+     the Cortex-A8 erratum workaround to patch the branch to point at the
+     stub.  The source section does not need to be recorded since Cortex-A8
+     erratum workaround stubs are only generated when both source and target
+     are in the same section.  */
+  bfd_vma source_value;
 
   /* The instruction which caused this stub to be generated (only valid for
      Cortex-A8 erratum workaround stubs at present).  */
@@ -2638,6 +2776,36 @@ typedef struct elf32_vfp11_erratum_list
 }
 elf32_vfp11_erratum_list;
 
+/* Information about a STM32L4XX erratum veneer, or a branch to such a
+   veneer.  */
+typedef enum
+{
+  STM32L4XX_ERRATUM_BRANCH_TO_VENEER,
+  STM32L4XX_ERRATUM_VENEER
+}
+elf32_stm32l4xx_erratum_type;
+
+typedef struct elf32_stm32l4xx_erratum_list
+{
+  struct elf32_stm32l4xx_erratum_list *next;
+  bfd_vma vma;
+  union
+  {
+    struct
+    {
+      struct elf32_stm32l4xx_erratum_list *veneer;
+      unsigned int insn;
+    } b;
+    struct
+    {
+      struct elf32_stm32l4xx_erratum_list *branch;
+      unsigned int id;
+    } v;
+  } u;
+  elf32_stm32l4xx_erratum_type type;
+}
+elf32_stm32l4xx_erratum_list;
+
 typedef enum
 {
   DELETE_EXIDX_ENTRY,
@@ -2668,6 +2836,9 @@ typedef struct _arm_elf_section_data
   /* Information about CPU errata.  */
   unsigned int erratumcount;
   elf32_vfp11_erratum_list *erratumlist;
+  unsigned int stm32l4xx_erratumcount;
+  elf32_stm32l4xx_erratum_list *stm32l4xx_erratumlist;
+  unsigned int additional_reloc_count;
   /* Information about unwind tables.  */
   union
   {
@@ -2701,7 +2872,7 @@ struct a8_erratum_fix
   bfd *input_bfd;
   asection *section;
   bfd_vma offset;
-  bfd_vma addend;
+  bfd_vma target_offset;
   unsigned long orig_insn;
   char *stub_name;
   enum elf32_arm_stub_type stub_type;
@@ -2901,6 +3072,10 @@ struct elf32_arm_link_hash_table
      veneers.  */
   bfd_size_type vfp11_erratum_glue_size;
 
+  /* The size in bytes of the section containing glue for STM32L4XX erratum
+     veneers.  */
+  bfd_size_type stm32l4xx_erratum_glue_size;
+
   /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum.  This
      holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
      elf32_arm_write_section().  */
@@ -2941,6 +3116,13 @@ struct elf32_arm_link_hash_table
   /* Global counter for the number of fixes we have emitted.  */
   int num_vfp11_fixes;
 
+  /* What sort of code sequences we should look for which may trigger the
+     STM32L4XX erratum.  */
+  bfd_arm_stm32l4xx_fix stm32l4xx_fix;
+
+  /* Global counter for the number of fixes we have emitted.  */
+  int num_stm32l4xx_fixes;
+
   /* Nonzero to force PIC branch veneers.  */
   int pic_veneer;
 
@@ -2962,6 +3144,14 @@ struct elf32_arm_link_hash_table
   /* True if the target uses REL relocations.  */
   int use_rel;
 
+  /* Nonzero if import library must be a secure gateway import library
+     as per ARMv8-M Security Extensions.  */
+  int cmse_implib;
+
+  /* The import library whose symbols' address must remain stable in
+     the import library generated.  */
+  bfd *in_implib_bfd;
+
   /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt.  */
   bfd_vma next_tls_desc_index;
 
@@ -3012,22 +3202,66 @@ struct elf32_arm_link_hash_table
   bfd *stub_bfd;
 
   /* Linker call-backs.  */
-  asection * (*add_stub_section) (const char *, asection *, unsigned int);
+  asection * (*add_stub_section) (const char *, asection *, asection *,
+                                 unsigned int);
   void (*layout_sections_again) (void);
 
   /* Array to keep track of which stub sections have been created, and
      information on stub grouping.  */
   struct map_stub *stub_group;
 
+  /* Input stub section holding secure gateway veneers.  */
+  asection *cmse_stub_sec;
+
+  /* Offset in cmse_stub_sec where new SG veneers (not in input import library)
+     start to be allocated.  */
+  bfd_vma new_cmse_stub_offset;
+
   /* Number of elements in stub_group.  */
-  int top_id;
+  unsigned int top_id;
 
   /* Assorted information used by elf32_arm_size_stubs.  */
   unsigned int bfd_count;
-  int top_index;
+  unsigned int top_index;
   asection **input_list;
 };
 
+static inline int
+ctz (unsigned int mask)
+{
+#if GCC_VERSION >= 3004
+  return __builtin_ctz (mask);
+#else
+  unsigned int i;
+
+  for (i = 0; i < 8 * sizeof (mask); i++)
+    {
+      if (mask & 0x1)
+       break;
+      mask = (mask >> 1);
+    }
+  return i;
+#endif
+}
+
+static inline int
+popcount (unsigned int mask)
+{
+#if GCC_VERSION >= 3004
+  return __builtin_popcount (mask);
+#else
+  unsigned int i, sum = 0;
+
+  for (i = 0; i < 8 * sizeof (mask); i++)
+    {
+      if (mask & 0x1)
+       sum++;
+      mask = (mask >> 1);
+    }
+  return sum;
+#endif
+}
+
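Both helpers fall back to a portable loop when the GCC builtins are unavailable. For example:

    BFD_ASSERT (ctz (0x10) == 4);       /* lowest set bit of 0x10 is bit 4 */
    BFD_ASSERT (popcount (0xf0) == 4);  /* 0xf0 has four bits set */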
 /* Create an entry in an ARM ELF linker hash table.  */
 
 static struct bfd_hash_entry *
@@ -3131,12 +3365,16 @@ elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx)
    union and *ARM_PLT at the ARM-specific information.  */
 
 static bfd_boolean
-elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_entry *h,
+elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_table *globals,
+                       struct elf32_arm_link_hash_entry *h,
                        unsigned long r_symndx, union gotplt_union **root_plt,
                        struct arm_plt_info **arm_plt)
 {
   struct arm_local_iplt_info *local_iplt;
 
+  if (globals->root.splt == NULL && globals->root.iplt == NULL)
+    return FALSE;
+
   if (h != NULL)
     {
       *root_plt = &h->root.plt;
@@ -3230,15 +3468,15 @@ stub_hash_newfunc (struct bfd_hash_entry *entry,
       /* Initialize the local fields.  */
       eh = (struct elf32_arm_stub_hash_entry *) entry;
       eh->stub_sec = NULL;
-      eh->stub_offset = 0;
+      eh->stub_offset = (bfd_vma) -1;
+      eh->source_value = 0;
       eh->target_value = 0;
       eh->target_section = NULL;
-      eh->target_addend = 0;
       eh->orig_insn = 0;
       eh->stub_type = arm_stub_none;
       eh->stub_size = 0;
       eh->stub_template = NULL;
-      eh->stub_template_size = 0;
+      eh->stub_template_size = -1;
       eh->h = NULL;
       eh->id_sec = NULL;
       eh->output_name = NULL;
@@ -3317,6 +3555,79 @@ create_ifunc_sections (struct bfd_link_info *info)
   return TRUE;
 }
 
+/* Determine if we're dealing with a Thumb only architecture.  */
+
+static bfd_boolean
+using_thumb_only (struct elf32_arm_link_hash_table *globals)
+{
+  int arch;
+  int profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
+                                         Tag_CPU_arch_profile);
+
+  if (profile)
+    return profile == 'M';
+
+  arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
+
+  /* Force return logic to be reviewed for each new architecture.  */
+  BFD_ASSERT (arch <= TAG_CPU_ARCH_V8
+             || arch == TAG_CPU_ARCH_V8M_BASE
+             || arch == TAG_CPU_ARCH_V8M_MAIN);
+
+  if (arch == TAG_CPU_ARCH_V6_M
+      || arch == TAG_CPU_ARCH_V6S_M
+      || arch == TAG_CPU_ARCH_V7E_M
+      || arch == TAG_CPU_ARCH_V8M_BASE
+      || arch == TAG_CPU_ARCH_V8M_MAIN)
+    return TRUE;
+
+  return FALSE;
+}
+
+/* Determine if we're dealing with a Thumb-2 object.  */
+
+static bfd_boolean
+using_thumb2 (struct elf32_arm_link_hash_table *globals)
+{
+  int arch;
+  int thumb_isa = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
+                                           Tag_THUMB_ISA_use);
+
+  if (thumb_isa)
+    return thumb_isa == 2;
+
+  arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
+
+  /* Force return logic to be reviewed for each new architecture.  */
+  BFD_ASSERT (arch <= TAG_CPU_ARCH_V8
+             || arch == TAG_CPU_ARCH_V8M_BASE
+             || arch == TAG_CPU_ARCH_V8M_MAIN);
+
+  return (arch == TAG_CPU_ARCH_V6T2
+         || arch == TAG_CPU_ARCH_V7
+         || arch == TAG_CPU_ARCH_V7E_M
+         || arch == TAG_CPU_ARCH_V8
+         || arch == TAG_CPU_ARCH_V8M_MAIN);
+}
+
+/* Determine whether Thumb-2 BL instruction is available.  */
+
+static bfd_boolean
+using_thumb2_bl (struct elf32_arm_link_hash_table *globals)
+{
+  int arch =
+    bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
+
+  /* Force return logic to be reviewed for each new architecture.  */
+  BFD_ASSERT (arch <= TAG_CPU_ARCH_V8
+             || arch == TAG_CPU_ARCH_V8M_BASE
+             || arch == TAG_CPU_ARCH_V8M_MAIN);
+
+  /* Architectures introduced after ARMv6T2 (e.g. ARMv6-M) all have the
+     32-bit Thumb-2 BL encoding.  */
+  return (arch == TAG_CPU_ARCH_V6T2
+         || arch >= TAG_CPU_ARCH_V7);
+}
+
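The three predicates are deliberately independent. For an ARMv8-M Baseline object, for instance, they would give the following (illustrative assertions, assuming globals->obfd carries TAG_CPU_ARCH_V8M_BASE attributes and no full Thumb-2 ISA is recorded):

    BFD_ASSERT (using_thumb_only (globals));  /* M profile */
    BFD_ASSERT (!using_thumb2 (globals));     /* no full Thumb-2 ISA */
    BFD_ASSERT (using_thumb2_bl (globals));   /* but wide BL is available */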
 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
    .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
    hash table.  */
@@ -3337,7 +3648,7 @@ elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
     return FALSE;
 
   htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
-  if (!info->shared)
+  if (!bfd_link_pic (info))
     htab->srelbss = bfd_get_linker_section (dynobj,
                                            RELOC_SECTION (htab, ".bss"));
 
@@ -3346,7 +3657,7 @@ elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
       if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
        return FALSE;
 
-      if (info->shared)
+      if (bfd_link_pic (info))
        {
          htab->plt_header_size = 0;
          htab->plt_entry_size
@@ -3359,12 +3670,31 @@ elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
          htab->plt_entry_size
            = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
        }
+
+      if (elf_elfheader (dynobj))
+       elf_elfheader (dynobj)->e_ident[EI_CLASS] = ELFCLASS32;
+    }
+  else
+    {
+      /* PR ld/16017
+        Test for thumb only architectures.  Note - we cannot just call
+        using_thumb_only() as the attributes in the output bfd have not been
+        initialised at this point, so instead we use the input bfd.  */
+      bfd * saved_obfd = htab->obfd;
+
+      htab->obfd = dynobj;
+      if (using_thumb_only (htab))
+       {
+         htab->plt_header_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
+         htab->plt_entry_size  = 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
+       }
+      htab->obfd = saved_obfd;
     }
 
   if (!htab->root.splt
       || !htab->root.srelplt
       || !htab->sdynbss
-      || (!info->shared && !htab->srelbss))
+      || (!bfd_link_pic (info) && !htab->srelbss))
     abort ();
 
   return TRUE;
@@ -3437,6 +3767,18 @@ elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
   _bfd_elf_link_hash_copy_indirect (info, dir, ind);
 }
 
+/* Destroy an ARM elf linker hash table.  */
+
+static void
+elf32_arm_link_hash_table_free (bfd *obfd)
+{
+  struct elf32_arm_link_hash_table *ret
+    = (struct elf32_arm_link_hash_table *) obfd->link.hash;
+
+  bfd_hash_table_free (&ret->stub_hash_table);
+  _bfd_elf_link_hash_table_free (obfd);
+}
+
 /* Create an ARM elf linker hash table.  */
 
 static struct bfd_link_hash_table *
@@ -3459,12 +3801,13 @@ elf32_arm_link_hash_table_create (bfd *abfd)
     }
 
   ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
+  ret->stm32l4xx_fix = BFD_ARM_STM32L4XX_FIX_NONE;
 #ifdef FOUR_WORD_PLT
   ret->plt_header_size = 16;
   ret->plt_entry_size = 16;
 #else
   ret->plt_header_size = 20;
-  ret->plt_entry_size = 12;
+  ret->plt_entry_size = elf32_arm_use_long_plt_entry ? 16 : 12;
 #endif
   ret->use_rel = 1;
   ret->obfd = abfd;
@@ -3472,56 +3815,14 @@ elf32_arm_link_hash_table_create (bfd *abfd)
   if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
                            sizeof (struct elf32_arm_stub_hash_entry)))
     {
-      free (ret);
+      _bfd_elf_link_hash_table_free (abfd);
       return NULL;
     }
+  ret->root.root.hash_table_free = elf32_arm_link_hash_table_free;
 
   return &ret->root.root;
 }
 
-/* Free the derived linker hash table.  */
-
-static void
-elf32_arm_hash_table_free (struct bfd_link_hash_table *hash)
-{
-  struct elf32_arm_link_hash_table *ret
-    = (struct elf32_arm_link_hash_table *) hash;
-
-  bfd_hash_table_free (&ret->stub_hash_table);
-  _bfd_elf_link_hash_table_free (hash);
-}
-
-/* Determine if we're dealing with a Thumb only architecture.  */
-
-static bfd_boolean
-using_thumb_only (struct elf32_arm_link_hash_table *globals)
-{
-  int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
-                                      Tag_CPU_arch);
-  int profile;
-
-  if (arch == TAG_CPU_ARCH_V6_M || arch == TAG_CPU_ARCH_V6S_M)
-    return TRUE;
-
-  if (arch != TAG_CPU_ARCH_V7 && arch != TAG_CPU_ARCH_V7E_M)
-    return FALSE;
-
-  profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
-                                     Tag_CPU_arch_profile);
-
-  return profile == 'M';
-}
-
-/* Determine if we're dealing with a Thumb-2 object.  */
-
-static bfd_boolean
-using_thumb2 (struct elf32_arm_link_hash_table *globals)
-{
-  int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
-                                      Tag_CPU_arch);
-  return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7;
-}
-
 /* Determine what kind of NOPs are available.  */
 
 static bfd_boolean
@@ -3529,19 +3830,16 @@ arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
 {
   const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
                                             Tag_CPU_arch);
-  return arch == TAG_CPU_ARCH_V6T2
-        || arch == TAG_CPU_ARCH_V6K
-        || arch == TAG_CPU_ARCH_V7
-        || arch == TAG_CPU_ARCH_V7E_M;
-}
 
-static bfd_boolean
-arch_has_thumb2_nop (struct elf32_arm_link_hash_table *globals)
-{
-  const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
-                                            Tag_CPU_arch);
-  return (arch == TAG_CPU_ARCH_V6T2 || arch == TAG_CPU_ARCH_V7
-         || arch == TAG_CPU_ARCH_V7E_M);
+  /* Force return logic to be reviewed for each new architecture.  */
+  BFD_ASSERT (arch <= TAG_CPU_ARCH_V8
+             || arch == TAG_CPU_ARCH_V8M_BASE
+             || arch == TAG_CPU_ARCH_V8M_MAIN);
+
+  return (arch == TAG_CPU_ARCH_V6T2
+         || arch == TAG_CPU_ARCH_V6K
+         || arch == TAG_CPU_ARCH_V7
+         || arch == TAG_CPU_ARCH_V8);
 }
 
 static bfd_boolean
@@ -3550,11 +3848,14 @@ arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
   switch (stub_type)
     {
     case arm_stub_long_branch_thumb_only:
+    case arm_stub_long_branch_thumb2_only:
+    case arm_stub_long_branch_thumb2_only_pure:
     case arm_stub_long_branch_v4t_thumb_arm:
     case arm_stub_short_branch_v4t_thumb_arm:
     case arm_stub_long_branch_v4t_thumb_arm_pic:
     case arm_stub_long_branch_v4t_thumb_tls_pic:
     case arm_stub_long_branch_thumb_only_pic:
+    case arm_stub_cmse_branch_thumb_only:
       return TRUE;
     case arm_stub_none:
       BFD_FAIL ();
@@ -3583,13 +3884,14 @@ arm_type_of_stub (struct bfd_link_info *info,
   bfd_signed_vma branch_offset;
   unsigned int r_type;
   struct elf32_arm_link_hash_table * globals;
-  int thumb2;
-  int thumb_only;
+  bfd_boolean thumb2, thumb2_bl, thumb_only;
   enum elf32_arm_stub_type stub_type = arm_stub_none;
   int use_plt = 0;
   enum arm_st_branch_type branch_type = *actual_branch_type;
   union gotplt_union *root_plt;
   struct arm_plt_info *arm_plt;
+  int arch;
+  int thumb2_movw;
 
   if (branch_type == ST_BRANCH_LONG)
     return stub_type;
@@ -3599,8 +3901,13 @@ arm_type_of_stub (struct bfd_link_info *info,
     return stub_type;
 
   thumb_only = using_thumb_only (globals);
-
   thumb2 = using_thumb2 (globals);
+  thumb2_bl = using_thumb2_bl (globals);
+
+  arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
+
+  /* True for architectures that implement the Thumb-2 movw instruction.  */
+  thumb2_movw = thumb2 || (arch == TAG_CPU_ARCH_V8M_BASE);
 
   /* Determine where the call point is.  */
   location = (input_sec->output_offset
@@ -3611,7 +3918,8 @@ arm_type_of_stub (struct bfd_link_info *info,
 
   /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
      are considering a function call relocation.  */
-  if (thumb_only && (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
+  if (thumb_only && (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
+                     || r_type == R_ARM_THM_JUMP19)
       && branch_type == ST_BRANCH_TO_ARM)
     branch_type = ST_BRANCH_TO_THUMB;
 
@@ -3619,8 +3927,9 @@ arm_type_of_stub (struct bfd_link_info *info,
      the address of the appropriate trampoline.  */
   if (r_type != R_ARM_TLS_CALL
       && r_type != R_ARM_THM_TLS_CALL
-      && elf32_arm_get_plt_info (input_bfd, hash, ELF32_R_SYM (rel->r_info),
-                                &root_plt, &arm_plt)
+      && elf32_arm_get_plt_info (input_bfd, globals, hash,
+                                ELF32_R_SYM (rel->r_info), &root_plt,
+                                &arm_plt)
       && root_plt->offset != (bfd_vma) -1)
     {
       asection *splt;
@@ -3655,7 +3964,7 @@ arm_type_of_stub (struct bfd_link_info *info,
   branch_offset = (bfd_signed_vma)(destination - location);
 
   if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
-      || r_type == R_ARM_THM_TLS_CALL)
+      || r_type == R_ARM_THM_TLS_CALL || r_type == R_ARM_THM_JUMP19)
     {
       /* Handle cases where:
         - this call goes too far (different Thumb/Thumb2 max
@@ -3665,16 +3974,21 @@ arm_type_of_stub (struct bfd_link_info *info,
           but only if this call is not through a PLT entry. Indeed,
           PLT stubs handle mode switching already.
       */
-      if ((!thumb2
+      if ((!thumb2_bl
            && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
                || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
-         || (thumb2
+         || (thumb2_bl
              && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
                  || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
+         || (thumb2
+             && (branch_offset > THM2_MAX_FWD_COND_BRANCH_OFFSET
+                 || (branch_offset < THM2_MAX_BWD_COND_BRANCH_OFFSET))
+             && (r_type == R_ARM_THM_JUMP19))
          || (branch_type == ST_BRANCH_TO_ARM
              && (((r_type == R_ARM_THM_CALL
                    || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx)
-                 || (r_type == R_ARM_THM_JUMP24))
+                 || (r_type == R_ARM_THM_JUMP24)
+                  || (r_type == R_ARM_THM_JUMP19))
              && !use_plt))
        {
          if (branch_type == ST_BRANCH_TO_THUMB)
@@ -3682,7 +3996,16 @@ arm_type_of_stub (struct bfd_link_info *info,
              /* Thumb to thumb.  */
              if (!thumb_only)
                {
-                 stub_type = (info->shared | globals->pic_veneer)
+                 if (input_sec->flags & SEC_ELF_PURECODE)
+                   (*_bfd_error_handler) (_("%B(%s): warning: long branch "
+                                            " veneers used in section with "
+                                            "SHF_ARM_PURECODE section "
+                                            "attribute is only supported"
+                                            " for M-profile targets that "
+                                            "implement the movw "
+                                            "instruction."));
+
+                 stub_type = (bfd_link_pic (info) | globals->pic_veneer)
                    /* PIC stubs.  */
                    ? ((globals->use_blx
                        && (r_type == R_ARM_THM_CALL))
@@ -3704,15 +4027,39 @@ arm_type_of_stub (struct bfd_link_info *info,
                }
              else
                {
-                 stub_type = (info->shared | globals->pic_veneer)
-                   /* PIC stub.  */
-                   ? arm_stub_long_branch_thumb_only_pic
-                   /* non-PIC stub.  */
-                   : arm_stub_long_branch_thumb_only;
+                 if (thumb2_movw && (input_sec->flags & SEC_ELF_PURECODE))
+                     stub_type = arm_stub_long_branch_thumb2_only_pure;
+                 else
+                   {
+                     if (input_sec->flags & SEC_ELF_PURECODE)
+                       (*_bfd_error_handler) (_("%B(%s): warning: long branch "
+                                                " veneers used in section with "
+                                                "SHF_ARM_PURECODE section "
+                                                "attribute is only supported"
+                                                " for M-profile targets that "
+                                                "implement the movw "
+                                                "instruction."));
+
+                     stub_type = (bfd_link_pic (info) | globals->pic_veneer)
+                       /* PIC stub.  */
+                       ? arm_stub_long_branch_thumb_only_pic
+                       /* non-PIC stub.  */
+                       : (thumb2 ? arm_stub_long_branch_thumb2_only
+                                 : arm_stub_long_branch_thumb_only);
+                   }
                }
            }
          else
            {
+             if (input_sec->flags & SEC_ELF_PURECODE)
+               (*_bfd_error_handler) (_("%B(%s): warning: long branch "
+                                        " veneers used in section with "
+                                        "SHF_ARM_PURECODE section "
+                                        "attribute is only supported"
+                                        " for M-profile targets that "
+                                        "implement the movw "
+                                        "instruction."));
+
              /* Thumb to arm.  */
              if (sym_sec != NULL
                  && sym_sec->owner != NULL
@@ -3725,10 +4072,10 @@ arm_type_of_stub (struct bfd_link_info *info,
                }
 
              stub_type =
-               (info->shared | globals->pic_veneer)
+               (bfd_link_pic (info) | globals->pic_veneer)
                /* PIC stubs.  */
                ? (r_type == R_ARM_THM_TLS_CALL
-                  /* TLS PIC stubs */
+                  /* TLS PIC stubs */
                   ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic
                      : arm_stub_long_branch_v4t_thumb_tls_pic)
                   : ((globals->use_blx && r_type == R_ARM_THM_CALL)
@@ -3757,6 +4104,14 @@ arm_type_of_stub (struct bfd_link_info *info,
           || r_type == R_ARM_PLT32
           || r_type == R_ARM_TLS_CALL)
     {
+      if (input_sec->flags & SEC_ELF_PURECODE)
+       (*_bfd_error_handler) (_("%B(%s): warning: long branch "
+                                " veneers used in section with "
+                                "SHF_ARM_PURECODE section "
+                                "attribute is only supported"
+                                " for M-profile targets that "
+                                "implement the movw "
+                                "instruction."));
       if (branch_type == ST_BRANCH_TO_THUMB)
        {
          /* Arm to thumb.  */
@@ -3779,7 +4134,7 @@ arm_type_of_stub (struct bfd_link_info *info,
              || (r_type == R_ARM_JUMP24)
              || (r_type == R_ARM_PLT32))
            {
-             stub_type = (info->shared | globals->pic_veneer)
+             stub_type = (bfd_link_pic (info) | globals->pic_veneer)
                /* PIC stubs.  */
                ? ((globals->use_blx)
                   /* V5T and above.  */
@@ -3802,10 +4157,10 @@ arm_type_of_stub (struct bfd_link_info *info,
              || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
            {
              stub_type =
-               (info->shared | globals->pic_veneer)
+               (bfd_link_pic (info) | globals->pic_veneer)
                /* PIC stubs.  */
                ? (r_type == R_ARM_TLS_CALL
-                  /* TLS PIC Stub */
+                  /* TLS PIC Stub */
                   ? arm_stub_long_branch_any_tls_pic
                   : (globals->nacl_p
                      ? arm_stub_long_branch_arm_nacl_pic
@@ -3917,66 +4272,192 @@ elf32_arm_get_stub_entry (const asection *input_section,
   return stub_entry;
 }
 
-/* Find or create a stub section.  Returns a pointer to the stub section, and
-   the section to which the stub section will be attached (in *LINK_SEC_P).
+/* Whether veneers of type STUB_TYPE need to be in a dedicated output
+   section.  */
+
+static bfd_boolean
+arm_dedicated_stub_output_section_required (enum elf32_arm_stub_type stub_type)
+{
+  if (stub_type >= max_stub_type)
+    abort ();  /* Should be unreachable.  */
+
+  switch (stub_type)
+    {
+    case arm_stub_cmse_branch_thumb_only:
+      return TRUE;
+
+    default:
+      return FALSE;
+    }
+
+  abort ();  /* Should be unreachable.  */
+}
+
+/* Required alignment (as a power of 2) for the dedicated section holding
+   veneers of type STUB_TYPE, or 0 if veneers of this type are interspersed
+   with input sections.  */
+
+static int
+arm_dedicated_stub_output_section_required_alignment
+  (enum elf32_arm_stub_type stub_type)
+{
+  if (stub_type >= max_stub_type)
+    abort ();  /* Should be unreachable.  */
+
+  switch (stub_type)
+    {
+    /* Vectors of Secure Gateway veneers must be aligned on 32byte
+       boundary.  */
+    case arm_stub_cmse_branch_thumb_only:
+      return 5;
+
+    default:
+      BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
+      return 0;
+    }
+
+  abort ();  /* Should be unreachable.  */
+}
+
+/* Name of the dedicated output section to put veneers of type STUB_TYPE, or
+   NULL if veneers of this type are interspersed with input sections.  */
+
+static const char *
+arm_dedicated_stub_output_section_name (enum elf32_arm_stub_type stub_type)
+{
+  if (stub_type >= max_stub_type)
+    abort ();  /* Should be unreachable.  */
+
+  switch (stub_type)
+    {
+    case arm_stub_cmse_branch_thumb_only:
+      return ".gnu.sgstubs";
+
+    default:
+      BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
+      return NULL;
+    }
+
+  abort ();  /* Should be unreachable.  */
+}
+
+/* If veneers of type STUB_TYPE should go in a dedicated output section,
+   returns the address of the hash table field in HTAB holding a pointer to the
+   corresponding input section.  Otherwise, returns NULL.  */
+
+static asection **
+arm_dedicated_stub_input_section_ptr (struct elf32_arm_link_hash_table *htab,
+                                     enum elf32_arm_stub_type stub_type)
+{
+  if (stub_type >= max_stub_type)
+    abort ();  /* Should be unreachable.  */
+
+  switch (stub_type)
+    {
+    case arm_stub_cmse_branch_thumb_only:
+      return &htab->cmse_stub_sec;
+
+    default:
+      BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
+      return NULL;
+    }
+
+  abort ();  /* Should be unreachable.  */
+}
+
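Taken together, these helpers fully describe the dedicated section for a stub type. For SG veneers the answers would be as follows (usage sketch, not from the patch):

    enum elf32_arm_stub_type t = arm_stub_cmse_branch_thumb_only;
    const char *name = arm_dedicated_stub_output_section_name (t);
                                        /* ".gnu.sgstubs" */
    int align = arm_dedicated_stub_output_section_required_alignment (t);
                                        /* 5, i.e. 1 << 5 = 32-byte aligned */
    asection **in_sec = arm_dedicated_stub_input_section_ptr (htab, t);
                                        /* &htab->cmse_stub_sec */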
+/* Find or create a stub section to contain a stub of type STUB_TYPE.  SECTION
+   is the section containing the branch into the veneer; it can be NULL if the
+   stub should go in a dedicated output section.  Returns a pointer to the
+   stub section, and the
+   section to which the stub section will be attached (in *LINK_SEC_P).
    LINK_SEC_P may be NULL.  */
 
 static asection *
 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
-                                  struct elf32_arm_link_hash_table *htab)
+                                  struct elf32_arm_link_hash_table *htab,
+                                  enum elf32_arm_stub_type stub_type)
 {
-  asection *link_sec;
-  asection *stub_sec;
+  asection *link_sec, *out_sec, **stub_sec_p;
+  const char *stub_sec_prefix;
+  bfd_boolean dedicated_output_section =
+    arm_dedicated_stub_output_section_required (stub_type);
+  int align;
 
-  link_sec = htab->stub_group[section->id].link_sec;
-  BFD_ASSERT (link_sec != NULL);
-  stub_sec = htab->stub_group[section->id].stub_sec;
-
-  if (stub_sec == NULL)
+  if (dedicated_output_section)
     {
-      stub_sec = htab->stub_group[link_sec->id].stub_sec;
-      if (stub_sec == NULL)
+      bfd *output_bfd = htab->obfd;
+      const char *out_sec_name =
+       arm_dedicated_stub_output_section_name (stub_type);
+      link_sec = NULL;
+      stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
+      stub_sec_prefix = out_sec_name;
+      align = arm_dedicated_stub_output_section_required_alignment (stub_type);
+      out_sec = bfd_get_section_by_name (output_bfd, out_sec_name);
+      if (out_sec == NULL)
        {
-         size_t namelen;
-         bfd_size_type len;
-         char *s_name;
+         (*_bfd_error_handler) (_("No address assigned to the veneers output "
+                                  "section %s"), out_sec_name);
+         return NULL;
+       }
+    }
+  else
+    {
+      link_sec = htab->stub_group[section->id].link_sec;
+      BFD_ASSERT (link_sec != NULL);
+      stub_sec_p = &htab->stub_group[section->id].stub_sec;
+      if (*stub_sec_p == NULL)
+       stub_sec_p = &htab->stub_group[link_sec->id].stub_sec;
+      stub_sec_prefix = link_sec->name;
+      out_sec = link_sec->output_section;
+      align = htab->nacl_p ? 4 : 3;
+    }
+
+  if (*stub_sec_p == NULL)
+    {
+      size_t namelen;
+      bfd_size_type len;
+      char *s_name;
+
+      namelen = strlen (stub_sec_prefix);
+      len = namelen + sizeof (STUB_SUFFIX);
+      s_name = (char *) bfd_alloc (htab->stub_bfd, len);
+      if (s_name == NULL)
+       return NULL;
 
-         namelen = strlen (link_sec->name);
-         len = namelen + sizeof (STUB_SUFFIX);
-         s_name = (char *) bfd_alloc (htab->stub_bfd, len);
-         if (s_name == NULL)
-           return NULL;
+      memcpy (s_name, stub_sec_prefix, namelen);
+      memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
+      *stub_sec_p = (*htab->add_stub_section) (s_name, out_sec, link_sec,
+                                              align);
+      if (*stub_sec_p == NULL)
+       return NULL;
 
-         memcpy (s_name, link_sec->name, namelen);
-         memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
-         stub_sec = (*htab->add_stub_section) (s_name, link_sec,
-                                               htab->nacl_p ? 4 : 3);
-         if (stub_sec == NULL)
-           return NULL;
-         htab->stub_group[link_sec->id].stub_sec = stub_sec;
-       }
-      htab->stub_group[section->id].stub_sec = stub_sec;
+      out_sec->flags |= SEC_ALLOC | SEC_LOAD | SEC_READONLY | SEC_CODE
+                       | SEC_HAS_CONTENTS | SEC_RELOC | SEC_IN_MEMORY
+                       | SEC_KEEP;
     }
 
+  if (!dedicated_output_section)
+    htab->stub_group[section->id].stub_sec = *stub_sec_p;
+
   if (link_sec_p)
     *link_sec_p = link_sec;
 
-  return stub_sec;
+  return *stub_sec_p;
 }
 
 /* Add a new stub entry to the stub hash.  Not all fields of the new
    stub entry are initialised.  */
 
 static struct elf32_arm_stub_hash_entry *
-elf32_arm_add_stub (const char *stub_name,
-                   asection *section,
-                   struct elf32_arm_link_hash_table *htab)
+elf32_arm_add_stub (const char *stub_name, asection *section,
+                   struct elf32_arm_link_hash_table *htab,
+                   enum elf32_arm_stub_type stub_type)
 {
   asection *link_sec;
   asection *stub_sec;
   struct elf32_arm_stub_hash_entry *stub_entry;
 
-  stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab);
+  stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab,
+                                               stub_type);
   if (stub_sec == NULL)
     return NULL;
 
@@ -3985,6 +4466,8 @@ elf32_arm_add_stub (const char *stub_name,
                                     TRUE, FALSE);
   if (stub_entry == NULL)
     {
+      if (section == NULL)
+       section = stub_sec;
       (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
                             section->owner,
                             stub_name);
@@ -3992,7 +4475,7 @@ elf32_arm_add_stub (const char *stub_name,
     }
 
   stub_entry->stub_sec = stub_sec;
-  stub_entry->stub_offset = 0;
+  stub_entry->stub_offset = (bfd_vma) -1;
   stub_entry->id_sec = link_sec;
 
   return stub_entry;
@@ -4024,6 +4507,26 @@ put_thumb_insn (struct elf32_arm_link_hash_table * htab,
     bfd_putb16 (val, ptr);
 }
 
+/* Store a Thumb2 insn into an output section not processed by
+   elf32_arm_write_section.  */
+
+static void
+put_thumb2_insn (struct elf32_arm_link_hash_table * htab,
+                bfd * output_bfd, bfd_vma val, bfd_byte * ptr)
+{
+  /* A Thumb-2 instruction is emitted as a pair of 16-bit halfwords.  */
+  if (htab->byteswap_code != bfd_little_endian (output_bfd))
+    {
+      bfd_putl16 ((val >> 16) & 0xffff, ptr);
+      bfd_putl16 ((val & 0xffff), ptr + 2);
+    }
+  else
+    {
+      bfd_putb16 ((val >> 16) & 0xffff, ptr);
+      bfd_putb16 ((val & 0xffff), ptr + 2);
+    }
+}
+
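Usage example (illustrative): emitting the ldr.w opcode used by the thumb2_only long-branch stub writes the high halfword first, so on a little-endian target the byte stream is 5f f8 00 f0:

    put_thumb2_insn (htab, output_bfd, 0xf85ff000, ptr);  /* ldr.w pc, [pc, #-0] */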
 /* If it's possible to change R_TYPE to a more efficient access
    model, return the new reloc type.  */
 
@@ -4033,7 +4536,8 @@ elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
 {
   int is_local = (h == NULL);
 
-  if (info->shared || (h && h->root.type == bfd_link_hash_undefweak))
+  if (bfd_link_pic (info)
+      || (h && h->root.type == bfd_link_hash_undefweak))
     return r_type;
 
   /* We do not support relaxations for Old TLS models.  */
@@ -4069,6 +4573,8 @@ arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
     case arm_stub_long_branch_any_any:
     case arm_stub_long_branch_v4t_arm_thumb:
     case arm_stub_long_branch_thumb_only:
+    case arm_stub_long_branch_thumb2_only:
+    case arm_stub_long_branch_thumb2_only_pure:
     case arm_stub_long_branch_v4t_thumb_thumb:
     case arm_stub_long_branch_v4t_thumb_arm:
     case arm_stub_short_branch_v4t_thumb_arm:
@@ -4080,6 +4586,7 @@ arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
     case arm_stub_long_branch_thumb_only_pic:
     case arm_stub_long_branch_any_tls_pic:
     case arm_stub_long_branch_v4t_thumb_tls_pic:
+    case arm_stub_cmse_branch_thumb_only:
     case arm_stub_a8_veneer_blx:
       return 4;
 
@@ -4092,14 +4599,76 @@ arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
     }
 }
 
+/* Returns whether stubs of type STUB_TYPE take over the symbol they are
+   veneering (TRUE) or have their own symbol (FALSE).  */
+
 static bfd_boolean
-arm_build_one_stub (struct bfd_hash_entry *gen_entry,
-                   void * in_arg)
+arm_stub_sym_claimed (enum elf32_arm_stub_type stub_type)
 {
-#define MAXRELOCS 3
-  struct elf32_arm_stub_hash_entry *stub_entry;
-  struct elf32_arm_link_hash_table *globals;
-  struct bfd_link_info *info;
+  if (stub_type >= max_stub_type)
+    abort ();  /* Should be unreachable.  */
+
+  switch (stub_type)
+    {
+    case arm_stub_cmse_branch_thumb_only:
+      return TRUE;
+
+    default:
+      return FALSE;
+    }
+
+  abort ();  /* Should be unreachable.  */
+}
+
+/* Returns the padding needed for the dedicated section used by stubs of type
+   STUB_TYPE.  */
+
+static int
+arm_dedicated_stub_section_padding (enum elf32_arm_stub_type stub_type)
+{
+  if (stub_type >= max_stub_type)
+    abort ();  /* Should be unreachable.  */
+
+  switch (stub_type)
+    {
+    case arm_stub_cmse_branch_thumb_only:
+      return 32;
+
+    default:
+      return 0;
+    }
+
+  abort ();  /* Should be unreachable.  */
+}
+
+/* If veneers of type STUB_TYPE should go in a dedicated output section,
+   returns the address of the hash table field in HTAB holding the offset at
+   which new veneers should be laid out in the stub section.  */
+
+static bfd_vma*
+arm_new_stubs_start_offset_ptr (struct elf32_arm_link_hash_table *htab,
+                               enum elf32_arm_stub_type stub_type)
+{
+  switch (stub_type)
+    {
+    case arm_stub_cmse_branch_thumb_only:
+      return &htab->new_cmse_stub_offset;
+
+    default:
+      BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
+      return NULL;
+    }
+}
+
+static bfd_boolean
+arm_build_one_stub (struct bfd_hash_entry *gen_entry,
+                   void * in_arg)
+{
+#define MAXRELOCS 3
+  bfd_boolean removed_sg_veneer;
+  struct elf32_arm_stub_hash_entry *stub_entry;
+  struct elf32_arm_link_hash_table *globals;
+  struct bfd_link_info *info;
   asection *stub_sec;
   bfd *stub_bfd;
   bfd_byte *loc;
@@ -4111,6 +4680,7 @@ arm_build_one_stub (struct bfd_hash_entry *gen_entry,
   int stub_reloc_idx[MAXRELOCS] = {-1, -1};
   int stub_reloc_offset[MAXRELOCS] = {0, 0};
   int nrelocs = 0;
+  int just_allocated = 0;
 
   /* Massage our args to the form they really have.  */
   stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
@@ -4127,8 +4697,12 @@ arm_build_one_stub (struct bfd_hash_entry *gen_entry,
     /* We have to do less-strictly-aligned fixes last.  */
     return TRUE;
 
-  /* Make a note of the offset within the stubs for this entry.  */
-  stub_entry->stub_offset = stub_sec->size;
+  /* Assign a slot at the end of the section if none has been assigned yet.  */
+  if (stub_entry->stub_offset == (bfd_vma) -1)
+    {
+      stub_entry->stub_offset = stub_sec->size;
+      just_allocated = 1;
+    }
   loc = stub_sec->contents + stub_entry->stub_offset;
 
   stub_bfd = stub_sec->owner;
@@ -4202,7 +4776,8 @@ arm_build_one_stub (struct bfd_hash_entry *gen_entry,
        }
     }
 
-  stub_sec->size += size;
+  if (just_allocated)
+    stub_sec->size += size;
 
   /* Stub size has already been computed in arm_size_one_stub. Check
      consistency.  */
@@ -4212,70 +4787,43 @@ arm_build_one_stub (struct bfd_hash_entry *gen_entry,
   if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
     sym_value |= 1;
 
-  /* Assume there is at least one and at most MAXRELOCS entries to relocate
-     in each stub.  */
-  BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);
+  /* Assume non-empty slots have at least one and at most MAXRELOCS entries
+     to relocate in each stub.  */
+  removed_sg_veneer =
+    (size == 0 && stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
+  BFD_ASSERT (removed_sg_veneer || (nrelocs != 0 && nrelocs <= MAXRELOCS));
 
   for (i = 0; i < nrelocs; i++)
-    if (template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP24
-       || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP19
-       || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_CALL
-       || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_XPC22)
-      {
-       Elf_Internal_Rela rel;
-       bfd_boolean unresolved_reloc;
-       char *error_message;
-       enum arm_st_branch_type branch_type
-         = (template_sequence[stub_reloc_idx[i]].r_type != R_ARM_THM_XPC22
-            ? ST_BRANCH_TO_THUMB : ST_BRANCH_TO_ARM);
-       bfd_vma points_to = sym_value + stub_entry->target_addend;
-
-       rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
-       rel.r_info = ELF32_R_INFO (0,
-                                  template_sequence[stub_reloc_idx[i]].r_type);
-       rel.r_addend = template_sequence[stub_reloc_idx[i]].reloc_addend;
-
-       if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
-         /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
-            template should refer back to the instruction after the original
-            branch.  */
-         points_to = sym_value;
-
-       /* There may be unintended consequences if this is not true.  */
-       BFD_ASSERT (stub_entry->h == NULL);
-
-       /* Note: _bfd_final_link_relocate doesn't handle these relocations
-          properly.  We should probably use this function unconditionally,
-          rather than only for certain relocations listed in the enclosing
-          conditional, for the sake of consistency.  */
-       elf32_arm_final_link_relocate (elf32_arm_howto_from_type
-           (template_sequence[stub_reloc_idx[i]].r_type),
-         stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
-         points_to, info, stub_entry->target_section, "", STT_FUNC,
-         branch_type, (struct elf_link_hash_entry *) stub_entry->h,
-         &unresolved_reloc, &error_message);
-      }
-    else
-      {
-       Elf_Internal_Rela rel;
-       bfd_boolean unresolved_reloc;
-       char *error_message;
-       bfd_vma points_to = sym_value + stub_entry->target_addend
-         + template_sequence[stub_reloc_idx[i]].reloc_addend;
-
-       rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
-       rel.r_info = ELF32_R_INFO (0,
-                                  template_sequence[stub_reloc_idx[i]].r_type);
-       rel.r_addend = 0;
-
-       elf32_arm_final_link_relocate (elf32_arm_howto_from_type
-           (template_sequence[stub_reloc_idx[i]].r_type),
-         stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
-         points_to, info, stub_entry->target_section, "", STT_FUNC,
-         stub_entry->branch_type,
-         (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
-         &error_message);
-      }
+    {
+      Elf_Internal_Rela rel;
+      bfd_boolean unresolved_reloc;
+      char *error_message;
+      bfd_vma points_to =
+       sym_value + template_sequence[stub_reloc_idx[i]].reloc_addend;
+
+      rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
+      rel.r_info = ELF32_R_INFO (0,
+                                template_sequence[stub_reloc_idx[i]].r_type);
+      rel.r_addend = 0;
+
+      if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
+       /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
+          template should refer back to the instruction after the original
+          branch.  We use target_section as Cortex-A8 erratum workaround stubs
+          are only generated when both source and target are in the same
+          section.  */
+       points_to = stub_entry->target_section->output_section->vma
+                   + stub_entry->target_section->output_offset
+                   + stub_entry->source_value;
+
+      elf32_arm_final_link_relocate (elf32_arm_howto_from_type
+         (template_sequence[stub_reloc_idx[i]].r_type),
+          stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
+          points_to, info, stub_entry->target_section, "", STT_FUNC,
+          stub_entry->branch_type,
+          (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
+          &error_message);
+    }
 
   return TRUE;
 #undef MAXRELOCS
@@ -4345,9 +4893,17 @@ arm_size_one_stub (struct bfd_hash_entry *gen_entry,
   size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
                                      &template_size);
 
-  stub_entry->stub_size = size;
-  stub_entry->stub_template = template_sequence;
-  stub_entry->stub_template_size = template_size;
+  /* Initialized to -1.  A size of zero indicates an empty slot full of zeros.  */
+  if (stub_entry->stub_template_size)
+    {
+      stub_entry->stub_size = size;
+      stub_entry->stub_template = template_sequence;
+      stub_entry->stub_template_size = template_size;
+    }
+
+  /* Already accounted for.  */
+  if (stub_entry->stub_offset != (bfd_vma) -1)
+    return TRUE;
 
   size = (size + 7) & ~7;
   stub_entry->stub_sec->size += size;
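Each slot is padded to a multiple of 8 bytes by the rounding above; e.g. a 12-byte template reserves 16 bytes:

    BFD_ASSERT (((12 + 7) & ~7) == 16);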
@@ -4367,7 +4923,7 @@ elf32_arm_setup_section_lists (bfd *output_bfd,
 {
   bfd *input_bfd;
   unsigned int bfd_count;
-  int top_id, top_index;
+  unsigned int top_id, top_index;
   asection *section;
   asection **input_list, **list;
   bfd_size_type amt;
@@ -4381,7 +4937,7 @@ elf32_arm_setup_section_lists (bfd *output_bfd,
   /* Count the number of input BFDs and find the top input section id.  */
   for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
        input_bfd != NULL;
-       input_bfd = input_bfd->link_next)
+       input_bfd = input_bfd->link.next)
     {
       bfd_count += 1;
       for (section = input_bfd->sections;
@@ -4868,7 +5424,8 @@ cortex_a8_erratum_scan (bfd *input_bfd,
                          a8_fixes[num_a8_fixes].input_bfd = input_bfd;
                          a8_fixes[num_a8_fixes].section = section;
                          a8_fixes[num_a8_fixes].offset = i;
-                         a8_fixes[num_a8_fixes].addend = offset;
+                         a8_fixes[num_a8_fixes].target_offset =
+                           target - base_vma;
                          a8_fixes[num_a8_fixes].orig_insn = insn;
                          a8_fixes[num_a8_fixes].stub_name = stub_name;
                          a8_fixes[num_a8_fixes].stub_type = stub_type;
@@ -4897,178 +5454,787 @@ cortex_a8_erratum_scan (bfd *input_bfd,
   return FALSE;
 }
 
-/* Determine and set the size of the stub section for a final link.
+/* Create or update a stub entry depending on whether the stub can already be
+   found in HTAB.  The stub is identified by:
+   - its type STUB_TYPE
+   - its source branch (note that several can share the same stub) whose
+     section and relocation (if any) are given by SECTION and IRELA
+     respectively
+   - its target symbol whose input section, hash, name, value and branch type
+     are given in SYM_SEC, HASH, SYM_NAME, SYM_VALUE and BRANCH_TYPE
+     respectively
 
-   The basic idea here is to examine all the relocations looking for
-   PC-relative calls to a target that is unreachable with a "bl"
-   instruction.  */
+   If found, the value of the stub's target symbol is updated from SYM_VALUE
+   and *NEW_STUB is set to FALSE.  Otherwise, *NEW_STUB is set to
+   TRUE and the stub entry is initialized.
 
-bfd_boolean
-elf32_arm_size_stubs (bfd *output_bfd,
-                     bfd *stub_bfd,
-                     struct bfd_link_info *info,
-                     bfd_signed_vma group_size,
-                     asection * (*add_stub_section) (const char *, asection *,
-                                                     unsigned int),
-                     void (*layout_sections_again) (void))
+   Returns the stub that was created or updated, or NULL if an error
+   occurred.  */
+
+static struct elf32_arm_stub_hash_entry *
+elf32_arm_create_stub (struct elf32_arm_link_hash_table *htab,
+                      enum elf32_arm_stub_type stub_type, asection *section,
+                      Elf_Internal_Rela *irela, asection *sym_sec,
+                      struct elf32_arm_link_hash_entry *hash, char *sym_name,
+                      bfd_vma sym_value, enum arm_st_branch_type branch_type,
+                      bfd_boolean *new_stub)
 {
-  bfd_size_type stub_group_size;
-  bfd_boolean stubs_always_after_branch;
-  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
-  struct a8_erratum_fix *a8_fixes = NULL;
-  unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
-  struct a8_erratum_reloc *a8_relocs = NULL;
-  unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
+  const asection *id_sec;
+  char *stub_name;
+  struct elf32_arm_stub_hash_entry *stub_entry;
+  unsigned int r_type;
+  bfd_boolean sym_claimed = arm_stub_sym_claimed (stub_type);
 
-  if (htab == NULL)
-    return FALSE;
+  BFD_ASSERT (stub_type != arm_stub_none);
+  *new_stub = FALSE;
 
-  if (htab->fix_cortex_a8)
+  if (sym_claimed)
+    stub_name = sym_name;
+  else
     {
-      a8_fixes = (struct a8_erratum_fix *)
-         bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
-      a8_relocs = (struct a8_erratum_reloc *)
-         bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
+      BFD_ASSERT (irela);
+      BFD_ASSERT (section);
+
+      /* Support for grouping stub sections.  */
+      id_sec = htab->stub_group[section->id].link_sec;
+
+      /* Get the name of this stub.  */
+      stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash, irela,
+                                      stub_type);
+      if (!stub_name)
+       return NULL;
     }
 
-  /* Propagate mach to stub bfd, because it may not have been
-     finalized when we created stub_bfd.  */
-  bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
-                    bfd_get_mach (output_bfd));
+  stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name, FALSE,
+                                    FALSE);
+  /* The proper stub has already been created, just update its value.  */
+  if (stub_entry != NULL)
+    {
+      if (!sym_claimed)
+       free (stub_name);
+      stub_entry->target_value = sym_value;
+      return stub_entry;
+    }
 
-  /* Stash our params away.  */
-  htab->stub_bfd = stub_bfd;
-  htab->add_stub_section = add_stub_section;
-  htab->layout_sections_again = layout_sections_again;
-  stubs_always_after_branch = group_size < 0;
+  stub_entry = elf32_arm_add_stub (stub_name, section, htab, stub_type);
+  if (stub_entry == NULL)
+    {
+      if (!sym_claimed)
+       free (stub_name);
+      return NULL;
+    }
 
-  /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
-     as the first half of a 32-bit branch straddling two 4K pages.  This is a
-     crude way of enforcing that.  */
-  if (htab->fix_cortex_a8)
-    stubs_always_after_branch = 1;
+  stub_entry->target_value = sym_value;
+  stub_entry->target_section = sym_sec;
+  stub_entry->stub_type = stub_type;
+  stub_entry->h = hash;
+  stub_entry->branch_type = branch_type;
 
-  if (group_size < 0)
-    stub_group_size = -group_size;
+  if (sym_claimed)
+    stub_entry->output_name = sym_name;
   else
-    stub_group_size = group_size;
-
-  if (stub_group_size == 1)
     {
-      /* Default values.  */
-      /* Thumb branch range is +-4MB has to be used as the default
-        maximum size (a given section can contain both ARM and Thumb
-        code, so the worst case has to be taken into account).
+      if (sym_name == NULL)
+       sym_name = "unnamed";
+      stub_entry->output_name = (char *)
+       bfd_alloc (htab->stub_bfd, sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
+                                  + strlen (sym_name));
+      if (stub_entry->output_name == NULL)
+       {
+         free (stub_name);
+         return NULL;
+       }
 
-        This value is 24K less than that, which allows for 2025
-        12-byte stubs.  If we exceed that, then we will fail to link.
-        The user will have to relink with an explicit group size
-        option.  */
-      stub_group_size = 4170000;
+      /* For historical reasons, use the existing names for ARM-to-Thumb and
+        Thumb-to-ARM stubs.  */
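+      /* For a symbol "foo" these yield names like "__foo_from_thumb",
+        "__foo_from_arm" and "__foo_veneer", assuming the usual expansions
+        of these format macros.  */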
+      r_type = ELF32_R_TYPE (irela->r_info);
+      if ((r_type == (unsigned int) R_ARM_THM_CALL
+          || r_type == (unsigned int) R_ARM_THM_JUMP24
+          || r_type == (unsigned int) R_ARM_THM_JUMP19)
+         && branch_type == ST_BRANCH_TO_ARM)
+       sprintf (stub_entry->output_name, THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
+      else if ((r_type == (unsigned int) R_ARM_CALL
+               || r_type == (unsigned int) R_ARM_JUMP24)
+              && branch_type == ST_BRANCH_TO_THUMB)
+       sprintf (stub_entry->output_name, ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
+      else
+       sprintf (stub_entry->output_name, STUB_ENTRY_NAME, sym_name);
     }
 
-  group_sections (htab, stub_group_size, stubs_always_after_branch);
+  *new_stub = TRUE;
+  return stub_entry;
+}
 
-  /* If we're applying the cortex A8 fix, we need to determine the
-     program header size now, because we cannot change it later --
-     that could alter section placements.  Notice the A8 erratum fix
-     ends up requiring the section addresses to remain unchanged
-     modulo the page size.  That's something we cannot represent
-     inside BFD, and we don't want to force the section alignment to
-     be the page size.  */
-  if (htab->fix_cortex_a8)
-    (*htab->layout_sections_again) ();
+/* Scan symbols in INPUT_BFD to identify secure entry functions needing a
+   gateway veneer to transition from non-secure to secure state, and create
+   these veneers accordingly.
 
-  while (1)
+   "ARMv8-M Security Extensions: Requirements on Development Tools" document
+   defines the conditions that govern Secure Gateway veneer creation for a
+   given symbol <SYM> as follows:
+   - it has function type
+   - it has non local binding
+   - a symbol named __acle_se_<SYM> (called special symbol) exists with the
+     same type, binding and value as <SYM> (called normal symbol).
+   An entry function can handle secure state transition itself in which case
+   its special symbol would have a different value from the normal symbol.
+
+   OUT_ATTR gives the output attributes, SYM_HASHES the symbol index to hash
+   entry mapping while HTAB gives the name to hash entry mapping.
+   *CMSE_STUB_CREATED is increased by the number of secure gateway veneers
+   created.
+
+   Returns FALSE if an error occurred, TRUE otherwise.  */
+
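+/* For illustration only (hypothetical symbols, not mandated by the
+   requirements document): an input object defining
+
+       00008000 T entry_fct
+       00008000 T __acle_se_entry_fct
+
+   satisfies the conditions above, so ENTRY_FCT gets a secure gateway
+   veneer; if the two values differed, ENTRY_FCT would be assumed to handle
+   the state transition itself and no veneer would be created.  */
+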
+static bfd_boolean
+cmse_scan (bfd *input_bfd, struct elf32_arm_link_hash_table *htab,
+          obj_attribute *out_attr, struct elf_link_hash_entry **sym_hashes,
+          int *cmse_stub_created)
+{
+  const struct elf_backend_data *bed;
+  Elf_Internal_Shdr *symtab_hdr;
+  unsigned i, j, sym_count, ext_start;
+  Elf_Internal_Sym *cmse_sym, *local_syms;
+  struct elf32_arm_link_hash_entry *hash, *cmse_hash = NULL;
+  enum arm_st_branch_type branch_type;
+  char *sym_name, *lsym_name;
+  bfd_vma sym_value;
+  asection *section;
+  struct elf32_arm_stub_hash_entry *stub_entry;
+  bfd_boolean is_v8m, new_stub, cmse_invalid, ret = TRUE;
+
+  bed = get_elf_backend_data (input_bfd);
+  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
+  sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
+  ext_start = symtab_hdr->sh_info;
+  is_v8m = (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
+           && out_attr[Tag_CPU_arch_profile].i == 'M');
+
+  local_syms = (Elf_Internal_Sym *) symtab_hdr->contents;
+  if (local_syms == NULL)
+    local_syms = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
+                                      symtab_hdr->sh_info, 0, NULL, NULL,
+                                      NULL);
+  if (symtab_hdr->sh_info && local_syms == NULL)
+    return FALSE;
+
+  /* Scan symbols.  */
+  for (i = 0; i < sym_count; i++)
     {
-      bfd *input_bfd;
-      unsigned int bfd_indx;
-      asection *stub_sec;
-      bfd_boolean stub_changed = FALSE;
-      unsigned prev_num_a8_fixes = num_a8_fixes;
+      cmse_invalid = FALSE;
 
-      num_a8_fixes = 0;
-      for (input_bfd = info->input_bfds, bfd_indx = 0;
-          input_bfd != NULL;
-          input_bfd = input_bfd->link_next, bfd_indx++)
+      if (i < ext_start)
        {
-         Elf_Internal_Shdr *symtab_hdr;
-         asection *section;
-         Elf_Internal_Sym *local_syms = NULL;
+         cmse_sym = &local_syms[i];
+         /* Not a special symbol.  */
+         if (!ARM_GET_SYM_CMSE_SPCL (cmse_sym->st_target_internal))
+           continue;
+         sym_name = bfd_elf_string_from_elf_section (input_bfd,
+                                                     symtab_hdr->sh_link,
+                                                     cmse_sym->st_name);
+         /* Special symbol with local binding.  */
+         cmse_invalid = TRUE;
+       }
+      else
+       {
+         cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);
+         sym_name = (char *) cmse_hash->root.root.root.string;
 
-         if (!is_arm_elf (input_bfd))
+         /* Not a special symbol.  */
+         if (!ARM_GET_SYM_CMSE_SPCL (cmse_hash->root.target_internal))
            continue;
 
-         num_a8_relocs = 0;
+         /* Special symbol has incorrect binding or type.  */
+         if ((cmse_hash->root.root.type != bfd_link_hash_defined
+              && cmse_hash->root.root.type != bfd_link_hash_defweak)
+             || cmse_hash->root.type != STT_FUNC)
+           cmse_invalid = TRUE;
+       }
 
-         /* We'll need the symbol table in a second.  */
-         symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
-         if (symtab_hdr->sh_info == 0)
+      if (!is_v8m)
+       {
+         (*_bfd_error_handler) (_("%B: Special symbol `%s' only allowed for "
+                                  "ARMv8-M architecture or later."),
+                                input_bfd, sym_name);
+         is_v8m = TRUE; /* Avoid multiple warnings.  */
+         ret = FALSE;
+       }
+
+      if (cmse_invalid)
+       {
+         (*_bfd_error_handler) (_("%B: invalid special symbol `%s'."),
+                                input_bfd, sym_name);
+         (*_bfd_error_handler) (_("It must be a global or weak function "
+                                  "symbol."));
+         ret = FALSE;
+         if (i < ext_start)
            continue;
+       }
 
-         /* Walk over each section attached to the input bfd.  */
-         for (section = input_bfd->sections;
-              section != NULL;
-              section = section->next)
-           {
-             Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
+      sym_name += strlen (CMSE_PREFIX);
+      hash = (struct elf32_arm_link_hash_entry *)
+       elf_link_hash_lookup (&(htab)->root, sym_name, FALSE, FALSE, TRUE);
 
-             /* If there aren't any relocs, then there's nothing more
-                to do.  */
-             if ((section->flags & SEC_RELOC) == 0
-                 || section->reloc_count == 0
-                 || (section->flags & SEC_CODE) == 0)
-               continue;
+      /* No associated normal symbol or it is neither global nor weak.  */
+      if (!hash
+         || (hash->root.root.type != bfd_link_hash_defined
+             && hash->root.root.type != bfd_link_hash_defweak)
+         || hash->root.type != STT_FUNC)
+       {
+         /* Initialize here to avoid warning about use of possibly
+            uninitialized variable.  */
+         j = 0;
 
-             /* If this section is a link-once section that will be
-                discarded, then don't create any stubs.  */
-             if (section->output_section == NULL
-                 || section->output_section->owner != output_bfd)
-               continue;
+         if (!hash)
+           {
+             /* Searching for a normal symbol with local binding.  */
+             for (; j < ext_start; j++)
+               {
+                 lsym_name =
+                   bfd_elf_string_from_elf_section (input_bfd,
+                                                    symtab_hdr->sh_link,
+                                                    local_syms[j].st_name);
+                 if (!strcmp (sym_name, lsym_name))
+                   break;
+               }
+           }
 
-             /* Get the relocs.  */
-             internal_relocs
-               = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
-                                            NULL, info->keep_memory);
-             if (internal_relocs == NULL)
-               goto error_ret_free_local;
+         if (hash || j < ext_start)
+           {
+             (*_bfd_error_handler)
+               (_("%B: invalid standard symbol `%s'."), input_bfd, sym_name);
+             (*_bfd_error_handler)
+               (_("It must be a global or weak function symbol."));
+           }
+         else
+           (*_bfd_error_handler)
+             (_("%B: absent standard symbol `%s'."), input_bfd, sym_name);
+         ret = FALSE;
+         if (!hash)
+           continue;
+       }
 
-             /* Now examine each relocation.  */
-             irela = internal_relocs;
-             irelaend = irela + section->reloc_count;
-             for (; irela < irelaend; irela++)
-               {
-                 unsigned int r_type, r_indx;
-                 enum elf32_arm_stub_type stub_type;
-                 struct elf32_arm_stub_hash_entry *stub_entry;
-                 asection *sym_sec;
-                 bfd_vma sym_value;
-                 bfd_vma destination;
-                 struct elf32_arm_link_hash_entry *hash;
-                 const char *sym_name;
-                 char *stub_name;
-                 const asection *id_sec;
-                 unsigned char st_type;
-                 enum arm_st_branch_type branch_type;
-                 bfd_boolean created_stub = FALSE;
+      sym_value = hash->root.root.u.def.value;
+      section = hash->root.root.u.def.section;
 
-                 r_type = ELF32_R_TYPE (irela->r_info);
-                 r_indx = ELF32_R_SYM (irela->r_info);
+      if (cmse_hash->root.root.u.def.section != section)
+       {
+         (*_bfd_error_handler)
+           (_("%B: `%s' and its special symbol are in different sections."),
+            input_bfd, sym_name);
+         ret = FALSE;
+       }
+      if (cmse_hash->root.root.u.def.value != sym_value)
+       continue; /* Ignore: could be an entry function starting with SG.  */
 
-                 if (r_type >= (unsigned int) R_ARM_max)
-                   {
-                     bfd_set_error (bfd_error_bad_value);
-                   error_ret_free_internal:
-                     if (elf_section_data (section)->relocs == NULL)
-                       free (internal_relocs);
-                     goto error_ret_free_local;
-                   }
+      /* If this section is a link-once section that will be discarded, then
+        don't create any stubs.  */
+      if (section->output_section == NULL)
+       {
+         (*_bfd_error_handler)
+           (_("%B: entry function `%s' not output."), input_bfd, sym_name);
+         continue;
+       }
 
-                 hash = NULL;
-                 if (r_indx >= symtab_hdr->sh_info)
-                   hash = elf32_arm_hash_entry
-                     (elf_sym_hashes (input_bfd)
-                      [r_indx - symtab_hdr->sh_info]);
+      if (hash->root.size == 0)
+       {
+         (*_bfd_error_handler)
+           (_("%B: entry function `%s' is empty."), input_bfd, sym_name);
+         ret = FALSE;
+       }
+
+      if (!ret)
+       continue;
+      branch_type = ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
+      stub_entry
+       = elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
+                                NULL, NULL, section, hash, sym_name,
+                                sym_value, branch_type, &new_stub);
+
+      if (stub_entry == NULL)
+        ret = FALSE;
+      else
+       {
+         BFD_ASSERT (new_stub);
+         (*cmse_stub_created)++;
+       }
+    }
+
+  if (!symtab_hdr->contents)
+    free (local_syms);
+  return ret;
+}
+
+/* Return TRUE iff a symbol identified by its linker HASH entry is a secure
+   code entry function, i.e. can be called from non-secure code without using
+   a veneer.  */
+
+static bfd_boolean
+cmse_entry_fct_p (struct elf32_arm_link_hash_entry *hash)
+{
+  uint32_t first_insn;
+  asection *section;
+  file_ptr offset;
+  bfd *abfd;
+
+  /* Defined symbol of function type.  */
+  if (hash->root.root.type != bfd_link_hash_defined
+      && hash->root.root.type != bfd_link_hash_defweak)
+    return FALSE;
+  if (hash->root.type != STT_FUNC)
+    return FALSE;
+
+  /* Read first instruction.  */
+  section = hash->root.root.u.def.section;
+  abfd = section->owner;
+  offset = hash->root.root.u.def.value - section->vma;
+  if (!bfd_get_section_contents (abfd, section, &first_insn, offset,
+                                sizeof (first_insn)))
+    return FALSE;
+
+  /* Check that it starts with an SG instruction.  */
+  return first_insn == 0xe97fe97f;
+}
+
+/* Output the name (in symbol table) of the veneer GEN_ENTRY if it is a new
+   secure gateway veneer (i.e. the veneer was not in the input import library)
+   and there is no output import library (GEN_INFO->out_implib_bfd is NULL).  */
+
+static bfd_boolean
+arm_list_new_cmse_stub (struct bfd_hash_entry *gen_entry, void *gen_info)
+{
+  struct elf32_arm_stub_hash_entry *stub_entry;
+  struct bfd_link_info *info;
+
+  /* Massage our args to the form they really have.  */
+  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
+  info = (struct bfd_link_info *) gen_info;
+
+  if (info->out_implib_bfd)
+    return TRUE;
+
+  if (stub_entry->stub_type != arm_stub_cmse_branch_thumb_only)
+    return TRUE;
+
+  if (stub_entry->stub_offset == (bfd_vma) -1)
+    (*_bfd_error_handler) ("  %s", stub_entry->output_name);
+
+  return TRUE;
+}
+
+/* Set the offset of each secure gateway veneer so that its address remains
+   identical to the one in the input import library referred to by
+   HTAB->in_implib_bfd.  A warning is issued for veneers that disappeared
+   (present in the input import library but absent from the executable being
+   linked) or if new veneers appeared and there is no output import library
+   (INFO->out_implib_bfd is NULL and *CMSE_STUB_CREATED is bigger than the
+   number of secure gateway veneers found in the input import library).
+
+   Returns TRUE on success, FALSE if an error occurred.  If no error
+   occurred, *CMSE_STUB_CREATED gives the number of SG veneers created by
+   both cmse_scan and this function, and HTAB->new_cmse_stub_offset is set
+   to the offset just past the highest veneer seen, for new veneers to be
+   laid out after.  */
+
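+/* A sketch of the expected layout, assuming hypothetical veneers __foo and
+   __bar at 0x10000 and 0x10008 in the input import library, an SG veneer
+   section based at 0x10000 and the usual 8-byte SG veneer size: __foo and
+   __bar keep stub offsets 0 and 8, and HTAB->new_cmse_stub_offset becomes
+   16, so any new veneer is laid out after them.  */
+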
+static bfd_boolean
+set_cmse_veneer_addr_from_implib (struct bfd_link_info *info,
+                                 struct elf32_arm_link_hash_table *htab,
+                                 int *cmse_stub_created)
+{
+  long symsize;
+  char *sym_name;
+  flagword flags;
+  long i, symcount;
+  bfd *in_implib_bfd;
+  asection *stub_out_sec;
+  bfd_boolean ret = TRUE;
+  Elf_Internal_Sym *intsym;
+  const char *out_sec_name;
+  bfd_size_type cmse_stub_size;
+  asymbol **sympp = NULL, *sym;
+  struct elf32_arm_link_hash_entry *hash;
+  const insn_sequence *cmse_stub_template;
+  struct elf32_arm_stub_hash_entry *stub_entry;
+  int cmse_stub_template_size, new_cmse_stubs_created = *cmse_stub_created;
+  bfd_vma veneer_value, stub_offset, next_cmse_stub_offset;
+  bfd_vma cmse_stub_array_start = (bfd_vma) -1, cmse_stub_sec_vma = 0;
+
+  /* No input secure gateway import library.  */
+  if (!htab->in_implib_bfd)
+    return TRUE;
+
+  in_implib_bfd = htab->in_implib_bfd;
+  if (!htab->cmse_implib)
+    {
+      (*_bfd_error_handler) (_("%B: --in-implib only supported for Secure "
+                              "Gateway import libraries."), in_implib_bfd);
+      return FALSE;
+    }
+
+  /* Get symbol table size.  */
+  symsize = bfd_get_symtab_upper_bound (in_implib_bfd);
+  if (symsize < 0)
+    return FALSE;
+
+  /* Read in the input secure gateway import library's symbol table.  */
+  sympp = (asymbol **) xmalloc (symsize);
+  symcount = bfd_canonicalize_symtab (in_implib_bfd, sympp);
+  if (symcount < 0)
+    {
+      ret = FALSE;
+      goto free_sym_buf;
+    }
+
+  htab->new_cmse_stub_offset = 0;
+  cmse_stub_size =
+    find_stub_size_and_template (arm_stub_cmse_branch_thumb_only,
+                                &cmse_stub_template,
+                                &cmse_stub_template_size);
+  out_sec_name =
+    arm_dedicated_stub_output_section_name (arm_stub_cmse_branch_thumb_only);
+  stub_out_sec =
+    bfd_get_section_by_name (htab->obfd, out_sec_name);
+  if (stub_out_sec != NULL)
+    cmse_stub_sec_vma = stub_out_sec->vma;
+
+  /* Set the addresses of veneers mentioned in the input secure gateway
+     import library's symbol table.  */
+  for (i = 0; i < symcount; i++)
+    {
+      sym = sympp[i];
+      flags = sym->flags;
+      sym_name = (char *) bfd_asymbol_name (sym);
+      intsym = &((elf_symbol_type *) sym)->internal_elf_sym;
+
+      if (sym->section != bfd_abs_section_ptr
+         || !(flags & (BSF_GLOBAL | BSF_WEAK))
+         || (flags & BSF_FUNCTION) != BSF_FUNCTION
+         || (ARM_GET_SYM_BRANCH_TYPE (intsym->st_target_internal)
+             != ST_BRANCH_TO_THUMB))
+       {
+         (*_bfd_error_handler) (_("%B: invalid import library entry: `%s'."),
+                                in_implib_bfd, sym_name);
+         (*_bfd_error_handler) (_("Symbol should be absolute, global and "
+                                  "refer to a Thumb function."));
+         ret = FALSE;
+         continue;
+       }
+
+      veneer_value = bfd_asymbol_value (sym);
+      stub_offset = veneer_value - cmse_stub_sec_vma;
+      stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, sym_name,
+                                        FALSE, FALSE);
+      hash = (struct elf32_arm_link_hash_entry *)
+       elf_link_hash_lookup (&(htab)->root, sym_name, FALSE, FALSE, TRUE);
+
+      /* The stub entry should have been created by cmse_scan, or the symbol
+        should be that of a secure function callable from non-secure code.  */
+      if (!stub_entry && !hash)
+       {
+         bfd_boolean new_stub;
+
+         (*_bfd_error_handler)
+           (_("Entry function `%s' disappeared from secure code."), sym_name);
+         hash = (struct elf32_arm_link_hash_entry *)
+           elf_link_hash_lookup (&(htab)->root, sym_name, TRUE, TRUE, TRUE);
+         stub_entry
+           = elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
+                                    NULL, NULL, bfd_abs_section_ptr, hash,
+                                    sym_name, veneer_value,
+                                    ST_BRANCH_TO_THUMB, &new_stub);
+         if (stub_entry == NULL)
+           ret = FALSE;
+         else
+           {
+             BFD_ASSERT (new_stub);
+             new_cmse_stubs_created++;
+             (*cmse_stub_created)++;
+             /* Null size marks the veneer as inherited from the input
+                import library; only dereference STUB_ENTRY when it is
+                known to be non-NULL.  */
+             stub_entry->stub_template_size = stub_entry->stub_size = 0;
+             stub_entry->stub_offset = stub_offset;
+           }
+       }
+      /* Symbol found is not callable from non secure code.  */
+      else if (!stub_entry)
+       {
+         if (!cmse_entry_fct_p (hash))
+           {
+             (*_bfd_error_handler) (_("`%s' refers to a non-entry function."),
+                                    sym_name);
+             ret = FALSE;
+           }
+         continue;
+       }
+      else
+       {
+         /* Only stubs for SG veneers should have been created.  */
+         BFD_ASSERT (stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
+
+         /* Check visibility hasn't changed.  */
+         if (!!(flags & BSF_GLOBAL)
+             != (hash->root.root.type == bfd_link_hash_defined))
+           (*_bfd_error_handler)
+             (_("%B: visibility of symbol `%s' has changed."), in_implib_bfd,
+              sym_name);
+
+         stub_entry->stub_offset = stub_offset;
+       }
+
+      /* Size should match that of a SG veneer.  */
+      if (intsym->st_size != cmse_stub_size)
+       {
+         (*_bfd_error_handler) (_("%B: incorrect size for symbol `%s'."),
+                                in_implib_bfd, sym_name);
+         ret = FALSE;
+       }
+
+      /* The veneer address from the previous link is before the current SG
+        veneer section.  */
+      if (veneer_value < cmse_stub_sec_vma)
+       {
+         /* Avoid offset underflow.  */
+         if (stub_entry)
+           stub_entry->stub_offset = 0;
+         stub_offset = 0;
+         ret = FALSE;
+       }
+
+      /* Complain if stub offset not a multiple of stub size.  */
+      if (stub_offset % cmse_stub_size)
+       {
+         (*_bfd_error_handler)
+           (_("Offset of veneer for entry function `%s' not a multiple of "
+              "its size."), sym_name);
+         ret = FALSE;
+       }
+
+      if (!ret)
+       continue;
+
+      new_cmse_stubs_created--;
+      if (veneer_value < cmse_stub_array_start)
+       cmse_stub_array_start = veneer_value;
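+      /* The veneer size is rounded up to a multiple of 8 bytes, matching
+        the per-stub rounding performed in arm_size_one_stub.  */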
+      next_cmse_stub_offset = stub_offset + ((cmse_stub_size + 7) & ~7);
+      if (next_cmse_stub_offset > htab->new_cmse_stub_offset)
+       htab->new_cmse_stub_offset = next_cmse_stub_offset;
+    }
+
+  if (!info->out_implib_bfd && new_cmse_stubs_created != 0)
+    {
+      BFD_ASSERT (new_cmse_stubs_created > 0);
+      (*_bfd_error_handler)
+       (_("new entry function(s) introduced but no output import library "
+          "specified:"));
+      bfd_hash_traverse (&htab->stub_hash_table, arm_list_new_cmse_stub, info);
+    }
+
+  if (cmse_stub_array_start != cmse_stub_sec_vma)
+    {
+      (*_bfd_error_handler)
+       (_("Start address of `%s' is different from the previous link."),
+        out_sec_name);
+      ret = FALSE;
+    }
+
+free_sym_buf:
+  free (sympp);
+  return ret;
+}
+
+/* Determine and set the size of the stub section for a final link.
+
+   The basic idea here is to examine all the relocations looking for
+   PC-relative calls to a target that is unreachable with a "bl"
+   instruction.  */
+
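+/* Note that stub sizing is iterative: each pass of the loop below may add
+   stubs, which can move sections and make further stubs necessary, so the
+   loop repeats until no new stub is created and the number of Cortex-A8
+   fixes stabilizes.  */
+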
+bfd_boolean
+elf32_arm_size_stubs (bfd *output_bfd,
+                     bfd *stub_bfd,
+                     struct bfd_link_info *info,
+                     bfd_signed_vma group_size,
+                     asection * (*add_stub_section) (const char *, asection *,
+                                                     asection *,
+                                                     unsigned int),
+                     void (*layout_sections_again) (void))
+{
+  bfd_boolean ret = TRUE;
+  obj_attribute *out_attr;
+  int cmse_stub_created = 0;
+  bfd_size_type stub_group_size;
+  bfd_boolean m_profile, stubs_always_after_branch, first_veneer_scan = TRUE;
+  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
+  struct a8_erratum_fix *a8_fixes = NULL;
+  unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
+  struct a8_erratum_reloc *a8_relocs = NULL;
+  unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
+
+  if (htab == NULL)
+    return FALSE;
+
+  if (htab->fix_cortex_a8)
+    {
+      a8_fixes = (struct a8_erratum_fix *)
+         bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
+      a8_relocs = (struct a8_erratum_reloc *)
+         bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
+    }
+
+  /* Propagate mach to stub bfd, because it may not have been
+     finalized when we created stub_bfd.  */
+  bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
+                    bfd_get_mach (output_bfd));
+
+  /* Stash our params away.  */
+  htab->stub_bfd = stub_bfd;
+  htab->add_stub_section = add_stub_section;
+  htab->layout_sections_again = layout_sections_again;
+  stubs_always_after_branch = group_size < 0;
+
+  out_attr = elf_known_obj_attributes_proc (output_bfd);
+  m_profile = out_attr[Tag_CPU_arch_profile].i == 'M';
+
+  /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
+     as the first half of a 32-bit branch straddling two 4K pages.  This is a
+     crude way of enforcing that.  */
+  if (htab->fix_cortex_a8)
+    stubs_always_after_branch = 1;
+
+  if (group_size < 0)
+    stub_group_size = -group_size;
+  else
+    stub_group_size = group_size;
+
+  if (stub_group_size == 1)
+    {
+      /* Default values.  */
+      /* The Thumb branch range of +-4MB has to be used as the default
+        maximum size (a given section can contain both ARM and Thumb
+        code, so the worst case has to be taken into account).
+
+        This value is 24K less than that, which allows for 2025
+        12-byte stubs.  If we exceed that, then we will fail to link.
+        The user will have to relink with an explicit group size
+        option.  */
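+      /* That is, 4194304 bytes (4MB) - 4170000 = 24304 bytes, enough for
+        2025 12-byte stubs.  */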
+      stub_group_size = 4170000;
+    }
+
+  group_sections (htab, stub_group_size, stubs_always_after_branch);
+
+  /* If we're applying the cortex A8 fix, we need to determine the
+     program header size now, because we cannot change it later --
+     that could alter section placements.  Notice the A8 erratum fix
+     ends up requiring the section addresses to remain unchanged
+     modulo the page size.  That's something we cannot represent
+     inside BFD, and we don't want to force the section alignment to
+     be the page size.  */
+  if (htab->fix_cortex_a8)
+    (*htab->layout_sections_again) ();
+
+  while (1)
+    {
+      bfd *input_bfd;
+      unsigned int bfd_indx;
+      asection *stub_sec;
+      enum elf32_arm_stub_type stub_type;
+      bfd_boolean stub_changed = FALSE;
+      unsigned prev_num_a8_fixes = num_a8_fixes;
+
+      num_a8_fixes = 0;
+      for (input_bfd = info->input_bfds, bfd_indx = 0;
+          input_bfd != NULL;
+          input_bfd = input_bfd->link.next, bfd_indx++)
+       {
+         Elf_Internal_Shdr *symtab_hdr;
+         asection *section;
+         Elf_Internal_Sym *local_syms = NULL;
+
+         if (!is_arm_elf (input_bfd))
+           continue;
+
+         num_a8_relocs = 0;
+
+         /* We'll need the symbol table in a second.  */
+         symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
+         if (symtab_hdr->sh_info == 0)
+           continue;
+
+         /* Limit the scan of symbols to object files whose profile is
+            Microcontroller, so as not to hinder performance in the general
+            case.  */
+         if (m_profile && first_veneer_scan)
+           {
+             struct elf_link_hash_entry **sym_hashes;
+
+             sym_hashes = elf_sym_hashes (input_bfd);
+             if (!cmse_scan (input_bfd, htab, out_attr, sym_hashes,
+                             &cmse_stub_created))
+               goto error_ret_free_local;
+
+             if (cmse_stub_created != 0)
+               stub_changed = TRUE;
+           }
+
+         /* Walk over each section attached to the input bfd.  */
+         for (section = input_bfd->sections;
+              section != NULL;
+              section = section->next)
+           {
+             Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
+
+             /* If there aren't any relocs, then there's nothing more
+                to do.  */
+             if ((section->flags & SEC_RELOC) == 0
+                 || section->reloc_count == 0
+                 || (section->flags & SEC_CODE) == 0)
+               continue;
+
+             /* If this section is a link-once section that will be
+                discarded, then don't create any stubs.  */
+             if (section->output_section == NULL
+                 || section->output_section->owner != output_bfd)
+               continue;
+
+             /* Get the relocs.  */
+             internal_relocs
+               = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
+                                            NULL, info->keep_memory);
+             if (internal_relocs == NULL)
+               goto error_ret_free_local;
+
+             /* Now examine each relocation.  */
+             irela = internal_relocs;
+             irelaend = irela + section->reloc_count;
+             for (; irela < irelaend; irela++)
+               {
+                 unsigned int r_type, r_indx;
+                 asection *sym_sec;
+                 bfd_vma sym_value;
+                 bfd_vma destination;
+                 struct elf32_arm_link_hash_entry *hash;
+                 const char *sym_name;
+                 unsigned char st_type;
+                 enum arm_st_branch_type branch_type;
+                 bfd_boolean created_stub = FALSE;
+
+                 r_type = ELF32_R_TYPE (irela->r_info);
+                 r_indx = ELF32_R_SYM (irela->r_info);
+
+                 if (r_type >= (unsigned int) R_ARM_max)
+                   {
+                     bfd_set_error (bfd_error_bad_value);
+                   error_ret_free_internal:
+                     if (elf_section_data (section)->relocs == NULL)
+                       free (internal_relocs);
+                   /* Fall through.  */
+                   error_ret_free_local:
+                     if (local_syms != NULL
+                         && (symtab_hdr->contents
+                             != (unsigned char *) local_syms))
+                       free (local_syms);
+                     return FALSE;
+                   }
+
+                 hash = NULL;
+                 if (r_indx >= symtab_hdr->sh_info)
+                   hash = elf32_arm_hash_entry
+                     (elf_sym_hashes (input_bfd)
+                      [r_indx - symtab_hdr->sh_info]);
 
                  /* Only look for stubs on branch instructions, or
                     non-relaxed TLSCALL  */
@@ -5089,1600 +6255,2249 @@ elf32_arm_size_stubs (bfd *output_bfd,
                               & GOT_TLS_GDESC) != 0))
                    continue;
 
-                 /* Now determine the call target, its name, value,
-                    section.  */
-                 sym_sec = NULL;
-                 sym_value = 0;
-                 destination = 0;
-                 sym_name = NULL;
+                 /* Now determine the call target, its name, value,
+                    section.  */
+                 sym_sec = NULL;
+                 sym_value = 0;
+                 destination = 0;
+                 sym_name = NULL;
+
+                 if (r_type == (unsigned int) R_ARM_TLS_CALL
+                     || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
+                   {
+                     /* A non-relaxed TLS call.  The target is the
+                        plt-resident trampoline and has nothing to do
+                        with the symbol.  */
+                     BFD_ASSERT (htab->tls_trampoline > 0);
+                     sym_sec = htab->root.splt;
+                     sym_value = htab->tls_trampoline;
+                     hash = 0;
+                     st_type = STT_FUNC;
+                     branch_type = ST_BRANCH_TO_ARM;
+                   }
+                 else if (!hash)
+                   {
+                     /* It's a local symbol.  */
+                     Elf_Internal_Sym *sym;
+
+                     if (local_syms == NULL)
+                       {
+                         local_syms
+                           = (Elf_Internal_Sym *) symtab_hdr->contents;
+                         if (local_syms == NULL)
+                           local_syms
+                             = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
+                                                     symtab_hdr->sh_info, 0,
+                                                     NULL, NULL, NULL);
+                         if (local_syms == NULL)
+                           goto error_ret_free_internal;
+                       }
+
+                     sym = local_syms + r_indx;
+                     if (sym->st_shndx == SHN_UNDEF)
+                       sym_sec = bfd_und_section_ptr;
+                     else if (sym->st_shndx == SHN_ABS)
+                       sym_sec = bfd_abs_section_ptr;
+                     else if (sym->st_shndx == SHN_COMMON)
+                       sym_sec = bfd_com_section_ptr;
+                     else
+                       sym_sec =
+                         bfd_section_from_elf_index (input_bfd, sym->st_shndx);
+
+                     if (!sym_sec)
+                       /* This is an undefined symbol.  It can never
+                          be resolved.  */
+                       continue;
+
+                     if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
+                       sym_value = sym->st_value;
+                     destination = (sym_value + irela->r_addend
+                                    + sym_sec->output_offset
+                                    + sym_sec->output_section->vma);
+                     st_type = ELF_ST_TYPE (sym->st_info);
+                     branch_type =
+                       ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
+                     sym_name
+                       = bfd_elf_string_from_elf_section (input_bfd,
+                                                          symtab_hdr->sh_link,
+                                                          sym->st_name);
+                   }
+                 else
+                   {
+                     /* It's an external symbol.  */
+                     while (hash->root.root.type == bfd_link_hash_indirect
+                            || hash->root.root.type == bfd_link_hash_warning)
+                       hash = ((struct elf32_arm_link_hash_entry *)
+                               hash->root.root.u.i.link);
+
+                     if (hash->root.root.type == bfd_link_hash_defined
+                         || hash->root.root.type == bfd_link_hash_defweak)
+                       {
+                         sym_sec = hash->root.root.u.def.section;
+                         sym_value = hash->root.root.u.def.value;
+
+                         struct elf32_arm_link_hash_table *globals =
+                                                 elf32_arm_hash_table (info);
+
+                         /* For a destination in a shared library,
+                            use the PLT stub as target address to
+                            decide whether a branch stub is
+                            needed.  */
+                         if (globals != NULL
+                             && globals->root.splt != NULL
+                             && hash != NULL
+                             && hash->root.plt.offset != (bfd_vma) -1)
+                           {
+                             sym_sec = globals->root.splt;
+                             sym_value = hash->root.plt.offset;
+                             if (sym_sec->output_section != NULL)
+                               destination = (sym_value
+                                              + sym_sec->output_offset
+                                              + sym_sec->output_section->vma);
+                           }
+                         else if (sym_sec->output_section != NULL)
+                           destination = (sym_value + irela->r_addend
+                                          + sym_sec->output_offset
+                                          + sym_sec->output_section->vma);
+                       }
+                     else if ((hash->root.root.type == bfd_link_hash_undefined)
+                              || (hash->root.root.type == bfd_link_hash_undefweak))
+                       {
+                         /* For a shared library, use the PLT stub as
+                            target address to decide whether a long
+                            branch stub is needed.
+                            For absolute code, they cannot be handled.  */
+                         struct elf32_arm_link_hash_table *globals =
+                           elf32_arm_hash_table (info);
+
+                         if (globals != NULL
+                             && globals->root.splt != NULL
+                             && hash != NULL
+                             && hash->root.plt.offset != (bfd_vma) -1)
+                           {
+                             sym_sec = globals->root.splt;
+                             sym_value = hash->root.plt.offset;
+                             if (sym_sec->output_section != NULL)
+                               destination = (sym_value
+                                              + sym_sec->output_offset
+                                              + sym_sec->output_section->vma);
+                           }
+                         else
+                           continue;
+                       }
+                     else
+                       {
+                         bfd_set_error (bfd_error_bad_value);
+                         goto error_ret_free_internal;
+                       }
+                     st_type = hash->root.type;
+                     branch_type =
+                       ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
+                     sym_name = hash->root.root.root.string;
+                   }
+
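+                 /* The do {} while (0) wrapper allows an early exit via
+                    break once it is known that no new stub is needed.  */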
+                 do
+                   {
+                     bfd_boolean new_stub;
+                     struct elf32_arm_stub_hash_entry *stub_entry;
+
+                     /* Determine what (if any) linker stub is needed.  */
+                     stub_type = arm_type_of_stub (info, section, irela,
+                                                   st_type, &branch_type,
+                                                   hash, destination, sym_sec,
+                                                   input_bfd, sym_name);
+                     if (stub_type == arm_stub_none)
+                       break;
+
+                     /* We've either created a stub for this reloc already,
+                        or we are about to.  */
+                     stub_entry =
+                       elf32_arm_create_stub (htab, stub_type, section, irela,
+                                              sym_sec, hash,
+                                              (char *) sym_name, sym_value,
+                                              branch_type, &new_stub);
+
+                     created_stub = stub_entry != NULL;
+                     if (!created_stub)
+                       goto error_ret_free_internal;
+                     else if (!new_stub)
+                       break;
+                     else
+                       stub_changed = TRUE;
+                   }
+                 while (0);
+
+                 /* Look for relocations which might trigger Cortex-A8
+                    erratum.  */
+                 if (htab->fix_cortex_a8
+                     && (r_type == (unsigned int) R_ARM_THM_JUMP24
+                         || r_type == (unsigned int) R_ARM_THM_JUMP19
+                         || r_type == (unsigned int) R_ARM_THM_CALL
+                         || r_type == (unsigned int) R_ARM_THM_XPC22))
+                   {
+                     bfd_vma from = section->output_section->vma
+                                    + section->output_offset
+                                    + irela->r_offset;
+
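+                     /* A 32-bit Thumb branch whose first halfword is at
+                        offset 0xffe of a 4K page straddles the page
+                        boundary; this is the erratum's trigger condition.  */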
+                     if ((from & 0xfff) == 0xffe)
+                       {
+                         /* Found a candidate.  Note we haven't checked the
+                            destination is within 4K here: if we do so (and
+                            don't create an entry in a8_relocs) we can't tell
+                            that a branch should have been relocated when
+                            scanning later.  */
+                         if (num_a8_relocs == a8_reloc_table_size)
+                           {
+                             a8_reloc_table_size *= 2;
+                             a8_relocs = (struct a8_erratum_reloc *)
+                                 bfd_realloc (a8_relocs,
+                                              sizeof (struct a8_erratum_reloc)
+                                              * a8_reloc_table_size);
+                           }
+
+                         a8_relocs[num_a8_relocs].from = from;
+                         a8_relocs[num_a8_relocs].destination = destination;
+                         a8_relocs[num_a8_relocs].r_type = r_type;
+                         a8_relocs[num_a8_relocs].branch_type = branch_type;
+                         a8_relocs[num_a8_relocs].sym_name = sym_name;
+                         a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
+                         a8_relocs[num_a8_relocs].hash = hash;
+
+                         num_a8_relocs++;
+                       }
+                   }
+               }
+
+             /* We're done with the internal relocs, free them.  */
+             if (elf_section_data (section)->relocs == NULL)
+               free (internal_relocs);
+           }
+
+         if (htab->fix_cortex_a8)
+           {
+             /* Sort relocs which might apply to Cortex-A8 erratum.  */
+             qsort (a8_relocs, num_a8_relocs,
+                    sizeof (struct a8_erratum_reloc),
+                    &a8_reloc_compare);
+
+             /* Scan for branches which might trigger Cortex-A8 erratum.  */
+             if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
+                                         &num_a8_fixes, &a8_fix_table_size,
+                                         a8_relocs, num_a8_relocs,
+                                         prev_num_a8_fixes, &stub_changed)
+                 != 0)
+               goto error_ret_free_local;
+           }
+
+         if (local_syms != NULL
+             && symtab_hdr->contents != (unsigned char *) local_syms)
+           {
+             if (!info->keep_memory)
+               free (local_syms);
+             else
+               symtab_hdr->contents = (unsigned char *) local_syms;
+           }
+       }
+
+      if (first_veneer_scan
+         && !set_cmse_veneer_addr_from_implib (info, htab,
+                                               &cmse_stub_created))
+       ret = FALSE;
+
+      if (prev_num_a8_fixes != num_a8_fixes)
+       stub_changed = TRUE;
+
+      if (!stub_changed)
+       break;
+
+      /* OK, we've added some stubs.  Find out the new size of the
+        stub sections.  */
+      for (stub_sec = htab->stub_bfd->sections;
+          stub_sec != NULL;
+          stub_sec = stub_sec->next)
+       {
+         /* Ignore non-stub sections.  */
+         if (!strstr (stub_sec->name, STUB_SUFFIX))
+           continue;
+
+         stub_sec->size = 0;
+       }
+
+      /* Add new SG veneers after those already in the input import
+        library.  */
+      for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
+          stub_type++)
+       {
+         bfd_vma *start_offset_p;
+         asection **stub_sec_p;
+
+         start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
+         stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
+         if (start_offset_p == NULL)
+           continue;
+
+         BFD_ASSERT (stub_sec_p != NULL);
+         if (*stub_sec_p != NULL)
+           (*stub_sec_p)->size = *start_offset_p;
+       }
+
+      /* Compute stub section size, considering padding.  */
+      bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
+      for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
+          stub_type++)
+       {
+         int size, padding;
+         asection **stub_sec_p;
+
+         padding = arm_dedicated_stub_section_padding (stub_type);
+         stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
+         /* Skip if no stub input section or no stub section padding
+            required.  */
+         if ((stub_sec_p != NULL && *stub_sec_p == NULL) || padding == 0)
+           continue;
+         /* Stub section padding required but no dedicated section.  */
+         BFD_ASSERT (stub_sec_p);
+
+         size = (*stub_sec_p)->size;
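+         /* Round the section size up to a multiple of PADDING, which is
+            assumed to be a power of two.  */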
+         size = (size + padding - 1) & ~(padding - 1);
+         (*stub_sec_p)->size = size;
+       }
+
+      /* Add Cortex-A8 erratum veneers to stub section sizes too.  */
+      if (htab->fix_cortex_a8)
+       for (i = 0; i < num_a8_fixes; i++)
+         {
+           stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
+                        a8_fixes[i].section, htab, a8_fixes[i].stub_type);
+
+           if (stub_sec == NULL)
+             return FALSE;
+
+           stub_sec->size
+             += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
+                                             NULL);
+         }
+
+      /* Ask the linker to do its stuff.  */
+      (*htab->layout_sections_again) ();
+      first_veneer_scan = FALSE;
+    }
+
+  /* Add stubs for Cortex-A8 erratum fixes now.  */
+  if (htab->fix_cortex_a8)
+    {
+      for (i = 0; i < num_a8_fixes; i++)
+       {
+         struct elf32_arm_stub_hash_entry *stub_entry;
+         char *stub_name = a8_fixes[i].stub_name;
+         asection *section = a8_fixes[i].section;
+         unsigned int section_id = a8_fixes[i].section->id;
+         asection *link_sec = htab->stub_group[section_id].link_sec;
+         asection *stub_sec = htab->stub_group[section_id].stub_sec;
+         const insn_sequence *template_sequence;
+         int template_size, size = 0;
+
+         stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
+                                            TRUE, FALSE);
+         if (stub_entry == NULL)
+           {
+             (*_bfd_error_handler) (_("%B: cannot create stub entry %s"),
+                                    section->owner,
+                                    stub_name);
+             return FALSE;
+           }
+
+         stub_entry->stub_sec = stub_sec;
+         stub_entry->stub_offset = (bfd_vma) -1;
+         stub_entry->id_sec = link_sec;
+         stub_entry->stub_type = a8_fixes[i].stub_type;
+         stub_entry->source_value = a8_fixes[i].offset;
+         stub_entry->target_section = a8_fixes[i].section;
+         stub_entry->target_value = a8_fixes[i].target_offset;
+         stub_entry->orig_insn = a8_fixes[i].orig_insn;
+         stub_entry->branch_type = a8_fixes[i].branch_type;
+
+         size = find_stub_size_and_template (a8_fixes[i].stub_type,
+                                             &template_sequence,
+                                             &template_size);
+
+         stub_entry->stub_size = size;
+         stub_entry->stub_template = template_sequence;
+         stub_entry->stub_template_size = template_size;
+       }
+
+      /* Stash the Cortex-A8 erratum fix array for use later in
+        elf32_arm_write_section().  */
+      htab->a8_erratum_fixes = a8_fixes;
+      htab->num_a8_erratum_fixes = num_a8_fixes;
+    }
+  else
+    {
+      htab->a8_erratum_fixes = NULL;
+      htab->num_a8_erratum_fixes = 0;
+    }
+  return ret;
+}
+
+/* Build all the stubs associated with the current output file.  The
+   stubs are kept in a hash table attached to the main linker hash
+   table.  We also set up the .plt entries for statically linked PIC
+   functions here.  This function is called via arm_elf_finish in the
+   linker.  */
+
+bfd_boolean
+elf32_arm_build_stubs (struct bfd_link_info *info)
+{
+  asection *stub_sec;
+  struct bfd_hash_table *table;
+  enum elf32_arm_stub_type stub_type;
+  struct elf32_arm_link_hash_table *htab;
+
+  htab = elf32_arm_hash_table (info);
+  if (htab == NULL)
+    return FALSE;
+
+  for (stub_sec = htab->stub_bfd->sections;
+       stub_sec != NULL;
+       stub_sec = stub_sec->next)
+    {
+      bfd_size_type size;
+
+      /* Ignore non-stub sections.  */
+      if (!strstr (stub_sec->name, STUB_SUFFIX))
+       continue;
+
+      /* Allocate memory to hold the linker stubs.  Zeroing the stub sections
+        must at least be done for stub sections requiring padding and for SG
+        veneers to ensure that non-secure code branching to a removed SG
+        veneer causes an error.  */
+      size = stub_sec->size;
+      stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
+      if (stub_sec->contents == NULL && size != 0)
+       return FALSE;
+
+      stub_sec->size = 0;
+    }
+
+  /* Add new SG veneers after those already in the input import library.  */
+  for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
+    {
+      bfd_vma *start_offset_p;
+      asection **stub_sec_p;
+
+      start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
+      stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
+      if (start_offset_p == NULL)
+       continue;
+
+      BFD_ASSERT (stub_sec_p != NULL);
+      if (*stub_sec_p != NULL)
+       (*stub_sec_p)->size = *start_offset_p;
+    }
+
+  /* Build the stubs as directed by the stub hash table.  */
+  table = &htab->stub_hash_table;
+  bfd_hash_traverse (table, arm_build_one_stub, info);
+  if (htab->fix_cortex_a8)
+    {
+      /* Place the Cortex-A8 stubs last.  */
+      htab->fix_cortex_a8 = -1;
+      bfd_hash_traverse (table, arm_build_one_stub, info);
+    }
+
+  return TRUE;
+}
+
+/* Locate the Thumb encoded calling stub for NAME.  */
+
+static struct elf_link_hash_entry *
+find_thumb_glue (struct bfd_link_info *link_info,
+                const char *name,
+                char **error_message)
+{
+  char *tmp_name;
+  struct elf_link_hash_entry *hash;
+  struct elf32_arm_link_hash_table *hash_table;
+
+  /* We need a pointer to the armelf specific hash table.  */
+  hash_table = elf32_arm_hash_table (link_info);
+  if (hash_table == NULL)
+    return NULL;
 
-                 if (r_type == (unsigned int) R_ARM_TLS_CALL
-                     || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
-                   {
-                     /* A non-relaxed TLS call.  The target is the
-                        plt-resident trampoline and nothing to do
-                        with the symbol.  */
-                     BFD_ASSERT (htab->tls_trampoline > 0);
-                     sym_sec = htab->root.splt;
-                     sym_value = htab->tls_trampoline;
-                     hash = 0;
-                     st_type = STT_FUNC;
-                     branch_type = ST_BRANCH_TO_ARM;
-                   }
-                 else if (!hash)
-                   {
-                     /* It's a local symbol.  */
-                     Elf_Internal_Sym *sym;
+  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
+                                 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
 
-                     if (local_syms == NULL)
-                       {
-                         local_syms
-                           = (Elf_Internal_Sym *) symtab_hdr->contents;
-                         if (local_syms == NULL)
-                           local_syms
-                             = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
-                                                     symtab_hdr->sh_info, 0,
-                                                     NULL, NULL, NULL);
-                         if (local_syms == NULL)
-                           goto error_ret_free_internal;
-                       }
+  BFD_ASSERT (tmp_name);
 
-                     sym = local_syms + r_indx;
-                     if (sym->st_shndx == SHN_UNDEF)
-                       sym_sec = bfd_und_section_ptr;
-                     else if (sym->st_shndx == SHN_ABS)
-                       sym_sec = bfd_abs_section_ptr;
-                     else if (sym->st_shndx == SHN_COMMON)
-                       sym_sec = bfd_com_section_ptr;
-                     else
-                       sym_sec =
-                         bfd_section_from_elf_index (input_bfd, sym->st_shndx);
+  sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
 
-                     if (!sym_sec)
-                       /* This is an undefined symbol.  It can never
-                          be resolved. */
-                       continue;
+  hash = elf_link_hash_lookup
+    (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
 
-                     if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
-                       sym_value = sym->st_value;
-                     destination = (sym_value + irela->r_addend
-                                    + sym_sec->output_offset
-                                    + sym_sec->output_section->vma);
-                     st_type = ELF_ST_TYPE (sym->st_info);
-                     branch_type = ARM_SYM_BRANCH_TYPE (sym);
-                     sym_name
-                       = bfd_elf_string_from_elf_section (input_bfd,
-                                                          symtab_hdr->sh_link,
-                                                          sym->st_name);
-                   }
-                 else
-                   {
-                     /* It's an external symbol.  */
-                     while (hash->root.root.type == bfd_link_hash_indirect
-                            || hash->root.root.type == bfd_link_hash_warning)
-                       hash = ((struct elf32_arm_link_hash_entry *)
-                               hash->root.root.u.i.link);
+  if (hash == NULL
+      && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
+                  tmp_name, name) == -1)
+    *error_message = (char *) bfd_errmsg (bfd_error_system_call);
 
-                     if (hash->root.root.type == bfd_link_hash_defined
-                         || hash->root.root.type == bfd_link_hash_defweak)
-                       {
-                         sym_sec = hash->root.root.u.def.section;
-                         sym_value = hash->root.root.u.def.value;
+  free (tmp_name);
 
-                         struct elf32_arm_link_hash_table *globals =
-                                                 elf32_arm_hash_table (info);
+  return hash;
+}
 
-                         /* For a destination in a shared library,
-                            use the PLT stub as target address to
-                            decide whether a branch stub is
-                            needed.  */
-                         if (globals != NULL
-                             && globals->root.splt != NULL
-                             && hash != NULL
-                             && hash->root.plt.offset != (bfd_vma) -1)
-                           {
-                             sym_sec = globals->root.splt;
-                             sym_value = hash->root.plt.offset;
-                             if (sym_sec->output_section != NULL)
-                               destination = (sym_value
-                                              + sym_sec->output_offset
-                                              + sym_sec->output_section->vma);
-                           }
-                         else if (sym_sec->output_section != NULL)
-                           destination = (sym_value + irela->r_addend
-                                          + sym_sec->output_offset
-                                          + sym_sec->output_section->vma);
-                       }
-                     else if ((hash->root.root.type == bfd_link_hash_undefined)
-                              || (hash->root.root.type == bfd_link_hash_undefweak))
-                       {
-                         /* For a shared library, use the PLT stub as
-                            target address to decide whether a long
-                            branch stub is needed.
-                            For absolute code, they cannot be handled.  */
-                         struct elf32_arm_link_hash_table *globals =
-                           elf32_arm_hash_table (info);
+/* Locate the ARM-encoded calling stub for NAME.  */
+
+static struct elf_link_hash_entry *
+find_arm_glue (struct bfd_link_info *link_info,
+              const char *name,
+              char **error_message)
+{
+  char *tmp_name;
+  struct elf_link_hash_entry *myh;
+  struct elf32_arm_link_hash_table *hash_table;
+
+  /* We need a pointer to the elfarm specific hash table.  */
+  hash_table = elf32_arm_hash_table (link_info);
+  if (hash_table == NULL)
+    return NULL;
+
+  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
+                                 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
+
+  BFD_ASSERT (tmp_name);
+
+  sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
+
+  myh = elf_link_hash_lookup
+    (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
+
+  if (myh == NULL
+      && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
+                  tmp_name, name) == -1)
+    *error_message = (char *) bfd_errmsg (bfd_error_system_call);
+
+  free (tmp_name);
+
+  return myh;
+}
+
+/* ARM->Thumb glue (static images):
+
+   .arm
+   __func_from_arm:
+   ldr r12, __func_addr
+   bx  r12
+   __func_addr:
+   .word func    @ behave as if you saw an ARM_32 reloc.
+
+   (v5t static images)
+   .arm
+   __func_from_arm:
+   ldr pc, __func_addr
+   __func_addr:
+   .word func    @ behave as if you saw an ARM_32 reloc.
+
+   (relocatable images)
+   .arm
+   __func_from_arm:
+   ldr r12, __func_offset
+   add r12, r12, pc
+   bx  r12
+   __func_offset:
+   .word func - .   */
+
+#define ARM2THUMB_STATIC_GLUE_SIZE 12
+static const insn32 a2t1_ldr_insn = 0xe59fc000;
+static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
+static const insn32 a2t3_func_addr_insn = 0x00000001;
+
+#define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
+static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
+static const insn32 a2t2v5_func_addr_insn = 0x00000001;
+
+#define ARM2THUMB_PIC_GLUE_SIZE 16
+static const insn32 a2t1p_ldr_insn = 0xe59fc004;
+static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
+static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
+
+/* Thumb->ARM:                          Thumb->(non-interworking aware) ARM
+
+     .thumb                             .thumb
+     .align 2                           .align 2
+ __func_from_thumb:                 __func_from_thumb:
+     bx pc                              push {r6, lr}
+     nop                                ldr  r6, __func_addr
+     .arm                               mov  lr, pc
+     b func                             bx   r6
+                                       .arm
+                                   ;; back_to_thumb
+                                       ldmia r13! {r6, lr}
+                                       bx    lr
+                                   __func_addr:
+                                       .word        func  */
+
+#define THUMB2ARM_GLUE_SIZE 8
+static const insn16 t2a1_bx_pc_insn = 0x4778;
+static const insn16 t2a2_noop_insn = 0x46c0;
+static const insn32 t2a3_b_insn = 0xea000000;
+
+#define VFP11_ERRATUM_VENEER_SIZE 8
+#define STM32L4XX_ERRATUM_LDM_VENEER_SIZE 16
+#define STM32L4XX_ERRATUM_VLDM_VENEER_SIZE 24
+
+#define ARM_BX_VENEER_SIZE 12
+static const insn32 armbx1_tst_insn = 0xe3100001;
+static const insn32 armbx2_moveq_insn = 0x01a0f000;
+static const insn32 armbx3_bx_insn = 0xe12fff10;
+
+#ifndef ELFARM_NABI_C_INCLUDED
+static void
+arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
+{
+  asection * s;
+  bfd_byte * contents;
+
+  if (size == 0)
+    {
+      /* Do not include empty glue sections in the output.  */
+      if (abfd != NULL)
+       {
+         s = bfd_get_linker_section (abfd, name);
+         if (s != NULL)
+           s->flags |= SEC_EXCLUDE;
+       }
+      return;
+    }
+
+  BFD_ASSERT (abfd != NULL);
+
+  s = bfd_get_linker_section (abfd, name);
+  BFD_ASSERT (s != NULL);
+
+  contents = (bfd_byte *) bfd_alloc (abfd, size);
+
+  BFD_ASSERT (s->size == size);
+  s->contents = contents;
+}
+
+bfd_boolean
+bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
+{
+  struct elf32_arm_link_hash_table * globals;
 
-                         if (globals != NULL
-                             && globals->root.splt != NULL
-                             && hash != NULL
-                             && hash->root.plt.offset != (bfd_vma) -1)
-                           {
-                             sym_sec = globals->root.splt;
-                             sym_value = hash->root.plt.offset;
-                             if (sym_sec->output_section != NULL)
-                               destination = (sym_value
-                                              + sym_sec->output_offset
-                                              + sym_sec->output_section->vma);
-                           }
-                         else
-                           continue;
-                       }
-                     else
-                       {
-                         bfd_set_error (bfd_error_bad_value);
-                         goto error_ret_free_internal;
-                       }
-                     st_type = hash->root.type;
-                     branch_type = hash->root.target_internal;
-                     sym_name = hash->root.root.root.string;
-                   }
+  globals = elf32_arm_hash_table (info);
+  BFD_ASSERT (globals != NULL);
 
-                 do
-                   {
-                     /* Determine what (if any) linker stub is needed.  */
-                     stub_type = arm_type_of_stub (info, section, irela,
-                                                   st_type, &branch_type,
-                                                   hash, destination, sym_sec,
-                                                   input_bfd, sym_name);
-                     if (stub_type == arm_stub_none)
-                       break;
+  arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
+                                  globals->arm_glue_size,
+                                  ARM2THUMB_GLUE_SECTION_NAME);
 
-                     /* Support for grouping stub sections.  */
-                     id_sec = htab->stub_group[section->id].link_sec;
+  arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
+                                  globals->thumb_glue_size,
+                                  THUMB2ARM_GLUE_SECTION_NAME);
 
-                     /* Get the name of this stub.  */
-                     stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash,
-                                                      irela, stub_type);
-                     if (!stub_name)
-                       goto error_ret_free_internal;
+  arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
+                                  globals->vfp11_erratum_glue_size,
+                                  VFP11_ERRATUM_VENEER_SECTION_NAME);
 
-                     /* We've either created a stub for this reloc already,
-                        or we are about to.  */
-                     created_stub = TRUE;
+  arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
+                                  globals->stm32l4xx_erratum_glue_size,
+                                  STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
 
-                     stub_entry = arm_stub_hash_lookup
-                                    (&htab->stub_hash_table, stub_name,
-                                     FALSE, FALSE);
-                     if (stub_entry != NULL)
-                       {
-                         /* The proper stub has already been created.  */
-                         free (stub_name);
-                         stub_entry->target_value = sym_value;
-                         break;
-                       }
+  arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
+                                  globals->bx_glue_size,
+                                  ARM_BX_GLUE_SECTION_NAME);
 
-                     stub_entry = elf32_arm_add_stub (stub_name, section,
-                                                      htab);
-                     if (stub_entry == NULL)
-                       {
-                         free (stub_name);
-                         goto error_ret_free_internal;
-                       }
+  return TRUE;
+}
 
-                     stub_entry->target_value = sym_value;
-                     stub_entry->target_section = sym_sec;
-                     stub_entry->stub_type = stub_type;
-                     stub_entry->h = hash;
-                     stub_entry->branch_type = branch_type;
-
-                     if (sym_name == NULL)
-                       sym_name = "unnamed";
-                     stub_entry->output_name = (char *)
-                         bfd_alloc (htab->stub_bfd,
-                                    sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
-                                    + strlen (sym_name));
-                     if (stub_entry->output_name == NULL)
-                       {
-                         free (stub_name);
-                         goto error_ret_free_internal;
-                       }
+/* Allocate space and symbols for calling a Thumb function from ARM mode.
+   Returns the symbol identifying the stub.  */
 
-                     /* For historical reasons, use the existing names for
-                        ARM-to-Thumb and Thumb-to-ARM stubs.  */
-                     if ((r_type == (unsigned int) R_ARM_THM_CALL
-                          || r_type == (unsigned int) R_ARM_THM_JUMP24)
-                         && branch_type == ST_BRANCH_TO_ARM)
-                       sprintf (stub_entry->output_name,
-                                THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
-                     else if ((r_type == (unsigned int) R_ARM_CALL
-                              || r_type == (unsigned int) R_ARM_JUMP24)
-                              && branch_type == ST_BRANCH_TO_THUMB)
-                       sprintf (stub_entry->output_name,
-                                ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
-                     else
-                       sprintf (stub_entry->output_name, STUB_ENTRY_NAME,
-                                sym_name);
+static struct elf_link_hash_entry *
+record_arm_to_thumb_glue (struct bfd_link_info * link_info,
+                         struct elf_link_hash_entry * h)
+{
+  const char * name = h->root.root.string;
+  asection * s;
+  char * tmp_name;
+  struct elf_link_hash_entry * myh;
+  struct bfd_link_hash_entry * bh;
+  struct elf32_arm_link_hash_table * globals;
+  bfd_vma val;
+  bfd_size_type size;
 
-                     stub_changed = TRUE;
-                   }
-                 while (0);
+  globals = elf32_arm_hash_table (link_info);
+  BFD_ASSERT (globals != NULL);
+  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
 
-                 /* Look for relocations which might trigger Cortex-A8
-                    erratum.  */
-                 if (htab->fix_cortex_a8
-                     && (r_type == (unsigned int) R_ARM_THM_JUMP24
-                         || r_type == (unsigned int) R_ARM_THM_JUMP19
-                         || r_type == (unsigned int) R_ARM_THM_CALL
-                         || r_type == (unsigned int) R_ARM_THM_XPC22))
-                   {
-                     bfd_vma from = section->output_section->vma
-                                    + section->output_offset
-                                    + irela->r_offset;
+  s = bfd_get_linker_section
+    (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
 
-                     if ((from & 0xfff) == 0xffe)
-                       {
-                         /* Found a candidate.  Note we haven't checked the
-                            destination is within 4K here: if we do so (and
-                            don't create an entry in a8_relocs) we can't tell
-                            that a branch should have been relocated when
-                            scanning later.  */
-                         if (num_a8_relocs == a8_reloc_table_size)
-                           {
-                             a8_reloc_table_size *= 2;
-                             a8_relocs = (struct a8_erratum_reloc *)
-                                 bfd_realloc (a8_relocs,
-                                              sizeof (struct a8_erratum_reloc)
-                                              * a8_reloc_table_size);
-                           }
+  BFD_ASSERT (s != NULL);
 
-                         a8_relocs[num_a8_relocs].from = from;
-                         a8_relocs[num_a8_relocs].destination = destination;
-                         a8_relocs[num_a8_relocs].r_type = r_type;
-                         a8_relocs[num_a8_relocs].branch_type = branch_type;
-                         a8_relocs[num_a8_relocs].sym_name = sym_name;
-                         a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
-                         a8_relocs[num_a8_relocs].hash = hash;
+  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
+                                 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
 
-                         num_a8_relocs++;
-                       }
-                   }
-               }
+  BFD_ASSERT (tmp_name);
 
-             /* We're done with the internal relocs, free them.  */
-             if (elf_section_data (section)->relocs == NULL)
-               free (internal_relocs);
-           }
+  sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
 
-         if (htab->fix_cortex_a8)
-           {
-             /* Sort relocs which might apply to Cortex-A8 erratum.  */
-             qsort (a8_relocs, num_a8_relocs,
-                    sizeof (struct a8_erratum_reloc),
-                    &a8_reloc_compare);
+  myh = elf_link_hash_lookup
+    (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
 
-             /* Scan for branches which might trigger Cortex-A8 erratum.  */
-             if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
-                                         &num_a8_fixes, &a8_fix_table_size,
-                                         a8_relocs, num_a8_relocs,
-                                         prev_num_a8_fixes, &stub_changed)
-                 != 0)
-               goto error_ret_free_local;
-           }
-       }
+  if (myh != NULL)
+    {
+      /* We've already seen this guy.  */
+      free (tmp_name);
+      return myh;
+    }
 
-      if (prev_num_a8_fixes != num_a8_fixes)
-       stub_changed = TRUE;
+  /* The only trick here is using globals->arm_glue_size as the value.
+     Even though the section isn't allocated yet, this is where we will be
+     putting it.  The +1 on the value marks that the stub has not been
+     output yet - not that it is a Thumb function.  */
+  bh = NULL;
+  val = globals->arm_glue_size + 1;
+  _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
+                                   tmp_name, BSF_GLOBAL, s, val,
+                                   NULL, TRUE, FALSE, &bh);
 
-      if (!stub_changed)
-       break;
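+  /* Force the glue symbol local so that it does not end up in the dynamic
+     symbol table.  */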
+  myh = (struct elf_link_hash_entry *) bh;
+  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
+  myh->forced_local = 1;
 
-      /* OK, we've added some stubs.  Find out the new size of the
-        stub sections.  */
-      for (stub_sec = htab->stub_bfd->sections;
-          stub_sec != NULL;
-          stub_sec = stub_sec->next)
-       {
-         /* Ignore non-stub sections.  */
-         if (!strstr (stub_sec->name, STUB_SUFFIX))
-           continue;
+  free (tmp_name);
 
-         stub_sec->size = 0;
-       }
+  if (bfd_link_pic (link_info)
+      || globals->root.is_relocatable_executable
+      || globals->pic_veneer)
+    size = ARM2THUMB_PIC_GLUE_SIZE;
+  else if (globals->use_blx)
+    size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
+  else
+    size = ARM2THUMB_STATIC_GLUE_SIZE;
 
-      bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
+  s->size += size;
+  globals->arm_glue_size += size;
 
-      /* Add Cortex-A8 erratum veneers to stub section sizes too.  */
-      if (htab->fix_cortex_a8)
-       for (i = 0; i < num_a8_fixes; i++)
-         {
-           stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
-                        a8_fixes[i].section, htab);
+  return myh;
+}
 
-           if (stub_sec == NULL)
-             goto error_ret_free_local;
+/* Allocate space for ARMv4 BX veneers.  */
 
-           stub_sec->size
-             += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
-                                             NULL);
-         }
+static void
+record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
+{
+  asection * s;
+  struct elf32_arm_link_hash_table *globals;
+  char *tmp_name;
+  struct elf_link_hash_entry *myh;
+  struct bfd_link_hash_entry *bh;
+  bfd_vma val;
 
+  /* BX PC does not need a veneer.  */
+  if (reg == 15)
+    return;
 
-      /* Ask the linker to do its stuff.  */
-      (*htab->layout_sections_again) ();
-    }
+  globals = elf32_arm_hash_table (link_info);
+  BFD_ASSERT (globals != NULL);
+  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
 
-  /* Add stubs for Cortex-A8 erratum fixes now.  */
-  if (htab->fix_cortex_a8)
-    {
-      for (i = 0; i < num_a8_fixes; i++)
-       {
-         struct elf32_arm_stub_hash_entry *stub_entry;
-         char *stub_name = a8_fixes[i].stub_name;
-         asection *section = a8_fixes[i].section;
-         unsigned int section_id = a8_fixes[i].section->id;
-         asection *link_sec = htab->stub_group[section_id].link_sec;
-         asection *stub_sec = htab->stub_group[section_id].stub_sec;
-         const insn_sequence *template_sequence;
-         int template_size, size = 0;
+  /* Check if this veneer has already been allocated.  */
+  if (globals->bx_glue_offset[reg])
+    return;
 
-         stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
-                                            TRUE, FALSE);
-         if (stub_entry == NULL)
-           {
-             (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
-                                    section->owner,
-                                    stub_name);
-             return FALSE;
-           }
+  s = bfd_get_linker_section
+    (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
+
+  BFD_ASSERT (s != NULL);
+
+  /* Add symbol for veneer.  */
+  tmp_name = (char *)
+      bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
 
-         stub_entry->stub_sec = stub_sec;
-         stub_entry->stub_offset = 0;
-         stub_entry->id_sec = link_sec;
-         stub_entry->stub_type = a8_fixes[i].stub_type;
-         stub_entry->target_section = a8_fixes[i].section;
-         stub_entry->target_value = a8_fixes[i].offset;
-         stub_entry->target_addend = a8_fixes[i].addend;
-         stub_entry->orig_insn = a8_fixes[i].orig_insn;
-         stub_entry->branch_type = a8_fixes[i].branch_type;
+  BFD_ASSERT (tmp_name);
 
-         size = find_stub_size_and_template (a8_fixes[i].stub_type,
-                                             &template_sequence,
-                                             &template_size);
+  sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
 
-         stub_entry->stub_size = size;
-         stub_entry->stub_template = template_sequence;
-         stub_entry->stub_template_size = template_size;
-       }
+  myh = elf_link_hash_lookup
+    (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
 
-      /* Stash the Cortex-A8 erratum fix array for use later in
-        elf32_arm_write_section().  */
-      htab->a8_erratum_fixes = a8_fixes;
-      htab->num_a8_erratum_fixes = num_a8_fixes;
-    }
-  else
-    {
-      htab->a8_erratum_fixes = NULL;
-      htab->num_a8_erratum_fixes = 0;
-    }
-  return TRUE;
+  BFD_ASSERT (myh == NULL);
 
- error_ret_free_local:
-  return FALSE;
+  bh = NULL;
+  val = globals->bx_glue_size;
+  _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
+                                   tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
+                                   NULL, TRUE, FALSE, &bh);
+
+  myh = (struct elf_link_hash_entry *) bh;
+  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
+  myh->forced_local = 1;
+
+  s->size += ARM_BX_VENEER_SIZE;
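+  /* The low two bits of bx_glue_offset are flag bits: bit 1, set here,
+     records that the veneer has been allocated; bit 0 is set later once
+     it has been output.  The real offset is recovered by masking both
+     off.  */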
+  globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
+  globals->bx_glue_size += ARM_BX_VENEER_SIZE;
 }
 
-/* Build all the stubs associated with the current output file.  The
-   stubs are kept in a hash table attached to the main linker hash
-   table.  We also set up the .plt entries for statically linked PIC
-   functions here.  This function is called via arm_elf_finish in the
-   linker.  */
 
-bfd_boolean
-elf32_arm_build_stubs (struct bfd_link_info *info)
-{
-  asection *stub_sec;
-  struct bfd_hash_table *table;
-  struct elf32_arm_link_hash_table *htab;
+/* Add an entry to the code/data map for section SEC.  */
 
-  htab = elf32_arm_hash_table (info);
-  if (htab == NULL)
-    return FALSE;
+static void
+elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
+{
+  struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
+  unsigned int newidx;
 
-  for (stub_sec = htab->stub_bfd->sections;
-       stub_sec != NULL;
-       stub_sec = stub_sec->next)
+  if (sec_data->map == NULL)
     {
-      bfd_size_type size;
+      sec_data->map = (elf32_arm_section_map *)
+         bfd_malloc (sizeof (elf32_arm_section_map));
+      sec_data->mapcount = 0;
+      sec_data->mapsize = 1;
+    }
 
-      /* Ignore non-stub sections.  */
-      if (!strstr (stub_sec->name, STUB_SUFFIX))
-       continue;
+  newidx = sec_data->mapcount++;
 
-      /* Allocate memory to hold the linker stubs.  */
-      size = stub_sec->size;
-      stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
-      if (stub_sec->contents == NULL && size != 0)
-       return FALSE;
-      stub_sec->size = 0;
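+  /* Double the capacity whenever it is exhausted, keeping insertion
+     amortised O(1).  bfd_realloc_or_free releases the old buffer if the
+     reallocation fails.  */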
+  if (sec_data->mapcount > sec_data->mapsize)
+    {
+      sec_data->mapsize *= 2;
+      sec_data->map = (elf32_arm_section_map *)
+         bfd_realloc_or_free (sec_data->map, sec_data->mapsize
+                              * sizeof (elf32_arm_section_map));
     }
 
-  /* Build the stubs as directed by the stub hash table.  */
-  table = &htab->stub_hash_table;
-  bfd_hash_traverse (table, arm_build_one_stub, info);
-  if (htab->fix_cortex_a8)
+  if (sec_data->map)
     {
-      /* Place the cortex a8 stubs last.  */
-      htab->fix_cortex_a8 = -1;
-      bfd_hash_traverse (table, arm_build_one_stub, info);
+      sec_data->map[newidx].vma = vma;
+      sec_data->map[newidx].type = type;
     }
-
-  return TRUE;
 }
 
-/* Locate the Thumb encoded calling stub for NAME.  */
 
-static struct elf_link_hash_entry *
-find_thumb_glue (struct bfd_link_info *link_info,
-                const char *name,
-                char **error_message)
+/* Record information about a VFP11 denorm-erratum veneer.  Only ARM-mode
+   veneers are handled for now.  */
+
+static bfd_vma
+record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
+                            elf32_vfp11_erratum_list *branch,
+                            bfd *branch_bfd,
+                            asection *branch_sec,
+                            unsigned int offset)
 {
-  char *tmp_name;
-  struct elf_link_hash_entry *hash;
+  asection *s;
   struct elf32_arm_link_hash_table *hash_table;
+  char *tmp_name;
+  struct elf_link_hash_entry *myh;
+  struct bfd_link_hash_entry *bh;
+  bfd_vma val;
+  struct _arm_elf_section_data *sec_data;
+  elf32_vfp11_erratum_list *newerr;
 
-  /* We need a pointer to the armelf specific hash table.  */
   hash_table = elf32_arm_hash_table (link_info);
-  if (hash_table == NULL)
-    return NULL;
+  BFD_ASSERT (hash_table != NULL);
+  BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
 
-  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
-                                 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
+  s = bfd_get_linker_section
+    (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
+
+  BFD_ASSERT (s != NULL);
+
+  sec_data = elf32_arm_section_data (s);
+
+  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
+                                 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
 
   BFD_ASSERT (tmp_name);
 
-  sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
+  sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
+          hash_table->num_vfp11_fixes);
 
-  hash = elf_link_hash_lookup
-    (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
+  myh = elf_link_hash_lookup
+    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
 
-  if (hash == NULL
-      && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
-                  tmp_name, name) == -1)
-    *error_message = (char *) bfd_errmsg (bfd_error_system_call);
+  BFD_ASSERT (myh == NULL);
+
+  bh = NULL;
+  val = hash_table->vfp11_erratum_glue_size;
+  _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
+                                   tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
+                                   NULL, TRUE, FALSE, &bh);
+
+  myh = (struct elf_link_hash_entry *) bh;
+  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
+  myh->forced_local = 1;
+
+  /* Link veneer back to calling location.  */
+  sec_data->erratumcount += 1;
+  newerr = (elf32_vfp11_erratum_list *)
+      bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
+
+  newerr->type = VFP11_ERRATUM_ARM_VENEER;
+  newerr->vma = -1;
+  newerr->u.v.branch = branch;
+  newerr->u.v.id = hash_table->num_vfp11_fixes;
+  branch->u.b.veneer = newerr;
+
+  newerr->next = sec_data->erratumlist;
+  sec_data->erratumlist = newerr;
+
+  /* A symbol for the return from the veneer.  */
+  sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
+          hash_table->num_vfp11_fixes);
+
+  myh = elf_link_hash_lookup
+    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
+
+  if (myh != NULL)
+    abort ();
+
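+  /* The veneer returns to the instruction following the one that needed
+     it, i.e. four bytes past OFFSET in the branch section.  */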
+  bh = NULL;
+  val = offset + 4;
+  _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
+                                   branch_sec, val, NULL, TRUE, FALSE, &bh);
+
+  myh = (struct elf_link_hash_entry *) bh;
+  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
+  myh->forced_local = 1;
 
   free (tmp_name);
 
-  return hash;
+  /* Generate a mapping symbol for the veneer section, and explicitly add an
+     entry for that symbol to the code/data map for the section.  */
+  if (hash_table->vfp11_erratum_glue_size == 0)
+    {
+      bh = NULL;
+      /* FIXME: Creates an ARM symbol.  Thumb mode will need attention if it
+        ever requires this erratum fix.  */
+      _bfd_generic_link_add_one_symbol (link_info,
+                                       hash_table->bfd_of_glue_owner, "$a",
+                                       BSF_LOCAL, s, 0, NULL,
+                                       TRUE, FALSE, &bh);
+
+      myh = (struct elf_link_hash_entry *) bh;
+      myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
+      myh->forced_local = 1;
+
+      /* The elf32_arm_init_maps function only cares about symbols from input
+        BFDs.  We must make a note of this generated mapping symbol
+        ourselves so that code byteswapping works properly in
+        elf32_arm_write_section.  */
+      elf32_arm_section_map_add (s, 'a', 0);
+    }
+
+  s->size += VFP11_ERRATUM_VENEER_SIZE;
+  hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
+  hash_table->num_vfp11_fixes++;
+
+  /* The offset of the veneer.  */
+  return val;
 }
 
-/* Locate the ARM encoded calling stub for NAME.  */
+/* Record information about an STM32L4XX STM erratum veneer.  Only THUMB-mode
+   veneers need to be handled, since the erratum only affects Cortex-M cores,
+   which execute only Thumb code.  */
 
-static struct elf_link_hash_entry *
-find_arm_glue (struct bfd_link_info *link_info,
-              const char *name,
-              char **error_message)
+static bfd_vma
+record_stm32l4xx_erratum_veneer (struct bfd_link_info *link_info,
+                                elf32_stm32l4xx_erratum_list *branch,
+                                bfd *branch_bfd,
+                                asection *branch_sec,
+                                unsigned int offset,
+                                bfd_size_type veneer_size)
 {
+  asection *s;
+  struct elf32_arm_link_hash_table *hash_table;
   char *tmp_name;
   struct elf_link_hash_entry *myh;
-  struct elf32_arm_link_hash_table *hash_table;
+  struct bfd_link_hash_entry *bh;
+  bfd_vma val;
+  struct _arm_elf_section_data *sec_data;
+  elf32_stm32l4xx_erratum_list *newerr;
 
-  /* We need a pointer to the elfarm specific hash table.  */
   hash_table = elf32_arm_hash_table (link_info);
-  if (hash_table == NULL)
-    return NULL;
-
-  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
-                                 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
+  BFD_ASSERT (hash_table != NULL);
+  BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
 
-  BFD_ASSERT (tmp_name);
+  s = bfd_get_linker_section
+    (hash_table->bfd_of_glue_owner, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
 
-  sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
+  BFD_ASSERT (s != NULL);
 
-  myh = elf_link_hash_lookup
-    (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
+  sec_data = elf32_arm_section_data (s);
 
-  if (myh == NULL
-      && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
-                  tmp_name, name) == -1)
-    *error_message = (char *) bfd_errmsg (bfd_error_system_call);
+  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
+                                 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
 
-  free (tmp_name);
+  BFD_ASSERT (tmp_name);
 
-  return myh;
-}
+  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
+          hash_table->num_stm32l4xx_fixes);
 
-/* ARM->Thumb glue (static images):
+  myh = elf_link_hash_lookup
+    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
 
-   .arm
-   __func_from_arm:
-   ldr r12, __func_addr
-   bx  r12
-   __func_addr:
-   .word func    @ behave as if you saw a ARM_32 reloc.
+  BFD_ASSERT (myh == NULL);
 
-   (v5t static images)
-   .arm
-   __func_from_arm:
-   ldr pc, __func_addr
-   __func_addr:
-   .word func    @ behave as if you saw a ARM_32 reloc.
+  bh = NULL;
+  val = hash_table->stm32l4xx_erratum_glue_size;
+  _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
+                                   tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
+                                   NULL, TRUE, FALSE, &bh);
 
-   (relocatable images)
-   .arm
-   __func_from_arm:
-   ldr r12, __func_offset
-   add r12, r12, pc
-   bx  r12
-   __func_offset:
-   .word func - .   */
+  myh = (struct elf_link_hash_entry *) bh;
+  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
+  myh->forced_local = 1;
 
-#define ARM2THUMB_STATIC_GLUE_SIZE 12
-static const insn32 a2t1_ldr_insn = 0xe59fc000;
-static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
-static const insn32 a2t3_func_addr_insn = 0x00000001;
+  /* Link veneer back to calling location.  */
+  sec_data->stm32l4xx_erratumcount += 1;
+  newerr = (elf32_stm32l4xx_erratum_list *)
+      bfd_zmalloc (sizeof (elf32_stm32l4xx_erratum_list));
 
-#define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
-static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
-static const insn32 a2t2v5_func_addr_insn = 0x00000001;
+  newerr->type = STM32L4XX_ERRATUM_VENEER;
+  newerr->vma = -1;
+  newerr->u.v.branch = branch;
+  newerr->u.v.id = hash_table->num_stm32l4xx_fixes;
+  branch->u.b.veneer = newerr;
 
-#define ARM2THUMB_PIC_GLUE_SIZE 16
-static const insn32 a2t1p_ldr_insn = 0xe59fc004;
-static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
-static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
+  newerr->next = sec_data->stm32l4xx_erratumlist;
+  sec_data->stm32l4xx_erratumlist = newerr;
 
-/* Thumb->ARM:                          Thumb->(non-interworking aware) ARM
+  /* A symbol for the return from the veneer.  */
+  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
+          hash_table->num_stm32l4xx_fixes);
 
-     .thumb                             .thumb
-     .align 2                           .align 2
- __func_from_thumb:                 __func_from_thumb:
-     bx pc                              push {r6, lr}
-     nop                                ldr  r6, __func_addr
-     .arm                               mov  lr, pc
-     b func                             bx   r6
-                                       .arm
-                                   ;; back_to_thumb
-                                       ldmia r13! {r6, lr}
-                                       bx    lr
-                                   __func_addr:
-                                       .word        func  */
+  myh = elf_link_hash_lookup
+    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
 
-#define THUMB2ARM_GLUE_SIZE 8
-static const insn16 t2a1_bx_pc_insn = 0x4778;
-static const insn16 t2a2_noop_insn = 0x46c0;
-static const insn32 t2a3_b_insn = 0xea000000;
+  if (myh != NULL)
+    abort ();
 
-#define VFP11_ERRATUM_VENEER_SIZE 8
+  bh = NULL;
+  val = offset + 4;
+  _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
+                                   branch_sec, val, NULL, TRUE, FALSE, &bh);
 
-#define ARM_BX_VENEER_SIZE 12
-static const insn32 armbx1_tst_insn = 0xe3100001;
-static const insn32 armbx2_moveq_insn = 0x01a0f000;
-static const insn32 armbx3_bx_insn = 0xe12fff10;
+  myh = (struct elf_link_hash_entry *) bh;
+  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
+  myh->forced_local = 1;
 
-#ifndef ELFARM_NABI_C_INCLUDED
-static void
-arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
-{
-  asection * s;
-  bfd_byte * contents;
+  free (tmp_name);
 
-  if (size == 0)
+  /* Generate a mapping symbol for the veneer section, and explicitly add an
+     entry for that symbol to the code/data map for the section.  */
+  if (hash_table->stm32l4xx_erratum_glue_size == 0)
     {
-      /* Do not include empty glue sections in the output.  */
-      if (abfd != NULL)
-       {
-         s = bfd_get_linker_section (abfd, name);
-         if (s != NULL)
-           s->flags |= SEC_EXCLUDE;
-       }
-      return;
-    }
+      bh = NULL;
+      /* Creates a THUMB symbol since there is no other choice.  */
+      _bfd_generic_link_add_one_symbol (link_info,
+                                       hash_table->bfd_of_glue_owner, "$t",
+                                       BSF_LOCAL, s, 0, NULL,
+                                       TRUE, FALSE, &bh);
 
-  BFD_ASSERT (abfd != NULL);
+      myh = (struct elf_link_hash_entry *) bh;
+      myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
+      myh->forced_local = 1;
 
-  s = bfd_get_linker_section (abfd, name);
-  BFD_ASSERT (s != NULL);
+      /* The elf32_arm_init_maps function only cares about symbols from input
+        BFDs.  We must make a note of this generated mapping symbol
+        ourselves so that code byteswapping works properly in
+        elf32_arm_write_section.  */
+      elf32_arm_section_map_add (s, 't', 0);
+    }
 
-  contents = (bfd_byte *) bfd_alloc (abfd, size);
+  s->size += veneer_size;
+  hash_table->stm32l4xx_erratum_glue_size += veneer_size;
+  hash_table->num_stm32l4xx_fixes++;
 
-  BFD_ASSERT (s->size == size);
-  s->contents = contents;
+  /* The offset of the veneer.  */
+  return val;
 }
 
-bfd_boolean
-bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
-{
-  struct elf32_arm_link_hash_table * globals;
+#define ARM_GLUE_SECTION_FLAGS \
+  (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
+   | SEC_READONLY | SEC_LINKER_CREATED)
 
-  globals = elf32_arm_hash_table (info);
-  BFD_ASSERT (globals != NULL);
+/* Create a fake section for use by the ARM backend of the linker.  */
 
-  arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
-                                  globals->arm_glue_size,
-                                  ARM2THUMB_GLUE_SECTION_NAME);
+static bfd_boolean
+arm_make_glue_section (bfd * abfd, const char * name)
+{
+  asection * sec;
 
-  arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
-                                  globals->thumb_glue_size,
-                                  THUMB2ARM_GLUE_SECTION_NAME);
+  sec = bfd_get_linker_section (abfd, name);
+  if (sec != NULL)
+    /* Already made.  */
+    return TRUE;
 
-  arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
-                                  globals->vfp11_erratum_glue_size,
-                                  VFP11_ERRATUM_VENEER_SECTION_NAME);
+  sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
 
-  arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
-                                  globals->bx_glue_size,
-                                  ARM_BX_GLUE_SECTION_NAME);
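+  /* The alignment argument is a log2 value: 2 requests the 4-byte
+     alignment that ARM code requires.  */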
+  if (sec == NULL
+      || !bfd_set_section_alignment (abfd, sec, 2))
+    return FALSE;
+
+  /* Set the gc mark to prevent the section from being removed by garbage
+     collection, despite the fact that no relocs refer to this section.  */
+  sec->gc_mark = 1;
 
   return TRUE;
 }
 
-/* Allocate space and symbols for calling a Thumb function from Arm mode.
-   returns the symbol identifying the stub.  */
+/* Set size of .plt entries.  This function is called from the
+   linker scripts in ld/emultempl/{armelf}.em.  */
 
-static struct elf_link_hash_entry *
-record_arm_to_thumb_glue (struct bfd_link_info * link_info,
-                         struct elf_link_hash_entry * h)
+void
+bfd_elf32_arm_use_long_plt (void)
 {
-  const char * name = h->root.root.string;
-  asection * s;
-  char * tmp_name;
-  struct elf_link_hash_entry * myh;
-  struct bfd_link_hash_entry * bh;
-  struct elf32_arm_link_hash_table * globals;
-  bfd_vma val;
-  bfd_size_type size;
+  elf32_arm_use_long_plt_entry = TRUE;
+}
 
-  globals = elf32_arm_hash_table (link_info);
-  BFD_ASSERT (globals != NULL);
-  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
+/* Add the glue sections to ABFD.  This function is called from the
+   linker scripts in ld/emultempl/{armelf}.em.  */
 
-  s = bfd_get_linker_section
-    (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
+bfd_boolean
+bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
+                                       struct bfd_link_info *info)
+{
+  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
+  bfd_boolean dostm32l4xx = globals
+    && globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE;
+  bfd_boolean addglue;
 
-  BFD_ASSERT (s != NULL);
+  /* If we are only performing a partial
+     link do not bother adding the glue.  */
+  if (bfd_link_relocatable (info))
+    return TRUE;
 
-  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
-                                 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
+  addglue = arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
+    && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
+    && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
+    && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
 
-  BFD_ASSERT (tmp_name);
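+  /* The STM32L4XX veneer section is only needed when that erratum
+     workaround has been requested.  */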
+  if (!dostm32l4xx)
+    return addglue;
 
-  sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
+  return addglue
+    && arm_make_glue_section (abfd, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
+}
 
-  myh = elf_link_hash_lookup
-    (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
+/* Mark the output sections of veneers needing a dedicated output section
+   with SEC_KEEP.  This ensures they are not marked for deletion by
+   strip_excluded_output_sections () when the veneers are only going to be
+   created later.  Not doing so would trigger an assertion on an empty
+   section size in lang_size_sections_1 ().  */
 
-  if (myh != NULL)
+void
+bfd_elf32_arm_keep_private_stub_output_sections (struct bfd_link_info *info)
+{
+  enum elf32_arm_stub_type stub_type;
+
+  /* If we are only performing a partial
+     link do not bother adding the glue.  */
+  if (bfd_link_relocatable (info))
+    return;
+
+  for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
     {
-      /* We've already seen this guy.  */
-      free (tmp_name);
-      return myh;
+      asection *out_sec;
+      const char *out_sec_name;
+
+      if (!arm_dedicated_stub_output_section_required (stub_type))
+       continue;
+
+      out_sec_name = arm_dedicated_stub_output_section_name (stub_type);
+      out_sec = bfd_get_section_by_name (info->output_bfd, out_sec_name);
+      if (out_sec != NULL)
+       out_sec->flags |= SEC_KEEP;
     }
+}
 
-  /* The only trick here is using hash_table->arm_glue_size as the value.
-     Even though the section isn't allocated yet, this is where we will be
-     putting it.  The +1 on the value marks that the stub has not been
-     output yet - not that it is a Thumb function.  */
-  bh = NULL;
-  val = globals->arm_glue_size + 1;
-  _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
-                                   tmp_name, BSF_GLOBAL, s, val,
-                                   NULL, TRUE, FALSE, &bh);
+/* Select a BFD to be used to hold the sections used by the glue code.
+   This function is called from the linker scripts in ld/emultempl/
+   {armelf/pe}.em.  */
 
-  myh = (struct elf_link_hash_entry *) bh;
-  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
-  myh->forced_local = 1;
+bfd_boolean
+bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
+{
+  struct elf32_arm_link_hash_table *globals;
 
-  free (tmp_name);
+  /* If we are only performing a partial link
+     do not bother getting a bfd to hold the glue.  */
+  if (bfd_link_relocatable (info))
+    return TRUE;
 
-  if (link_info->shared || globals->root.is_relocatable_executable
-      || globals->pic_veneer)
-    size = ARM2THUMB_PIC_GLUE_SIZE;
-  else if (globals->use_blx)
-    size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
-  else
-    size = ARM2THUMB_STATIC_GLUE_SIZE;
+  /* Make sure we don't attach the glue sections to a dynamic object.  */
+  BFD_ASSERT (!(abfd->flags & DYNAMIC));
+
+  globals = elf32_arm_hash_table (info);
+  BFD_ASSERT (globals != NULL);
+
+  if (globals->bfd_of_glue_owner != NULL)
+    return TRUE;
 
-  s->size += size;
-  globals->arm_glue_size += size;
+  /* Save the bfd for later use.  */
+  globals->bfd_of_glue_owner = abfd;
 
-  return myh;
+  return TRUE;
 }
 
-/* Allocate space for ARMv4 BX veneers.  */
-
 static void
-record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
+check_use_blx (struct elf32_arm_link_hash_table *globals)
 {
-  asection * s;
-  struct elf32_arm_link_hash_table *globals;
-  char *tmp_name;
-  struct elf_link_hash_entry *myh;
-  struct bfd_link_hash_entry *bh;
-  bfd_vma val;
-
-  /* BX PC does not need a veneer.  */
-  if (reg == 15)
-    return;
+  int cpu_arch;
 
-  globals = elf32_arm_hash_table (link_info);
-  BFD_ASSERT (globals != NULL);
-  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
+  cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
+                                      Tag_CPU_arch);
 
-  /* Check if this veneer has already been allocated.  */
-  if (globals->bx_glue_offset[reg])
-    return;
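+  /* BLX is available from ARMv5T onwards.  When the ARM1176 erratum
+     workaround is enabled, only use BLX on architectures that cannot
+     denote an ARM1176 core: v6T2, or anything after v6K.  */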
+  if (globals->fix_arm1176)
+    {
+      if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K)
+       globals->use_blx = 1;
+    }
+  else
+    {
+      if (cpu_arch > TAG_CPU_ARCH_V4T)
+       globals->use_blx = 1;
+    }
+}
 
-  s = bfd_get_linker_section
-    (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
+bfd_boolean
+bfd_elf32_arm_process_before_allocation (bfd *abfd,
+                                        struct bfd_link_info *link_info)
+{
+  Elf_Internal_Shdr *symtab_hdr;
+  Elf_Internal_Rela *internal_relocs = NULL;
+  Elf_Internal_Rela *irel, *irelend;
+  bfd_byte *contents = NULL;
 
-  BFD_ASSERT (s != NULL);
+  asection *sec;
+  struct elf32_arm_link_hash_table *globals;
 
-  /* Add symbol for veneer.  */
-  tmp_name = (char *)
-      bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
+  /* If we are only performing a partial link do not bother
+     to construct any glue.  */
+  if (bfd_link_relocatable (link_info))
+    return TRUE;
 
-  BFD_ASSERT (tmp_name);
+  /* Here we have a bfd that is to be included on the link.  We have a
+     hook to do reloc rummaging, before section sizes are nailed down.  */
+  globals = elf32_arm_hash_table (link_info);
+  BFD_ASSERT (globals != NULL);
 
-  sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
+  check_use_blx (globals);
 
-  myh = elf_link_hash_lookup
-    (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
+  if (globals->byteswap_code && !bfd_big_endian (abfd))
+    {
+      _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
+                         abfd);
+      return FALSE;
+    }
 
-  BFD_ASSERT (myh == NULL);
+  /* PR 5398: If we have not decided to include any loadable sections in
+     the output then we will not have a glue owner bfd.  This is OK, it
+     just means that there is nothing else for us to do here.  */
+  if (globals->bfd_of_glue_owner == NULL)
+    return TRUE;
 
-  bh = NULL;
-  val = globals->bx_glue_size;
-  _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
-                                   tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
-                                   NULL, TRUE, FALSE, &bh);
+  /* Rummage around all the relocs and map the glue vectors.  */
+  sec = abfd->sections;
 
-  myh = (struct elf_link_hash_entry *) bh;
-  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
-  myh->forced_local = 1;
+  if (sec == NULL)
+    return TRUE;
 
-  s->size += ARM_BX_VENEER_SIZE;
-  globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
-  globals->bx_glue_size += ARM_BX_VENEER_SIZE;
-}
+  for (; sec != NULL; sec = sec->next)
+    {
+      if (sec->reloc_count == 0)
+       continue;
 
+      if ((sec->flags & SEC_EXCLUDE) != 0)
+       continue;
 
-/* Add an entry to the code/data map for section SEC.  */
+      symtab_hdr = & elf_symtab_hdr (abfd);
 
-static void
-elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
-{
-  struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
-  unsigned int newidx;
+      /* Load the relocs.  */
+      internal_relocs
+       = _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);
 
-  if (sec_data->map == NULL)
-    {
-      sec_data->map = (elf32_arm_section_map *)
-         bfd_malloc (sizeof (elf32_arm_section_map));
-      sec_data->mapcount = 0;
-      sec_data->mapsize = 1;
-    }
+      if (internal_relocs == NULL)
+       goto error_return;
 
-  newidx = sec_data->mapcount++;
+      irelend = internal_relocs + sec->reloc_count;
+      for (irel = internal_relocs; irel < irelend; irel++)
+       {
+         long r_type;
+         unsigned long r_index;
 
-  if (sec_data->mapcount > sec_data->mapsize)
-    {
-      sec_data->mapsize *= 2;
-      sec_data->map = (elf32_arm_section_map *)
-         bfd_realloc_or_free (sec_data->map, sec_data->mapsize
-                              * sizeof (elf32_arm_section_map));
-    }
+         struct elf_link_hash_entry *h;
 
-  if (sec_data->map)
-    {
-      sec_data->map[newidx].vma = vma;
-      sec_data->map[newidx].type = type;
-    }
-}
+         r_type = ELF32_R_TYPE (irel->r_info);
+         r_index = ELF32_R_SYM (irel->r_info);
 
+         /* These are the only relocation types we care about.  */
+         if (   r_type != R_ARM_PC24
+             && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
+           continue;
 
-/* Record information about a VFP11 denorm-erratum veneer.  Only ARM-mode
-   veneers are handled for now.  */
+         /* Get the section contents if we haven't done so already.  */
+         if (contents == NULL)
+           {
+             /* Get cached copy if it exists.  */
+             if (elf_section_data (sec)->this_hdr.contents != NULL)
+               contents = elf_section_data (sec)->this_hdr.contents;
+             else
+               {
+                 /* Go get them off disk.  */
+                 if (! bfd_malloc_and_get_section (abfd, sec, &contents))
+                   goto error_return;
+               }
+           }
 
-static bfd_vma
-record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
-                            elf32_vfp11_erratum_list *branch,
-                            bfd *branch_bfd,
-                            asection *branch_sec,
-                            unsigned int offset)
-{
-  asection *s;
-  struct elf32_arm_link_hash_table *hash_table;
-  char *tmp_name;
-  struct elf_link_hash_entry *myh;
-  struct bfd_link_hash_entry *bh;
-  bfd_vma val;
-  struct _arm_elf_section_data *sec_data;
-  elf32_vfp11_erratum_list *newerr;
+         if (r_type == R_ARM_V4BX)
+           {
+             int reg;
 
-  hash_table = elf32_arm_hash_table (link_info);
-  BFD_ASSERT (hash_table != NULL);
-  BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
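+             /* A BX instruction encodes its target register in its low
+                four bits (Rm); that is all the veneer generator needs.  */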
+             reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
+             record_arm_bx_glue (link_info, reg);
+             continue;
+           }
 
-  s = bfd_get_linker_section
-    (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
+         /* If the relocation is not against a symbol it cannot concern us.  */
+         h = NULL;
 
-  sec_data = elf32_arm_section_data (s);
+         /* We don't care about local symbols.  */
+         if (r_index < symtab_hdr->sh_info)
+           continue;
 
-  BFD_ASSERT (s != NULL);
+         /* This is an external symbol.  */
+         r_index -= symtab_hdr->sh_info;
+         h = (struct elf_link_hash_entry *)
+           elf_sym_hashes (abfd)[r_index];
 
-  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
-                                 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
+         /* If the relocation is against a static symbol it must be within
+            the current section and so cannot be a cross ARM/Thumb relocation.  */
+         if (h == NULL)
+           continue;
 
-  BFD_ASSERT (tmp_name);
+         /* If the call will go through a PLT entry then we do not need
+            glue.  */
+         if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)
+           continue;
 
-  sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
-          hash_table->num_vfp11_fixes);
+         switch (r_type)
+           {
+           case R_ARM_PC24:
+             /* This one is a call from arm code.  We need to look up
+                the target of the call.  If it is a thumb target, we
+                insert glue.  */
+             if (ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
+                 == ST_BRANCH_TO_THUMB)
+               record_arm_to_thumb_glue (link_info, h);
+             break;
 
-  myh = elf_link_hash_lookup
-    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
+           default:
+             abort ();
+           }
+       }
 
-  BFD_ASSERT (myh == NULL);
+      if (contents != NULL
+         && elf_section_data (sec)->this_hdr.contents != contents)
+       free (contents);
+      contents = NULL;
 
-  bh = NULL;
-  val = hash_table->vfp11_erratum_glue_size;
-  _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
-                                   tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
-                                   NULL, TRUE, FALSE, &bh);
+      if (internal_relocs != NULL
+         && elf_section_data (sec)->relocs != internal_relocs)
+       free (internal_relocs);
+      internal_relocs = NULL;
+    }
 
-  myh = (struct elf_link_hash_entry *) bh;
-  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
-  myh->forced_local = 1;
+  return TRUE;
 
-  /* Link veneer back to calling location.  */
-  sec_data->erratumcount += 1;
-  newerr = (elf32_vfp11_erratum_list *)
-      bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
+error_return:
+  if (contents != NULL
+      && elf_section_data (sec)->this_hdr.contents != contents)
+    free (contents);
+  if (internal_relocs != NULL
+      && elf_section_data (sec)->relocs != internal_relocs)
+    free (internal_relocs);
 
-  newerr->type = VFP11_ERRATUM_ARM_VENEER;
-  newerr->vma = -1;
-  newerr->u.v.branch = branch;
-  newerr->u.v.id = hash_table->num_vfp11_fixes;
-  branch->u.b.veneer = newerr;
+  return FALSE;
+}
+#endif
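
A quick illustration of the R_ARM_V4BX case handled above (the BX encoding is per the ARM ARM; the register value is invented): an ARM-mode "bx r3" assembles to 0xe12fff13, so the scan pulls the operand register out of the low nibble and records that a BX veneer for r3 is needed:

    /* Sketch only: mirrors the extraction done in the reloc loop above.  */
    unsigned int insn = 0xe12fff13;        /* bx r3, condition AL.  */
    int reg = insn & 0xf;                  /* reg == 3.  */
    record_arm_bx_glue (link_info, reg);   /* One veneer per register.  */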
 
-  newerr->next = sec_data->erratumlist;
-  sec_data->erratumlist = newerr;
 
-  /* A symbol for the return from the veneer.  */
-  sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
-          hash_table->num_vfp11_fixes);
+/* Initialise maps of ARM/Thumb/data for input BFDs.  */
 
-  myh = elf_link_hash_lookup
-    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
+void
+bfd_elf32_arm_init_maps (bfd *abfd)
+{
+  Elf_Internal_Sym *isymbuf;
+  Elf_Internal_Shdr *hdr;
+  unsigned int i, localsyms;
 
-  if (myh != NULL)
-    abort ();
+  /* PR 7093: Make sure that we are dealing with an arm elf binary.  */
+  if (! is_arm_elf (abfd))
+    return;
 
-  bh = NULL;
-  val = offset + 4;
-  _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
-                                   branch_sec, val, NULL, TRUE, FALSE, &bh);
+  if ((abfd->flags & DYNAMIC) != 0)
+    return;
 
-  myh = (struct elf_link_hash_entry *) bh;
-  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
-  myh->forced_local = 1;
+  hdr = & elf_symtab_hdr (abfd);
+  localsyms = hdr->sh_info;
 
-  free (tmp_name);
+  /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
+     should contain the number of local symbols, which should come before any
+     global symbols.  Mapping symbols are always local.  */
+  isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
+                                 NULL);
 
-  /* Generate a mapping symbol for the veneer section, and explicitly add an
-     entry for that symbol to the code/data map for the section.  */
-  if (hash_table->vfp11_erratum_glue_size == 0)
+  /* No internal symbols read?  Skip this BFD.  */
+  if (isymbuf == NULL)
+    return;
+
+  for (i = 0; i < localsyms; i++)
     {
-      bh = NULL;
-      /* FIXME: Creates an ARM symbol.  Thumb mode will need attention if it
-        ever requires this erratum fix.  */
-      _bfd_generic_link_add_one_symbol (link_info,
-                                       hash_table->bfd_of_glue_owner, "$a",
-                                       BSF_LOCAL, s, 0, NULL,
-                                       TRUE, FALSE, &bh);
+      Elf_Internal_Sym *isym = &isymbuf[i];
+      asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
+      const char *name;
 
-      myh = (struct elf_link_hash_entry *) bh;
-      myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
-      myh->forced_local = 1;
+      if (sec != NULL
+         && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
+       {
+         name = bfd_elf_string_from_elf_section (abfd,
+           hdr->sh_link, isym->st_name);
 
-      /* The elf32_arm_init_maps function only cares about symbols from input
-        BFDs.  We must make a note of this generated mapping symbol
-        ourselves so that code byteswapping works properly in
-        elf32_arm_write_section.  */
-      elf32_arm_section_map_add (s, 'a', 0);
+         if (bfd_is_arm_special_symbol_name (name,
+                                             BFD_ARM_SPECIAL_SYM_TYPE_MAP))
+           elf32_arm_section_map_add (sec, name[1], isym->st_value);
+       }
     }
-
-  s->size += VFP11_ERRATUM_VENEER_SIZE;
-  hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
-  hash_table->num_vfp11_fixes++;
-
-  /* The offset of the veneer.  */
-  return val;
 }
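
To make the effect concrete, here is a minimal sketch (hypothetical symbols, not taken from this patch) of what the loop above records:

    /* A local symbol table carrying the AAELF mapping symbols
         $a  st_value 0x00   (ARM code starts here)
         $d  st_value 0x20   (literal pool starts here)
         $t  st_value 0x28   (Thumb code starts here)
       yields three entries in the section's code/data map,
         ('a', 0x00), ('d', 0x20), ('t', 0x28),
       via elf32_arm_section_map_add (sec, name[1], isym->st_value).  */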
 
-#define ARM_GLUE_SECTION_FLAGS \
-  (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
-   | SEC_READONLY | SEC_LINKER_CREATED)
 
-/* Create a fake section for use by the ARM backend of the linker.  */
+/* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
+   say what they wanted.  */
 
-static bfd_boolean
-arm_make_glue_section (bfd * abfd, const char * name)
+void
+bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
 {
-  asection * sec;
-
-  sec = bfd_get_linker_section (abfd, name);
-  if (sec != NULL)
-    /* Already made.  */
-    return TRUE;
-
-  sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
-
-  if (sec == NULL
-      || !bfd_set_section_alignment (abfd, sec, 2))
-    return FALSE;
+  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
+  obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
 
-  /* Set the gc mark to prevent the section from being removed by garbage
-     collection, despite the fact that no relocs refer to this section.  */
-  sec->gc_mark = 1;
+  if (globals == NULL)
+    return;
 
-  return TRUE;
+  if (globals->fix_cortex_a8 == -1)
+    {
+      /* Turn on Cortex-A8 erratum workaround for ARMv7-A.  */
+      if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
+         && (out_attr[Tag_CPU_arch_profile].i == 'A'
+             || out_attr[Tag_CPU_arch_profile].i == 0))
+       globals->fix_cortex_a8 = 1;
+      else
+       globals->fix_cortex_a8 = 0;
+    }
 }
 
-/* Add the glue sections to ABFD.  This function is called from the
-   linker scripts in ld/emultempl/{armelf}.em.  */
 
-bfd_boolean
-bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
-                                       struct bfd_link_info *info)
+void
+bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
 {
-  /* If we are only performing a partial
-     link do not bother adding the glue.  */
-  if (info->relocatable)
-    return TRUE;
+  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
+  obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
 
-  return arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
-    && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
-    && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
-    && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
-}
+  if (globals == NULL)
+    return;
+  /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix.  */
+  if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
+    {
+      switch (globals->vfp11_fix)
+       {
+       case BFD_ARM_VFP11_FIX_DEFAULT:
+       case BFD_ARM_VFP11_FIX_NONE:
+         globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
+         break;
 
-/* Select a BFD to be used to hold the sections used by the glue code.
-   This function is called from the linker scripts in ld/emultempl/
-   {armelf/pe}.em.  */
+       default:
+         /* Give a warning, but do as the user requests anyway.  */
+         (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
+           "workaround is not necessary for target architecture"), obfd);
+       }
+    }
+  else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
+    /* For earlier architectures, we might need the workaround, but do not
+       enable it by default.  If users is running with broken hardware, they
+       must enable the erratum fix explicitly.  */
+    globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
+}
 
-bfd_boolean
-bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
+void
+bfd_elf32_arm_set_stm32l4xx_fix (bfd *obfd, struct bfd_link_info *link_info)
 {
-  struct elf32_arm_link_hash_table *globals;
+  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
+  obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
 
-  /* If we are only performing a partial link
-     do not bother getting a bfd to hold the glue.  */
-  if (info->relocatable)
-    return TRUE;
+  if (globals == NULL)
+    return;
 
-  /* Make sure we don't attach the glue sections to a dynamic object.  */
-  BFD_ASSERT (!(abfd->flags & DYNAMIC));
+  /* We assume only Cortex-M4 may require the fix.  */
+  if (out_attr[Tag_CPU_arch].i != TAG_CPU_ARCH_V7E_M
+      || out_attr[Tag_CPU_arch_profile].i != 'M')
+    {
+      if (globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE)
+       /* Give a warning, but do as the user requests anyway.  */
+       (*_bfd_error_handler)
+         (_("%B: warning: selected STM32L4XX erratum "
+            "workaround is not necessary for target architecture"), obfd);
+    }
+}
 
-  globals = elf32_arm_hash_table (info);
-  BFD_ASSERT (globals != NULL);
+enum bfd_arm_vfp11_pipe
+{
+  VFP11_FMAC,
+  VFP11_LS,
+  VFP11_DS,
+  VFP11_BAD
+};
 
-  if (globals->bfd_of_glue_owner != NULL)
-    return TRUE;
+/* Return a VFP register number.  This is encoded as RX:X for single-precision
+   registers, or X:RX for double-precision registers, where RX is the group of
+   four bits in the instruction encoding and X is the single extension bit.
+   RX and X fields are specified using their lowest (starting) bit.  The return
+   value is:
 
-  /* Save the bfd for later use.  */
-  globals->bfd_of_glue_owner = abfd;
+     0...31: single-precision registers s0...s31
+     32...63: double-precision registers d0...d31.
 
-  return TRUE;
+   Although X should be zero for VFP11 (encoding d0...d15 only), we might
+   encounter VFP3 instructions, so we allow the full range for DP registers.  */
+
+static unsigned int
+bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
+                    unsigned int x)
+{
+  if (is_double)
+    return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
+  else
+    return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
 }
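
To pin the encoding down, a short worked example (field positions as used for Fd below; the values are invented):

    /* Fd of a single-precision operand: RX in bits 12-15, X at bit 22.
       Take RX = 0b0101 and X = 1.  */
    unsigned int insn = (0x5u << 12) | (1u << 22);
    unsigned int sp = bfd_arm_vfp11_regno (insn, FALSE, 12, 22);
    /* sp == (5 << 1) | 1 == 11, i.e. s11.  Decoded as double-precision
       instead, the same fields give (5 | (1 << 4)) + 32 == 53, i.e. d21.  */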
 
+/* Set bits in *WMASK according to a register number REG as encoded by
+   bfd_arm_vfp11_regno().  Ignore d16-d31.  */
+
 static void
-check_use_blx (struct elf32_arm_link_hash_table *globals)
+bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
 {
-  int cpu_arch;
+  if (reg < 32)
+    *wmask |= 1 << reg;
+  else if (reg < 48)
+    *wmask |= 3 << ((reg - 32) * 2);
+}
 
-  cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
-                                      Tag_CPU_arch);
+/* Return TRUE if WMASK overwrites anything in REGS.  */
 
-  if (globals->fix_arm1176)
-    {
-      if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K)
-       globals->use_blx = 1;
-    }
-  else
+static bfd_boolean
+bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
+{
+  int i;
+
+  for (i = 0; i < numregs; i++)
     {
-      if (cpu_arch > TAG_CPU_ARCH_V4T)
-       globals->use_blx = 1;
+      unsigned int reg = regs[i];
+
+      if (reg < 32 && (wmask & (1 << reg)) != 0)
+       return TRUE;
+
+      reg -= 32;
+
+      if (reg >= 16)
+       continue;
+
+      if ((wmask & (3 << (reg * 2))) != 0)
+       return TRUE;
     }
-}
 
-bfd_boolean
-bfd_elf32_arm_process_before_allocation (bfd *abfd,
-                                        struct bfd_link_info *link_info)
-{
-  Elf_Internal_Shdr *symtab_hdr;
-  Elf_Internal_Rela *internal_relocs = NULL;
-  Elf_Internal_Rela *irel, *irelend;
-  bfd_byte *contents = NULL;
+  return FALSE;
+}
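
How the two helpers combine, with invented register numbers (in the numbering returned by bfd_arm_vfp11_regno):

    unsigned int wmask = 0;
    int regs[] = { 2, 5 };      /* An earlier insn read s2 and s5.  */

    /* A later insn writes d1, i.e. SP slots s2/s3, so wmask becomes 0xc.  */
    bfd_arm_vfp11_write_mask (&wmask, 33);

    /* Bit 2 of wmask overlaps input s2: this returns TRUE, and the erratum
       scanner (later in this patch) must plant a veneer.  */
    bfd_arm_vfp11_antidependency (wmask, regs, 2);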
 
-  asection *sec;
-  struct elf32_arm_link_hash_table *globals;
+/* In this function, we're interested in two things: finding input registers
+   for VFP data-processing instructions, and finding the set of registers which
+   arbitrary VFP instructions may write to.  We use a 32-bit unsigned int to
+   hold the written set, so FLDM etc. are easy to deal with (we're only
+   interested in 32 SP registers or 16 DP registers, due to the VFP version
+   implemented by the chip in question).  DP registers are marked by setting
+   both SP registers in the write mask.  */
 
-  /* If we are only performing a partial link do not bother
-     to construct any glue.  */
-  if (link_info->relocatable)
-    return TRUE;
+static enum bfd_arm_vfp11_pipe
+bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
+                          int *numregs)
+{
+  enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
+  bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;
 
-  /* Here we have a bfd that is to be included on the link.  We have a
-     hook to do reloc rummaging, before section sizes are nailed down.  */
-  globals = elf32_arm_hash_table (link_info);
-  BFD_ASSERT (globals != NULL);
+  if ((insn & 0x0f000e10) == 0x0e000a00)  /* A data-processing insn.  */
+    {
+      unsigned int pqrs;
+      unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
+      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
 
-  check_use_blx (globals);
+      pqrs = ((insn & 0x00800000) >> 20)
+          | ((insn & 0x00300000) >> 19)
+          | ((insn & 0x00000040) >> 6);
 
-  if (globals->byteswap_code && !bfd_big_endian (abfd))
-    {
-      _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
-                         abfd);
-      return FALSE;
-    }
+      switch (pqrs)
+       {
+       case 0: /* fmac[sd].  */
+       case 1: /* fnmac[sd].  */
+       case 2: /* fmsc[sd].  */
+       case 3: /* fnmsc[sd].  */
+         vpipe = VFP11_FMAC;
+         bfd_arm_vfp11_write_mask (destmask, fd);
+         regs[0] = fd;
+         regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn.  */
+         regs[2] = fm;
+         *numregs = 3;
+         break;
 
-  /* PR 5398: If we have not decided to include any loadable sections in
-     the output then we will not have a glue owner bfd.  This is OK, it
-     just means that there is nothing else for us to do here.  */
-  if (globals->bfd_of_glue_owner == NULL)
-    return TRUE;
+       case 4: /* fmul[sd].  */
+       case 5: /* fnmul[sd].  */
+       case 6: /* fadd[sd].  */
+       case 7: /* fsub[sd].  */
+         vpipe = VFP11_FMAC;
+         goto vfp_binop;
 
-  /* Rummage around all the relocs and map the glue vectors.  */
-  sec = abfd->sections;
+       case 8: /* fdiv[sd].  */
+         vpipe = VFP11_DS;
+         vfp_binop:
+         bfd_arm_vfp11_write_mask (destmask, fd);
+         regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);   /* Fn.  */
+         regs[1] = fm;
+         *numregs = 2;
+         break;
 
-  if (sec == NULL)
-    return TRUE;
+       case 15: /* extended opcode.  */
+         {
+           unsigned int extn = ((insn >> 15) & 0x1e)
+                             | ((insn >> 7) & 1);
 
-  for (; sec != NULL; sec = sec->next)
-    {
-      if (sec->reloc_count == 0)
-       continue;
+           switch (extn)
+             {
+             case 0: /* fcpy[sd].  */
+             case 1: /* fabs[sd].  */
+             case 2: /* fneg[sd].  */
+             case 8: /* fcmp[sd].  */
+             case 9: /* fcmpe[sd].  */
+             case 10: /* fcmpz[sd].  */
+             case 11: /* fcmpez[sd].  */
+             case 16: /* fuito[sd].  */
+             case 17: /* fsito[sd].  */
+             case 24: /* ftoui[sd].  */
+             case 25: /* ftouiz[sd].  */
+             case 26: /* ftosi[sd].  */
+             case 27: /* ftosiz[sd].  */
+               /* These instructions will not bounce due to underflow.  */
+               *numregs = 0;
+               vpipe = VFP11_FMAC;
+               break;
 
-      if ((sec->flags & SEC_EXCLUDE) != 0)
-       continue;
+             case 3: /* fsqrt[sd].  */
+               /* fsqrt cannot underflow, but it can (perhaps) overwrite
+                  registers to cause the erratum in previous instructions.  */
+               bfd_arm_vfp11_write_mask (destmask, fd);
+               vpipe = VFP11_DS;
+               break;
 
-      symtab_hdr = & elf_symtab_hdr (abfd);
+             case 15: /* fcvt{ds,sd}.  */
+               {
+                 int rnum = 0;
 
-      /* Load the relocs.  */
-      internal_relocs
-       = _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);
+                 bfd_arm_vfp11_write_mask (destmask, fd);
 
-      if (internal_relocs == NULL)
-       goto error_return;
+                 /* Only FCVTSD can underflow.  */
+                 if ((insn & 0x100) != 0)
+                   regs[rnum++] = fm;
 
-      irelend = internal_relocs + sec->reloc_count;
-      for (irel = internal_relocs; irel < irelend; irel++)
-       {
-         long r_type;
-         unsigned long r_index;
+                 *numregs = rnum;
 
-         struct elf_link_hash_entry *h;
+                 vpipe = VFP11_FMAC;
+               }
+               break;
 
-         r_type = ELF32_R_TYPE (irel->r_info);
-         r_index = ELF32_R_SYM (irel->r_info);
+             default:
+               return VFP11_BAD;
+             }
+         }
+         break;
 
-         /* These are the only relocation types we care about.  */
-         if (   r_type != R_ARM_PC24
-             && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
-           continue;
+       default:
+         return VFP11_BAD;
+       }
+    }
+  /* Two-register transfer.  */
+  else if ((insn & 0x0fe00ed0) == 0x0c400a10)
+    {
+      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
 
-         /* Get the section contents if we haven't done so already.  */
-         if (contents == NULL)
+      if ((insn & 0x100000) == 0)
+       {
+         if (is_double)
+           bfd_arm_vfp11_write_mask (destmask, fm);
+         else
            {
-             /* Get cached copy if it exists.  */
-             if (elf_section_data (sec)->this_hdr.contents != NULL)
-               contents = elf_section_data (sec)->this_hdr.contents;
-             else
-               {
-                 /* Go get them off disk.  */
-                 if (! bfd_malloc_and_get_section (abfd, sec, &contents))
-                   goto error_return;
-               }
+             bfd_arm_vfp11_write_mask (destmask, fm);
+             bfd_arm_vfp11_write_mask (destmask, fm + 1);
            }
+       }
 
-         if (r_type == R_ARM_V4BX)
-           {
-             int reg;
+      vpipe = VFP11_LS;
+    }
+  else if ((insn & 0x0e100e00) == 0x0c100a00)  /* A load insn.  */
+    {
+      int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
+      unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
 
-             reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
-             record_arm_bx_glue (link_info, reg);
-             continue;
-           }
+      switch (puw)
+       {
+       case 0: /* Two-reg transfer.  We should catch these above.  */
+         abort ();
 
-         /* If the relocation is not against a symbol it cannot concern us.  */
-         h = NULL;
+       case 2: /* fldm[sdx].  */
+       case 3:
+       case 5:
+         {
+           unsigned int i, offset = insn & 0xff;
 
-         /* We don't care about local symbols.  */
-         if (r_index < symtab_hdr->sh_info)
-           continue;
+           if (is_double)
+             offset >>= 1;
 
-         /* This is an external symbol.  */
-         r_index -= symtab_hdr->sh_info;
-         h = (struct elf_link_hash_entry *)
-           elf_sym_hashes (abfd)[r_index];
+           for (i = fd; i < fd + offset; i++)
+             bfd_arm_vfp11_write_mask (destmask, i);
+         }
+         break;
 
-         /* If the relocation is against a static symbol it must be within
-            the current section and so cannot be a cross ARM/Thumb relocation.  */
-         if (h == NULL)
-           continue;
+       case 4: /* fld[sd].  */
+       case 6:
+         bfd_arm_vfp11_write_mask (destmask, fd);
+         break;
 
-         /* If the call will go through a PLT entry then we do not need
-            glue.  */
-         if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)
-           continue;
+       default:
+         return VFP11_BAD;
+       }
 
-         switch (r_type)
-           {
-           case R_ARM_PC24:
-             /* This one is a call from arm code.  We need to look up
-                the target of the call.  If it is a thumb target, we
-                insert glue.  */
-             if (h->target_internal == ST_BRANCH_TO_THUMB)
-               record_arm_to_thumb_glue (link_info, h);
-             break;
+      vpipe = VFP11_LS;
+    }
+  /* Single-register transfer. Note L==0.  */
+  else if ((insn & 0x0f100e10) == 0x0e000a10)
+    {
+      unsigned int opcode = (insn >> 21) & 7;
+      unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
+
+      switch (opcode)
+       {
+       case 0: /* fmsr/fmdlr.  */
+       case 1: /* fmdhr.  */
+         /* Mark fmdhr and fmdlr as writing to the whole of the DP
+            destination register.  I don't know if this is exactly right,
+            but it is the conservative choice.  */
+         bfd_arm_vfp11_write_mask (destmask, fn);
+         break;
 
-           default:
-             abort ();
-           }
+       case 7: /* fmxr.  */
+         break;
        }
 
-      if (contents != NULL
-         && elf_section_data (sec)->this_hdr.contents != contents)
-       free (contents);
-      contents = NULL;
-
-      if (internal_relocs != NULL
-         && elf_section_data (sec)->relocs != internal_relocs)
-       free (internal_relocs);
-      internal_relocs = NULL;
+      vpipe = VFP11_LS;
     }
 
-  return TRUE;
+  return vpipe;
+}
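
A worked call (the FADDS encoding below was assembled by hand; treat it as illustrative):

    unsigned int destmask = 0;
    int regs[3], numregs;

    /* "fadds s0, s1, s2" encodes as 0xee300a81: a single-precision
       data-processing insn with pqrs == 6, taking the vfp_binop path.  */
    enum bfd_arm_vfp11_pipe p
      = bfd_arm_vfp11_insn_decode (0xee300a81, &destmask, regs, &numregs);
    /* p == VFP11_FMAC, destmask == 0x1 (s0 written),
       regs[0] == 1, regs[1] == 2 (inputs s1 and s2), numregs == 2.  */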
 
-error_return:
-  if (contents != NULL
-      && elf_section_data (sec)->this_hdr.contents != contents)
-    free (contents);
-  if (internal_relocs != NULL
-      && elf_section_data (sec)->relocs != internal_relocs)
-    free (internal_relocs);
 
-  return FALSE;
-}
-#endif
+static int elf32_arm_compare_mapping (const void * a, const void * b);
 
 
-/* Initialise maps of ARM/Thumb/data for input BFDs.  */
+/* Look for potentially-troublesome code sequences which might trigger the
+   VFP11 denormal/antidependency erratum.  See, e.g., the ARM1136 errata sheet
+   (available from ARM) for details of the erratum.  A short version is
+   described in ld.texinfo.  */
 
-void
-bfd_elf32_arm_init_maps (bfd *abfd)
+bfd_boolean
+bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
 {
-  Elf_Internal_Sym *isymbuf;
-  Elf_Internal_Shdr *hdr;
-  unsigned int i, localsyms;
+  asection *sec;
+  bfd_byte *contents = NULL;
+  int state = 0;
+  int regs[3], numregs = 0;
+  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
+  int use_vector;
 
-  /* PR 7093: Make sure that we are dealing with an arm elf binary.  */
-  if (! is_arm_elf (abfd))
-    return;
+  if (globals == NULL)
+    return FALSE;
+
+  use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
 
-  if ((abfd->flags & DYNAMIC) != 0)
-    return;
+  /* We use a simple FSM to match troublesome VFP11 instruction sequences.
+     The states transition as follows:
 
-  hdr = & elf_symtab_hdr (abfd);
-  localsyms = hdr->sh_info;
+       0 -> 1 (vector) or 0 -> 2 (scalar)
+          A VFP FMAC-pipeline instruction has been seen. Fill
+          regs[0]..regs[numregs-1] with its input operands. Remember this
+          instruction in 'first_fmac'.
 
-  /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
-     should contain the number of local symbols, which should come before any
-     global symbols.  Mapping symbols are always local.  */
-  isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
-                                 NULL);
+       1 -> 2
+          Any instruction, except for a VFP instruction which overwrites
+          regs[*].
 
-  /* No internal symbols read?  Skip this BFD.  */
-  if (isymbuf == NULL)
-    return;
+       1 -> 3 [ -> 0 ]  or
+       2 -> 3 [ -> 0 ]
+          A VFP instruction has been seen which overwrites any of regs[*].
+          We must make a veneer!  Reset state to 0 before examining next
+          instruction.
 
-  for (i = 0; i < localsyms; i++)
-    {
-      Elf_Internal_Sym *isym = &isymbuf[i];
-      asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
-      const char *name;
+       2 -> 0
+          If we fail to match anything in state 2, reset to state 0 and reset
+          the instruction pointer to the instruction after 'first_fmac'.
 
-      if (sec != NULL
-         && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
-       {
-         name = bfd_elf_string_from_elf_section (abfd,
-           hdr->sh_link, isym->st_name);
+     If the VFP11 vector mode is in use, there must be at least two unrelated
+     instructions between anti-dependent VFP11 instructions to properly avoid
+     triggering the erratum, hence the use of the extra state 1.  */
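
As a concrete (invented) scalar-mode trace of that FSM:

    fmuls s0, s1, s2    /* state 0 -> 2; regs[] = { s1, s2 }.  */
    fadds s1, s3, s4    /* writes s1, an input of the fmuls: 2 -> 3.  */

State 3 records a veneer for the fmuls and resets to state 0.  In vector mode the intermediate state 1 also catches the same pair when one unrelated instruction sits between them.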
 
-         if (bfd_is_arm_special_symbol_name (name,
-                                             BFD_ARM_SPECIAL_SYM_TYPE_MAP))
-           elf32_arm_section_map_add (sec, name[1], isym->st_value);
-       }
-    }
-}
+  /* If we are only performing a partial link do not bother
+     to construct any glue.  */
+  if (bfd_link_relocatable (link_info))
+    return TRUE;
 
+  /* Skip if this bfd does not correspond to an ELF image.  */
+  if (! is_arm_elf (abfd))
+    return TRUE;
 
-/* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
-   say what they wanted.  */
+  /* We should have chosen a fix type by the time we get here.  */
+  BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
 
-void
-bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
-{
-  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
-  obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
+  if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
+    return TRUE;
 
-  if (globals == NULL)
-    return;
+  /* Skip this BFD if it corresponds to an executable or dynamic object.  */
+  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
+    return TRUE;
 
-  if (globals->fix_cortex_a8 == -1)
+  for (sec = abfd->sections; sec != NULL; sec = sec->next)
     {
-      /* Turn on Cortex-A8 erratum workaround for ARMv7-A.  */
-      if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
-         && (out_attr[Tag_CPU_arch_profile].i == 'A'
-             || out_attr[Tag_CPU_arch_profile].i == 0))
-       globals->fix_cortex_a8 = 1;
-      else
-       globals->fix_cortex_a8 = 0;
-    }
-}
+      unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
+      struct _arm_elf_section_data *sec_data;
 
+      /* If we don't have executable progbits, we're not interested in this
+        section.  Also skip if section is to be excluded.  */
+      if (elf_section_type (sec) != SHT_PROGBITS
+         || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
+         || (sec->flags & SEC_EXCLUDE) != 0
+         || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
+         || sec->output_section == bfd_abs_section_ptr
+         || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
+       continue;
 
-void
-bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
-{
-  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
-  obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
+      sec_data = elf32_arm_section_data (sec);
 
-  if (globals == NULL)
-    return;
-  /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix.  */
-  if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
-    {
-      switch (globals->vfp11_fix)
+      if (sec_data->mapcount == 0)
+       continue;
+
+      if (elf_section_data (sec)->this_hdr.contents != NULL)
+       contents = elf_section_data (sec)->this_hdr.contents;
+      else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
+       goto error_return;
+
+      qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
+            elf32_arm_compare_mapping);
+
+      for (span = 0; span < sec_data->mapcount; span++)
        {
-       case BFD_ARM_VFP11_FIX_DEFAULT:
-       case BFD_ARM_VFP11_FIX_NONE:
-         globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
-         break;
+         unsigned int span_start = sec_data->map[span].vma;
+         unsigned int span_end = (span == sec_data->mapcount - 1)
+                                 ? sec->size : sec_data->map[span + 1].vma;
+         char span_type = sec_data->map[span].type;
 
-       default:
-         /* Give a warning, but do as the user requests anyway.  */
-         (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
-           "workaround is not necessary for target architecture"), obfd);
-       }
-    }
-  else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
-    /* For earlier architectures, we might need the workaround, but do not
-       enable it by default.  If users is running with broken hardware, they
-       must enable the erratum fix explicitly.  */
-    globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
-}
+         /* FIXME: Only ARM mode is supported at present.  We may need to
+            support Thumb-2 mode also at some point.  */
+         if (span_type != 'a')
+           continue;
+
+         for (i = span_start; i < span_end;)
+           {
+             unsigned int next_i = i + 4;
+             unsigned int insn = bfd_big_endian (abfd)
+               ? (contents[i] << 24)
+                 | (contents[i + 1] << 16)
+                 | (contents[i + 2] << 8)
+                 | contents[i + 3]
+               : (contents[i + 3] << 24)
+                 | (contents[i + 2] << 16)
+                 | (contents[i + 1] << 8)
+                 | contents[i];
+             unsigned int writemask = 0;
+             enum bfd_arm_vfp11_pipe vpipe;
+
+             switch (state)
+               {
+               case 0:
+                 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
+                                                   &numregs);
+                 /* I'm assuming the VFP11 erratum can trigger with denorm
+                    operands on either the FMAC or the DS pipeline. This might
+                    lead to slightly overenthusiastic veneer insertion.  */
+                 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
+                   {
+                     state = use_vector ? 1 : 2;
+                     first_fmac = i;
+                     veneer_of_insn = insn;
+                   }
+                 break;
+
+               case 1:
+                 {
+                   int other_regs[3], other_numregs;
+                   vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
+                                                     other_regs,
+                                                     &other_numregs);
+                   if (vpipe != VFP11_BAD
+                       && bfd_arm_vfp11_antidependency (writemask, regs,
+                                                        numregs))
+                     state = 3;
+                   else
+                     state = 2;
+                 }
+                 break;
 
+               case 2:
+                 {
+                   int other_regs[3], other_numregs;
+                   vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
+                                                     other_regs,
+                                                     &other_numregs);
+                   if (vpipe != VFP11_BAD
+                       && bfd_arm_vfp11_antidependency (writemask, regs,
+                                                        numregs))
+                     state = 3;
+                   else
+                     {
+                       state = 0;
+                       next_i = first_fmac + 4;
+                     }
+                 }
+                 break;
 
-enum bfd_arm_vfp11_pipe
-{
-  VFP11_FMAC,
-  VFP11_LS,
-  VFP11_DS,
-  VFP11_BAD
-};
+               case 3:
+                 abort ();  /* Should be unreachable.  */
+               }
 
-/* Return a VFP register number.  This is encoded as RX:X for single-precision
-   registers, or X:RX for double-precision registers, where RX is the group of
-   four bits in the instruction encoding and X is the single extension bit.
-   RX and X fields are specified using their lowest (starting) bit.  The return
-   value is:
+             if (state == 3)
+               {
+                 elf32_vfp11_erratum_list *newerr = (elf32_vfp11_erratum_list *)
+                     bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
 
-     0...31: single-precision registers s0...s31
-     32...63: double-precision registers d0...d31.
+                 elf32_arm_section_data (sec)->erratumcount += 1;
 
-   Although X should be zero for VFP11 (encoding d0...d15 only), we might
-   encounter VFP3 instructions, so we allow the full range for DP registers.  */
+                 newerr->u.b.vfp_insn = veneer_of_insn;
 
-static unsigned int
-bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
-                    unsigned int x)
-{
-  if (is_double)
-    return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
-  else
-    return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
-}
+                 switch (span_type)
+                   {
+                   case 'a':
+                     newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
+                     break;
 
-/* Set bits in *WMASK according to a register number REG as encoded by
-   bfd_arm_vfp11_regno().  Ignore d16-d31.  */
+                   default:
+                     abort ();
+                   }
 
-static void
-bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
-{
-  if (reg < 32)
-    *wmask |= 1 << reg;
-  else if (reg < 48)
-    *wmask |= 3 << ((reg - 32) * 2);
-}
+                 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
+                                              first_fmac);
 
-/* Return TRUE if WMASK overwrites anything in REGS.  */
+                 newerr->vma = -1;
 
-static bfd_boolean
-bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
-{
-  int i;
+                 newerr->next = sec_data->erratumlist;
+                 sec_data->erratumlist = newerr;
 
-  for (i = 0; i < numregs; i++)
-    {
-      unsigned int reg = regs[i];
+                 state = 0;
+               }
 
-      if (reg < 32 && (wmask & (1 << reg)) != 0)
-       return TRUE;
+             i = next_i;
+           }
+       }
 
-      reg -= 32;
+      if (contents != NULL
+         && elf_section_data (sec)->this_hdr.contents != contents)
+       free (contents);
+      contents = NULL;
+    }
 
-      if (reg >= 16)
-       continue;
+  return TRUE;
 
-      if ((wmask & (3 << (reg * 2))) != 0)
-       return TRUE;
-    }
+error_return:
+  if (contents != NULL
+      && elf_section_data (sec)->this_hdr.contents != contents)
+    free (contents);
 
   return FALSE;
 }
 
-/* In this function, we're interested in two things: finding input registers
-   for VFP data-processing instructions, and finding the set of registers which
-   arbitrary VFP instructions may write to.  We use a 32-bit unsigned int to
-   hold the written set, so FLDM etc. are easy to deal with (we're only
-   interested in 32 SP registers or 16 dp registers, due to the VFP version
-   implemented by the chip in question).  DP registers are marked by setting
-   both SP registers in the write mask).  */
+/* Find virtual-memory addresses for VFP11 erratum veneers and return locations
+   after sections have been laid out, using specially-named symbols.  */
 
-static enum bfd_arm_vfp11_pipe
-bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
-                          int *numregs)
+void
+bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
+                                         struct bfd_link_info *link_info)
 {
-  enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
-  bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;
+  asection *sec;
+  struct elf32_arm_link_hash_table *globals;
+  char *tmp_name;
 
-  if ((insn & 0x0f000e10) == 0x0e000a00)  /* A data-processing insn.  */
-    {
-      unsigned int pqrs;
-      unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
-      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
+  if (bfd_link_relocatable (link_info))
+    return;
 
-      pqrs = ((insn & 0x00800000) >> 20)
-          | ((insn & 0x00300000) >> 19)
-          | ((insn & 0x00000040) >> 6);
+  /* Skip if this bfd does not correspond to an ELF image.  */
+  if (! is_arm_elf (abfd))
+    return;
 
-      switch (pqrs)
+  globals = elf32_arm_hash_table (link_info);
+  if (globals == NULL)
+    return;
+
+  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
+                                 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
+
+  for (sec = abfd->sections; sec != NULL; sec = sec->next)
+    {
+      struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
+      elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
+
+      for (; errnode != NULL; errnode = errnode->next)
        {
-       case 0: /* fmac[sd].  */
-       case 1: /* fnmac[sd].  */
-       case 2: /* fmsc[sd].  */
-       case 3: /* fnmsc[sd].  */
-         vpipe = VFP11_FMAC;
-         bfd_arm_vfp11_write_mask (destmask, fd);
-         regs[0] = fd;
-         regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn.  */
-         regs[2] = fm;
-         *numregs = 3;
-         break;
+         struct elf_link_hash_entry *myh;
+         bfd_vma vma;
 
-       case 4: /* fmul[sd].  */
-       case 5: /* fnmul[sd].  */
-       case 6: /* fadd[sd].  */
-       case 7: /* fsub[sd].  */
-         vpipe = VFP11_FMAC;
-         goto vfp_binop;
+         switch (errnode->type)
+           {
+           case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
+           case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
+             /* Find veneer symbol.  */
+             sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
+                      errnode->u.b.veneer->u.v.id);
 
-       case 8: /* fdiv[sd].  */
-         vpipe = VFP11_DS;
-         vfp_binop:
-         bfd_arm_vfp11_write_mask (destmask, fd);
-         regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);   /* Fn.  */
-         regs[1] = fm;
-         *numregs = 2;
-         break;
+             myh = elf_link_hash_lookup
+               (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
 
-       case 15: /* extended opcode.  */
-         {
-           unsigned int extn = ((insn >> 15) & 0x1e)
-                             | ((insn >> 7) & 1);
+             if (myh == NULL)
+               (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
+                                        "`%s'"), abfd, tmp_name);
 
-           switch (extn)
-             {
-             case 0: /* fcpy[sd].  */
-             case 1: /* fabs[sd].  */
-             case 2: /* fneg[sd].  */
-             case 8: /* fcmp[sd].  */
-             case 9: /* fcmpe[sd].  */
-             case 10: /* fcmpz[sd].  */
-             case 11: /* fcmpez[sd].  */
-             case 16: /* fuito[sd].  */
-             case 17: /* fsito[sd].  */
-             case 24: /* ftoui[sd].  */
-             case 25: /* ftouiz[sd].  */
-             case 26: /* ftosi[sd].  */
-             case 27: /* ftosiz[sd].  */
-               /* These instructions will not bounce due to underflow.  */
-               *numregs = 0;
-               vpipe = VFP11_FMAC;
-               break;
+             vma = myh->root.u.def.section->output_section->vma
+                   + myh->root.u.def.section->output_offset
+                   + myh->root.u.def.value;
 
-             case 3: /* fsqrt[sd].  */
-               /* fsqrt cannot underflow, but it can (perhaps) overwrite
-                  registers to cause the erratum in previous instructions.  */
-               bfd_arm_vfp11_write_mask (destmask, fd);
-               vpipe = VFP11_DS;
-               break;
+             errnode->u.b.veneer->vma = vma;
+             break;
 
-             case 15: /* fcvt{ds,sd}.  */
-               {
-                 int rnum = 0;
+           case VFP11_ERRATUM_ARM_VENEER:
+           case VFP11_ERRATUM_THUMB_VENEER:
+             /* Find return location.  */
+             sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
+                      errnode->u.v.id);
 
-                 bfd_arm_vfp11_write_mask (destmask, fd);
+             myh = elf_link_hash_lookup
+               (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
 
-                 /* Only FCVTSD can underflow.  */
-                 if ((insn & 0x100) != 0)
-                   regs[rnum++] = fm;
+             if (myh == NULL)
+               (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
+                                        "`%s'"), abfd, tmp_name);
 
-                 *numregs = rnum;
+             vma = myh->root.u.def.section->output_section->vma
+                   + myh->root.u.def.section->output_offset
+                   + myh->root.u.def.value;
 
-                 vpipe = VFP11_FMAC;
-               }
-               break;
+             errnode->u.v.branch->vma = vma;
+             break;
+
+           default:
+             abort ();
+           }
+       }
+    }
+
+  free (tmp_name);
+}
+
+/* Find virtual-memory addresses for STM32L4XX erratum veneers and
+   return locations after sections have been laid out, using
+   specially-named symbols.  */
+
+void
+bfd_elf32_arm_stm32l4xx_fix_veneer_locations (bfd *abfd,
+                                             struct bfd_link_info *link_info)
+{
+  asection *sec;
+  struct elf32_arm_link_hash_table *globals;
+  char *tmp_name;
+
+  if (bfd_link_relocatable (link_info))
+    return;
+
+  /* Skip if this bfd does not correspond to an ELF image.  */
+  if (! is_arm_elf (abfd))
+    return;
 
-             default:
-               return VFP11_BAD;
-             }
-         }
-         break;
+  globals = elf32_arm_hash_table (link_info);
+  if (globals == NULL)
+    return;
 
-       default:
-         return VFP11_BAD;
-       }
-    }
-  /* Two-register transfer.  */
-  else if ((insn & 0x0fe00ed0) == 0x0c400a10)
+  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
+                                 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
+
+  for (sec = abfd->sections; sec != NULL; sec = sec->next)
     {
-      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
+      struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
+      elf32_stm32l4xx_erratum_list *errnode = sec_data->stm32l4xx_erratumlist;
 
-      if ((insn & 0x100000) == 0)
+      for (; errnode != NULL; errnode = errnode->next)
        {
-         if (is_double)
-           bfd_arm_vfp11_write_mask (destmask, fm);
-         else
+         struct elf_link_hash_entry *myh;
+         bfd_vma vma;
+
+         switch (errnode->type)
            {
-             bfd_arm_vfp11_write_mask (destmask, fm);
-             bfd_arm_vfp11_write_mask (destmask, fm + 1);
-           }
-       }
+           case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
+             /* Find veneer symbol.  */
+             sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
+                      errnode->u.b.veneer->u.v.id);
 
-      vpipe = VFP11_LS;
-    }
-  else if ((insn & 0x0e100e00) == 0x0c100a00)  /* A load insn.  */
-    {
-      int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
-      unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
+             myh = elf_link_hash_lookup
+               (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
 
-      switch (puw)
-       {
-       case 0: /* Two-reg transfer.  We should catch these above.  */
-         abort ();
+             if (myh == NULL)
+               (*_bfd_error_handler) (_("%B: unable to find STM32L4XX veneer "
+                                        "`%s'"), abfd, tmp_name);
 
-       case 2: /* fldm[sdx].  */
-       case 3:
-       case 5:
-         {
-           unsigned int i, offset = insn & 0xff;
+             vma = myh->root.u.def.section->output_section->vma
+               + myh->root.u.def.section->output_offset
+               + myh->root.u.def.value;
 
-           if (is_double)
-             offset >>= 1;
+             errnode->u.b.veneer->vma = vma;
+             break;
 
-           for (i = fd; i < fd + offset; i++)
-             bfd_arm_vfp11_write_mask (destmask, i);
-         }
-         break;
+           case STM32L4XX_ERRATUM_VENEER:
+             /* Find return location.  */
+             sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
+                      errnode->u.v.id);
 
-       case 4: /* fld[sd].  */
-       case 6:
-         bfd_arm_vfp11_write_mask (destmask, fd);
-         break;
+             myh = elf_link_hash_lookup
+               (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
 
-       default:
-         return VFP11_BAD;
-       }
+             if (myh == NULL)
+               (*_bfd_error_handler) (_("%B: unable to find STM32L4XX veneer "
+                                        "`%s'"), abfd, tmp_name);
 
-      vpipe = VFP11_LS;
-    }
-  /* Single-register transfer. Note L==0.  */
-  else if ((insn & 0x0f100e10) == 0x0e000a10)
-    {
-      unsigned int opcode = (insn >> 21) & 7;
-      unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
+             vma = myh->root.u.def.section->output_section->vma
+               + myh->root.u.def.section->output_offset
+               + myh->root.u.def.value;
 
-      switch (opcode)
-       {
-       case 0: /* fmsr/fmdlr.  */
-       case 1: /* fmdhr.  */
-         /* Mark fmdhr and fmdlr as writing to the whole of the DP
-            destination register.  I don't know if this is exactly right,
-            but it is the conservative choice.  */
-         bfd_arm_vfp11_write_mask (destmask, fn);
-         break;
+             errnode->u.v.branch->vma = vma;
+             break;
 
-       case 7: /* fmxr.  */
-         break;
+           default:
+             abort ();
+           }
        }
-
-      vpipe = VFP11_LS;
     }
 
-  return vpipe;
+  free (tmp_name);
 }
 
+static inline bfd_boolean
+is_thumb2_ldmia (const insn32 insn)
+{
+  /* Encoding T2: LDM<c>.W <Rn>{!},<registers>
+     1110 - 1000 - 10W1 - rrrr - PM (0) l - llll - llll - llll.  */
+  return (insn & 0xffd02000) == 0xe8900000;
+}
 
-static int elf32_arm_compare_mapping (const void * a, const void * b);
+static inline bfd_boolean
+is_thumb2_ldmdb (const insn32 insn)
+{
+  /* Encoding T1: LDMDB<c> <Rn>{!},<registers>
+     1110 - 1001 - 00W1 - rrrr - PM (0) l - llll - llll - llll.  */
+  return (insn & 0xffd02000) == 0xe9100000;
+}
 
+static inline bfd_boolean
+is_thumb2_vldm (const insn32 insn)
+{
+  /* A6.5 Extension register load or store instruction
+     A7.7.229
+     We look for SP 32-bit and DP 64-bit registers.
+     Encoding T1 VLDM{mode}<c> <Rn>{!}, <list>
+     <list> is consecutive 64-bit registers
+     1110 - 110P - UDW1 - rrrr - vvvv - 1011 - iiii - iiii
+     Encoding T2 VLDM{mode}<c> <Rn>{!}, <list>
+     <list> is consecutive 32-bit registers
+     1110 - 110P - UDW1 - rrrr - vvvv - 1010 - iiii - iiii
+     if P==0 && U==1 && W==1 && Rn=1101 VPOP
+     if PUW=010 || PUW=011 || PUW=101 VLDM.  */
+  return
+    (((insn & 0xfe100f00) == 0xec100b00) ||
+     ((insn & 0xfe100f00) == 0xec100a00))
+    && /* (IA without !).  */
+    (((((insn << 7) >> 28) & 0xd) == 0x4)
+     /* (IA with !), includes VPOP (when reg number is SP).  */
+     || ((((insn << 7) >> 28) & 0xd) == 0x5)
+     /* (DB with !).  */
+     || ((((insn << 7) >> 28) & 0xd) == 0x9));
+}
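
Some sample encodings against the three predicates (instruction words assembled by hand for illustration):

    is_thumb2_ldmia (0xe89000ff);   /* LDMIA.W r0,  {r0-r7}   -> TRUE.  */
    is_thumb2_ldmdb (0xe9310ff0);   /* LDMDB   r1!, {r4-r11}  -> TRUE.  */
    is_thumb2_vldm  (0xecb20b10);   /* VLDMIA  r2!, {d0-d7}   -> TRUE.  */
    is_thumb2_ldmia (0x0000c8ff);   /* 16-bit LDM: matches none.  */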
 
-/* Look for potentially-troublesome code sequences which might trigger the
-   VFP11 denormal/antidependency erratum.  See, e.g., the ARM1136 errata sheet
-   (available from ARM) for details of the erratum.  A short version is
-   described in ld.texinfo.  */
+/* STM STM32L4XX erratum: this function assumes that it receives an LDM or
+   VLDM opcode and:
+   - computes the number and the mode of memory accesses;
+   - decides if the replacement should be done:
+     . replace only accesses of more than 8 words (the real bug condition),
+     . or (testing purposes only) replace all accesses.  */
+
+static bfd_boolean
+stm32l4xx_need_create_replacing_stub (const insn32 insn,
+                                     bfd_arm_stm32l4xx_fix stm32l4xx_fix)
+{
+  int nb_words = 0;
+
+  /* The field encoding the register list is the same for both LDMIA
+     and LDMDB encodings.  */
+  if (is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn))
+    nb_words = popcount (insn & 0x0000ffff);
+  else if (is_thumb2_vldm (insn))
+    nb_words = (insn & 0xff);
+
+  /* DEFAULT mode accounts for the real bug condition situation,
+     ALL mode inserts stubs for each LDM/VLDM instruction (testing).  */
+  return
+    (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_DEFAULT) ? nb_words > 8 :
+    (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_ALL) ? TRUE : FALSE;
+}
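
And the decisions it yields for a few invented operands:

    /* LDMIA.W r0, {r0-r8}: nine registers, more than 8 words.  */
    stm32l4xx_need_create_replacing_stub
      (0xe89001ff, BFD_ARM_STM32L4XX_FIX_DEFAULT);   /* -> TRUE.  */

    /* LDMIA.W r0, {r0-r7}: exactly 8 words, left alone by DEFAULT...  */
    stm32l4xx_need_create_replacing_stub
      (0xe89000ff, BFD_ARM_STM32L4XX_FIX_DEFAULT);   /* -> FALSE.  */

    /* ...but still replaced when testing with FIX_ALL.  */
    stm32l4xx_need_create_replacing_stub
      (0xe89000ff, BFD_ARM_STM32L4XX_FIX_ALL);       /* -> TRUE.  */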
+
+/* Look for potentially-troublesome code sequences which might trigger
+   the STM STM32L4XX erratum.  */
 
 bfd_boolean
-bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
+bfd_elf32_arm_stm32l4xx_erratum_scan (bfd *abfd,
+                                     struct bfd_link_info *link_info)
 {
   asection *sec;
   bfd_byte *contents = NULL;
-  int state = 0;
-  int regs[3], numregs = 0;
   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
-  int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
 
   if (globals == NULL)
     return FALSE;
 
-  /* We use a simple FSM to match troublesome VFP11 instruction sequences.
-     The states transition as follows:
-
-       0 -> 1 (vector) or 0 -> 2 (scalar)
-          A VFP FMAC-pipeline instruction has been seen. Fill
-          regs[0]..regs[numregs-1] with its input operands. Remember this
-          instruction in 'first_fmac'.
-
-       1 -> 2
-          Any instruction, except for a VFP instruction which overwrites
-          regs[*].
-
-       1 -> 3 [ -> 0 ]  or
-       2 -> 3 [ -> 0 ]
-          A VFP instruction has been seen which overwrites any of regs[*].
-          We must make a veneer!  Reset state to 0 before examining next
-          instruction.
-
-       2 -> 0
-          If we fail to match anything in state 2, reset to state 0 and reset
-          the instruction pointer to the instruction after 'first_fmac'.
-
-     If the VFP11 vector mode is in use, there must be at least two unrelated
-     instructions between anti-dependent VFP11 instructions to properly avoid
-     triggering the erratum, hence the use of the extra state 1.  */
-
   /* If we are only performing a partial link do not bother
      to construct any glue.  */
-  if (link_info->relocatable)
+  if (bfd_link_relocatable (link_info))
     return TRUE;
 
   /* Skip if this bfd does not correspond to an ELF image.  */
   if (! is_arm_elf (abfd))
     return TRUE;
 
-  /* We should have chosen a fix type by the time we get here.  */
-  BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
-
-  if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
+  if (globals->stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_NONE)
     return TRUE;
 
   /* Skip this BFD if it corresponds to an executable or dynamic object.  */
@@ -6691,7 +8506,7 @@ bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
 
   for (sec = abfd->sections; sec != NULL; sec = sec->next)
     {
-      unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
+      unsigned int i, span;
       struct _arm_elf_section_data *sec_data;
 
       /* If we don't have executable progbits, we're not interested in this
@@ -6701,7 +8516,7 @@ bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
          || (sec->flags & SEC_EXCLUDE) != 0
          || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
          || sec->output_section == bfd_abs_section_ptr
-         || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
+         || strcmp (sec->name, STM32L4XX_ERRATUM_VENEER_SECTION_NAME) == 0)
        continue;
 
       sec_data = elf32_arm_section_data (sec);
@@ -6721,113 +8536,119 @@ bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
        {
          unsigned int span_start = sec_data->map[span].vma;
          unsigned int span_end = (span == sec_data->mapcount - 1)
-                                 ? sec->size : sec_data->map[span + 1].vma;
+           ? sec->size : sec_data->map[span + 1].vma;
          char span_type = sec_data->map[span].type;
+         int itblock_current_pos = 0;
 
-         /* FIXME: Only ARM mode is supported at present.  We may need to
-            support Thumb-2 mode also at some point.  */
-         if (span_type != 'a')
+         /* Only Thumb-2 mode need be supported with this CM4-specific
+            code; we should not encounter any ARM-mode spans (span_type
+            == 'a') here.  */
+         if (span_type != 't')
            continue;
 
          for (i = span_start; i < span_end;)
            {
-             unsigned int next_i = i + 4;
-             unsigned int insn = bfd_big_endian (abfd)
-               ? (contents[i] << 24)
-                 | (contents[i + 1] << 16)
-                 | (contents[i + 2] << 8)
-                 | contents[i + 3]
-               : (contents[i + 3] << 24)
-                 | (contents[i + 2] << 16)
-                 | (contents[i + 1] << 8)
-                 | contents[i];
-             unsigned int writemask = 0;
-             enum bfd_arm_vfp11_pipe vpipe;
-
-             switch (state)
-               {
-               case 0:
-                 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
-                                                   &numregs);
-                 /* I'm assuming the VFP11 erratum can trigger with denorm
-                    operands on either the FMAC or the DS pipeline. This might
-                    lead to slightly overenthusiastic veneer insertion.  */
-                 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
-                   {
-                     state = use_vector ? 1 : 2;
-                     first_fmac = i;
-                     veneer_of_insn = insn;
-                   }
-                 break;
+             unsigned int insn = bfd_get_16 (abfd, &contents[i]);
+             bfd_boolean insn_32bit = FALSE;
+             bfd_boolean is_ldm = FALSE;
+             bfd_boolean is_vldm = FALSE;
+             bfd_boolean is_not_last_in_it_block = FALSE;
+
+             /* The first 16 bits of all 32-bit Thumb-2 instructions start
+                with opcode[15..13]=0b111, with any op1 (opcode[12..11])
+                except 0b00; e.g. 0xe890, the first half of LDMIA.W, marks
+                a 32-bit insn, while 0xb500 (PUSH {lr}) does not.
+                See 32-bit Thumb instruction encoding.  */
+             if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
+               insn_32bit = TRUE;
 
-               case 1:
-                 {
-                   int other_regs[3], other_numregs;
-                   vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
-                                                     other_regs,
-                                                     &other_numregs);
-                   if (vpipe != VFP11_BAD
-                       && bfd_arm_vfp11_antidependency (writemask, regs,
-                                                        numregs))
-                     state = 3;
-                   else
-                     state = 2;
-                 }
-                 break;
+             /* Compute the predicate that tells whether the instruction
+                is concerned by the IT block:
+                - an error is raised if there is an LDM that is not
+                  last in the IT block, since it cannot be replaced;
+                - otherwise we can create a branch at the end of the
+                  IT block, which will be controlled naturally by IT
+                  with the proper pseudo-predicate.
+                So the only interesting predicate is the one that
+                tells that we are not on the last item of an IT
+                block.  */
+             if (itblock_current_pos != 0)
+               is_not_last_in_it_block = !!--itblock_current_pos;
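
/* Aside, not part of the patch: a toy model of the bookkeeping above
   (names hypothetical).  IT_POS holds the number of instructions still
   governed by the active IT block; the predicate is true for every
   governed instruction except the last one.  */
static int it_pos;

static int
non_last_in_it_block (void)
{
  return it_pos != 0 && --it_pos != 0;
}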
 
-               case 2:
-                 {
-                   int other_regs[3], other_numregs;
-                   vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
-                                                     other_regs,
-                                                     &other_numregs);
-                   if (vpipe != VFP11_BAD
-                       && bfd_arm_vfp11_antidependency (writemask, regs,
-                                                        numregs))
-                     state = 3;
-                   else
+             if (insn_32bit)
+               {
+                 /* Load the rest of the insn (in manual-friendly order).  */
+                 insn = (insn << 16) | bfd_get_16 (abfd, &contents[i + 2]);
+                 is_ldm = is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn);
+                 is_vldm = is_thumb2_vldm (insn);
+
+                 /* Veneers are created for (v)ldm depending on
+                    option flags and memory access conditions; but
+                    if the instruction is not the last instruction of
+                    an IT block, we cannot create a jump there, so we
+                    bail out.  */
+                   if ((is_ldm || is_vldm)
+                       && stm32l4xx_need_create_replacing_stub
+                       (insn, globals->stm32l4xx_fix))
                      {
-                       state = 0;
-                       next_i = first_fmac + 4;
+                       if (is_not_last_in_it_block)
+                         {
+                           (*_bfd_error_handler)
+                             /* Note - overlong line used here to allow for translation.  */
+                             (_("\
+%B(%A+0x%lx): error: multiple load detected in non-last IT block instruction: STM32L4XX veneer cannot be generated.\n"
+                                "Use gcc option -mrestrict-it to generate only one instruction per IT block.\n"),
+                              abfd, sec, (long)i);
+                         }
+                       else
+                         {
+                           elf32_stm32l4xx_erratum_list *newerr =
+                             (elf32_stm32l4xx_erratum_list *)
+                             bfd_zmalloc
+                             (sizeof (elf32_stm32l4xx_erratum_list));
+
+                           elf32_arm_section_data (sec)
+                             ->stm32l4xx_erratumcount += 1;
+                           newerr->u.b.insn = insn;
+                           /* We create only thumb branches.  */
+                           newerr->type =
+                             STM32L4XX_ERRATUM_BRANCH_TO_VENEER;
+                           record_stm32l4xx_erratum_veneer
+                             (link_info, newerr, abfd, sec, i,
+                              is_ldm
+                              ? STM32L4XX_ERRATUM_LDM_VENEER_SIZE
+                              : STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
+                           newerr->vma = -1;
+                           newerr->next = sec_data->stm32l4xx_erratumlist;
+                           sec_data->stm32l4xx_erratumlist = newerr;
+                         }
                      }
-                 }
-                 break;
-
-               case 3:
-                 abort ();  /* Should be unreachable.  */
                }
-
-             if (state == 3)
-               {
-                 elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *)
-                     bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
-
-                 elf32_arm_section_data (sec)->erratumcount += 1;
-
-                 newerr->u.b.vfp_insn = veneer_of_insn;
-
-                 switch (span_type)
-                   {
-                   case 'a':
-                     newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
-                     break;
-
-                   default:
-                     abort ();
-                   }
-
-                 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
-                                              first_fmac);
-
-                 newerr->vma = -1;
-
-                 newerr->next = sec_data->erratumlist;
-                 sec_data->erratumlist = newerr;
-
-                 state = 0;
+             else
+               {
+                 /* A7.7.37 IT p208.
+                    IT blocks are only encoded in T1.
+                    Encoding T1: IT{x{y{z}}} <firstcond>
+                    1 0 1 1 - 1 1 1 1 - firstcond - mask
+                    If mask = '0000' then see 'related encodings'.
+                    We don't deal with UNPREDICTABLE; just ignore these.
+                    IT blocks cannot nest, so an IT instruction always
+                    starts a new block, whose size is worth
+                    computing.  */
+                 bfd_boolean is_newitblock = ((insn & 0xff00) == 0xbf00)
+                   && ((insn & 0x000f) != 0x0000);
+                 /* If we have a new IT block we compute its size.  */
+                 if (is_newitblock)
+                   {
+                     /* Compute the number of instructions controlled
+                        by the IT block; it will be used to decide
+                        whether we are inside an IT block or not.  */
+                     unsigned int mask = insn & 0x000f;
+                     itblock_current_pos = 4 - ctz (mask);
+                   }
                }
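
/* Aside, not part of the patch: a sketch of the mask-to-length mapping,
   substituting GCC's __builtin_ctz for the ctz helper used above.  The
   least significant set bit of the 4-bit IT mask (which must be
   non-zero) encodes the block length: mask 0b1000 -> 1 instruction,
   mask 0bxxx1 -> 4 instructions.  */
static int
it_block_length (unsigned int mask)
{
  return 4 - __builtin_ctz (mask & 0xf);
}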
 
-             i = next_i;
+             i += insn_32bit ? 4 : 2;
            }
        }
 
@@ -6847,93 +8668,6 @@ error_return:
   return FALSE;
 }
 
-/* Find virtual-memory addresses for VFP11 erratum veneers and return locations
-   after sections have been laid out, using specially-named symbols.  */
-
-void
-bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
-                                         struct bfd_link_info *link_info)
-{
-  asection *sec;
-  struct elf32_arm_link_hash_table *globals;
-  char *tmp_name;
-
-  if (link_info->relocatable)
-    return;
-
-  /* Skip if this bfd does not correspond to an ELF image.  */
-  if (! is_arm_elf (abfd))
-    return;
-
-  globals = elf32_arm_hash_table (link_info);
-  if (globals == NULL)
-    return;
-
-  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
-                                 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
-
-  for (sec = abfd->sections; sec != NULL; sec = sec->next)
-    {
-      struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
-      elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
-
-      for (; errnode != NULL; errnode = errnode->next)
-       {
-         struct elf_link_hash_entry *myh;
-         bfd_vma vma;
-
-         switch (errnode->type)
-           {
-           case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
-           case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
-             /* Find veneer symbol.  */
-             sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
-                      errnode->u.b.veneer->u.v.id);
-
-             myh = elf_link_hash_lookup
-               (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
-
-             if (myh == NULL)
-               (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
-                                        "`%s'"), abfd, tmp_name);
-
-             vma = myh->root.u.def.section->output_section->vma
-                   + myh->root.u.def.section->output_offset
-                   + myh->root.u.def.value;
-
-             errnode->u.b.veneer->vma = vma;
-             break;
-
-           case VFP11_ERRATUM_ARM_VENEER:
-           case VFP11_ERRATUM_THUMB_VENEER:
-             /* Find return location.  */
-             sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
-                      errnode->u.v.id);
-
-             myh = elf_link_hash_lookup
-               (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
-
-             if (myh == NULL)
-               (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
-                                        "`%s'"), abfd, tmp_name);
-
-             vma = myh->root.u.def.section->output_section->vma
-                   + myh->root.u.def.section->output_offset
-                   + myh->root.u.def.value;
-
-             errnode->u.v.branch->vma = vma;
-             break;
-
-           default:
-             abort ();
-           }
-       }
-    }
-
-  free (tmp_name);
-}
-
-
 /* Set target relocation values needed during linking.  */
 
 void
@@ -6944,9 +8678,11 @@ bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
                                 int fix_v4bx,
                                 int use_blx,
                                 bfd_arm_vfp11_fix vfp11_fix,
+                                bfd_arm_stm32l4xx_fix stm32l4xx_fix,
                                 int no_enum_warn, int no_wchar_warn,
                                 int pic_veneer, int fix_cortex_a8,
-                                int fix_arm1176)
+                                int fix_arm1176, int cmse_implib,
+                                bfd *in_implib_bfd)
 {
   struct elf32_arm_link_hash_table *globals;
 
@@ -6969,9 +8705,12 @@ bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
   globals->fix_v4bx = fix_v4bx;
   globals->use_blx |= use_blx;
   globals->vfp11_fix = vfp11_fix;
+  globals->stm32l4xx_fix = stm32l4xx_fix;
   globals->pic_veneer = pic_veneer;
   globals->fix_cortex_a8 = fix_cortex_a8;
   globals->fix_arm1176 = fix_arm1176;
+  globals->cmse_implib = cmse_implib;
+  globals->in_implib_bfd = in_implib_bfd;
 
   BFD_ASSERT (is_arm_elf (output_bfd));
   elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
@@ -7146,7 +8885,8 @@ elf32_arm_create_thumb_stub (struct bfd_link_info * info,
       --my_offset;
       myh->root.u.def.value = my_offset;
 
-      if (info->shared || globals->root.is_relocatable_executable
+      if (bfd_link_pic (info)
+         || globals->root.is_relocatable_executable
          || globals->pic_veneer)
        {
          /* For relocatable objects we can't use absolute addresses,
@@ -7457,6 +9197,8 @@ elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
         first entry.  */
       if (splt->size == 0)
        splt->size += htab->plt_header_size;
+
+      htab->next_tls_desc_index++;
     }
 
   /* Allocate the PLT entry itself, including any leading Thumb stub.  */
@@ -7469,7 +9211,10 @@ elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
     {
       /* We also need to make an entry in the .got.plt section, which
         will be placed in the .got section by the linker script.  */
-      arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc;
+      if (is_iplt_entry)
+       arm_plt->got_offset = sgotplt->size;
+      else
+       arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc;
       sgotplt->size += 4;
     }
 }
@@ -7493,9 +9238,11 @@ arm_movt_immediate (bfd_vma value)
 
    ROOT_PLT points to the offset of the PLT entry from the start of its
    section (.iplt or .plt).  ARM_PLT points to the symbol's ARM-specific
-   bookkeeping information.  */
+   bookkeeping information.
 
-static void
+   Returns FALSE if there was a problem.  */
+
+static bfd_boolean
 elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
                              union gotplt_union *root_plt,
                              struct arm_plt_info *arm_plt,
@@ -7590,7 +9337,7 @@ elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
                     + root_plt->offset);
 
       ptr = splt->contents + root_plt->offset;
-      if (htab->vxworks_p && info->shared)
+      if (htab->vxworks_p && bfd_link_pic (info))
        {
          unsigned int i;
          bfd_vma val;
@@ -7685,6 +9432,46 @@ elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
                        | (tail_displacement & 0x00ffffff),
                        ptr + 12);
        }
+      else if (using_thumb_only (htab))
+       {
+         /* PR ld/16017: Generate thumb only PLT entries.  */
+         if (!using_thumb2 (htab))
+           {
+             /* FIXME: We ought to be able to generate thumb-1 PLT
+                instructions...  */
+             _bfd_error_handler (_("%B: Warning: thumb-1 mode PLT generation not currently supported"),
+                                 output_bfd);
+             return FALSE;
+           }
+
+         /* Calculate the displacement between the PLT slot and the entry in
+            the GOT.  The 12-byte offset accounts for the value produced by
+            adding to pc in the 3rd instruction of the PLT stub.  */
+         got_displacement = got_address - (plt_address + 12);
+
+         /* As we are using 32 bit instructions we have to use 'put_arm_insn'
+            instead of 'put_thumb_insn'.  */
+         put_arm_insn (htab, output_bfd,
+                       elf32_thumb2_plt_entry[0]
+                       | ((got_displacement & 0x000000ff) << 16)
+                       | ((got_displacement & 0x00000700) << 20)
+                       | ((got_displacement & 0x00000800) >>  1)
+                       | ((got_displacement & 0x0000f000) >> 12),
+                       ptr + 0);
+         put_arm_insn (htab, output_bfd,
+                       elf32_thumb2_plt_entry[1]
+                       | ((got_displacement & 0x00ff0000)      )
+                       | ((got_displacement & 0x07000000) <<  4)
+                       | ((got_displacement & 0x08000000) >> 17)
+                       | ((got_displacement & 0xf0000000) >> 28),
+                       ptr + 4);
+         put_arm_insn (htab, output_bfd,
+                       elf32_thumb2_plt_entry[2],
+                       ptr + 8);
+         put_arm_insn (htab, output_bfd,
+                       elf32_thumb2_plt_entry[3],
+                       ptr + 12);
+       }
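
#include <stdint.h>

/* Aside, not part of the patch: the shifts above scatter the GOT
   displacement into the i:imm4 and imm3:imm8 fields of Thumb-2
   MOVW/MOVT, working on template words that appear to keep the second
   halfword in the high half.  A sketch of the same splitting on plain
   halfwords, assuming Rd = r0 and the MOVW base opcode from the
   ARM ARM (helper name hypothetical).  */
static void
movw_r0_encode (uint16_t imm16, uint16_t *hw1, uint16_t *hw2)
{
  *hw1 = 0xf240                      /* 11110 i 10 0100 0 imm4.  */
         | ((imm16 >> 12) & 0x000f)  /* imm4 <- imm16[15:12].  */
         | ((imm16 >> 1) & 0x0400);  /* i    <- imm16[11].  */
  *hw2 = ((imm16 << 4) & 0x7000)     /* imm3 <- imm16[10:8].  */
         | (imm16 & 0x00ff);         /* imm8 <- imm16[7:0].  */
}
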
       else
        {
          /* Calculate the displacement between the PLT slot and the
@@ -7693,8 +9480,6 @@ elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
             of the PLT stub.  */
          got_displacement = got_address - (plt_address + 8);
 
-         BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
-
          if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
            {
              put_thumb_insn (htab, output_bfd,
@@ -7703,21 +9488,45 @@ elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
                              elf32_arm_plt_thumb_stub[1], ptr - 2);
            }
 
-         put_arm_insn (htab, output_bfd,
-                       elf32_arm_plt_entry[0]
-                       | ((got_displacement & 0x0ff00000) >> 20),
-                       ptr + 0);
-         put_arm_insn (htab, output_bfd,
-                       elf32_arm_plt_entry[1]
-                       | ((got_displacement & 0x000ff000) >> 12),
-                       ptr+ 4);
-         put_arm_insn (htab, output_bfd,
-                       elf32_arm_plt_entry[2]
-                       | (got_displacement & 0x00000fff),
-                       ptr + 8);
+         if (!elf32_arm_use_long_plt_entry)
+           {
+             BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
+
+             put_arm_insn (htab, output_bfd,
+                           elf32_arm_plt_entry_short[0]
+                           | ((got_displacement & 0x0ff00000) >> 20),
+                           ptr + 0);
+             put_arm_insn (htab, output_bfd,
+                           elf32_arm_plt_entry_short[1]
+                           | ((got_displacement & 0x000ff000) >> 12),
+                           ptr + 4);
+             put_arm_insn (htab, output_bfd,
+                           elf32_arm_plt_entry_short[2]
+                           | (got_displacement & 0x00000fff),
+                           ptr + 8);
 #ifdef FOUR_WORD_PLT
-         bfd_put_32 (output_bfd, elf32_arm_plt_entry[3], ptr + 12);
+             bfd_put_32 (output_bfd, elf32_arm_plt_entry_short[3], ptr + 12);
 #endif
+           }
+         else
+           {
+             put_arm_insn (htab, output_bfd,
+                           elf32_arm_plt_entry_long[0]
+                           | ((got_displacement & 0xf0000000) >> 28),
+                           ptr + 0);
+             put_arm_insn (htab, output_bfd,
+                           elf32_arm_plt_entry_long[1]
+                           | ((got_displacement & 0x0ff00000) >> 20),
+                           ptr + 4);
+             put_arm_insn (htab, output_bfd,
+                           elf32_arm_plt_entry_long[2]
+                           | ((got_displacement & 0x000ff000) >> 12),
+                           ptr + 8);
+             put_arm_insn (htab, output_bfd,
+                           elf32_arm_plt_entry_long[3]
+                           | (got_displacement & 0x00000fff),
+                           ptr + 12);
+           }
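
/* Aside, not part of the patch: the long entry above consumes the whole
   32-bit GOT displacement D in four pieces, where the short entry
   asserts D[31:28] == 0 and uses only the low three groups.  A sketch;
   mnemonics are as suggested by the template names, an assumption
   here.  */
static void
split_long_plt_displacement (unsigned int d, unsigned int g[4])
{
  g[3] = (d & 0xf0000000) >> 28;   /* add ip, pc, #G3.  */
  g[2] = (d & 0x0ff00000) >> 20;   /* add ip, ip, #G2.  */
  g[1] = (d & 0x000ff000) >> 12;   /* add ip, ip, #G1.  */
  g[0] =  d & 0x00000fff;          /* ldr pc, [ip, #G0]!  */
}
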
        }
 
       /* Fill in the entry in the .rel(a).(i)plt section.  */
@@ -7750,6 +9559,8 @@ elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
       loc = srel->contents + plt_index * RELOC_SIZE (htab);
       SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
     }
+
+  return TRUE;
 }
 
 /* Some relocations map to different relocations depending on the
@@ -7945,11 +9756,11 @@ elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals,
       break;
 
     case R_ARM_THM_TLS_CALL:
-      /* GD->IE relaxation */
+      /* GD->IE relaxation.  */
       if (!is_local)
        /* add r0,pc; ldr r0, [r0]  */
        insn = 0x44786800;
-      else if (arch_has_thumb2_nop (globals))
+      else if (using_thumb2 (globals))
        /* nop.w */
        insn = 0xf3af8000;
       else
@@ -8089,18 +9900,6 @@ elf32_arm_final_link_relocate (reloc_howto_type *           howto,
   if (r_type != howto->type)
     howto = elf32_arm_howto_from_type (r_type);
 
-  /* If the start address has been set, then set the EF_ARM_HASENTRY
-     flag.  Setting this more than once is redundant, but the cost is
-     not too high, and it keeps the code simple.
-
-     The test is done  here, rather than somewhere else, because the
-     start address is only set just before the final link commences.
-
-     Note - if the user deliberately sets a start address of 0, the
-     flag will not be set.  */
-  if (bfd_get_start_address (output_bfd) != 0)
-    elf_elfheader (output_bfd)->e_flags |= EF_ARM_HASENTRY;
-
   eh = (struct elf32_arm_link_hash_entry *) h;
   sgot = globals->root.sgot;
   local_got_offsets = elf_local_got_offsets (input_bfd);
@@ -8147,7 +9946,8 @@ elf32_arm_final_link_relocate (reloc_howto_type *           howto,
   /* Find out whether the symbol has a PLT.  Set ST_VALUE, BRANCH_TYPE and
      VALUE appropriately for relocations that we resolve at link time.  */
   has_iplt_entry = FALSE;
-  if (elf32_arm_get_plt_info (input_bfd, eh, r_symndx, &root_plt, &arm_plt)
+  if (elf32_arm_get_plt_info (input_bfd, globals, eh, r_symndx, &root_plt,
+                             &arm_plt)
       && root_plt->offset != (bfd_vma) -1)
     {
       plt_offset = root_plt->offset;
@@ -8165,9 +9965,11 @@ elf32_arm_final_link_relocate (reloc_howto_type *           howto,
            plt_offset--;
          else
            {
-             elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
-                                           -1, dynreloc_value);
-             root_plt->offset |= 1;
+             if (elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
+                                               -1, dynreloc_value))
+               root_plt->offset |= 1;
+             else
+               return bfd_reloc_notsupported;
            }
 
          /* Static relocations always resolve to the .iplt entry.  */
@@ -8248,7 +10050,8 @@ elf32_arm_final_link_relocate (reloc_howto_type *           howto,
       /* When generating a shared object or relocatable executable, these
         relocations are copied into the output file to be resolved at
         run time.  */
-      if ((info->shared || globals->root.is_relocatable_executable)
+      if ((bfd_link_pic (info)
+          || globals->root.is_relocatable_executable)
          && (input_section->flags & SEC_ALLOC)
          && !(globals->vxworks_p
               && strcmp (input_section->output_section->name,
@@ -8269,6 +10072,21 @@ elf32_arm_final_link_relocate (reloc_howto_type *           howto,
          Elf_Internal_Rela outrel;
          bfd_boolean skip, relocate;
 
+         if ((r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
+             && !h->def_regular)
+           {
+             char *v = _("shared object");
+
+             if (bfd_link_executable (info))
+               v = _("PIE executable");
+
+             (*_bfd_error_handler)
+               (_("%B: relocation %s against external or undefined symbol `%s'"
+                  " cannot be used when making a %s; recompile with -fPIC"),
+                input_bfd, elf32_arm_howto_table_1[r_type].name,
+                h->root.root.string, v);
+             return bfd_reloc_notsupported;
+           }
+
          *unresolved_reloc_p = FALSE;
 
          if (sreloc == NULL && globals->root.dynamic_sections_created)
@@ -8298,8 +10116,8 @@ elf32_arm_final_link_relocate (reloc_howto_type *           howto,
            memset (&outrel, 0, sizeof outrel);
          else if (h != NULL
                   && h->dynindx != -1
-                  && (!info->shared
-                      || !info->symbolic
+                  && (!bfd_link_pic (info)
+                      || !SYMBOLIC_BIND (info, h)
                       || !h->def_regular))
            outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
          else
@@ -8593,6 +10411,9 @@ elf32_arm_final_link_relocate (reloc_howto_type *           howto,
       return bfd_reloc_ok;
 
     case R_ARM_ABS8:
+      /* PR 16202: Refetch the addend using the correct size.  */
+      if (globals->use_rel)
+       addend = bfd_get_8 (input_bfd, hit_data);
       value += addend;
 
       /* There is no way to tell whether the user intended to use a signed or
@@ -8605,6 +10426,9 @@ elf32_arm_final_link_relocate (reloc_howto_type *           howto,
       return bfd_reloc_ok;
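
#include <stdint.h>
#include <string.h>

/* Aside, not part of the patch: with REL-style relocations the addend
   is stored in the relocated field itself, so it must be fetched with
   that field's width, as the hunks above and below do for R_ARM_ABS8
   and R_ARM_ABS16.  A toy illustration (little-endian host assumed):
   a wider read folds unrelated neighbouring bytes into the addend.  */
static uint32_t
fetch_addend (const uint8_t *field, int width)
{
  uint32_t v = 0;
  memcpy (&v, field, width);   /* Width 1 or 2: the field; 4: garbage.  */
  return v;
}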
 
     case R_ARM_ABS16:
+      /* PR 16202: Refetch the addend using the correct size.  */
+      if (globals->use_rel)
+       addend = bfd_get_16 (input_bfd, hit_data);
       value += addend;
 
       /* See comment for R_ARM_ABS8.  */
@@ -8656,7 +10480,7 @@ elf32_arm_final_link_relocate (reloc_howto_type *           howto,
                          + input_section->output_offset
                          + rel->r_offset);
 
-       value = abs (relocation);
+       value = relocation;
 
        if (value >= 0x1000)
          return bfd_reloc_overflow;
@@ -8691,7 +10515,7 @@ elf32_arm_final_link_relocate (reloc_howto_type *           howto,
                          + input_section->output_offset
                          + rel->r_offset);
 
-       value = abs (relocation);
+       value = relocation;
 
        /* We do not check for overflow of this reloc.  Although strictly
           speaking this is incorrect, it appears to be necessary in order
@@ -8728,7 +10552,7 @@ elf32_arm_final_link_relocate (reloc_howto_type *           howto,
                          + input_section->output_offset
                          + rel->r_offset);
 
-       value = abs (relocation);
+       value = relocation;
 
        if (value >= 0x1000)
          return bfd_reloc_overflow;
@@ -8759,6 +10583,7 @@ elf32_arm_final_link_relocate (reloc_howto_type *           howto,
        bfd_signed_vma signed_check;
        int bitsize;
        const int thumb2 = using_thumb2 (globals);
+       const int thumb2_bl = using_thumb2_bl (globals);
 
        /* A branch to an undefined weak symbol is turned into a jump to
           the next instruction unless a PLT entry will be created.
@@ -8767,7 +10592,7 @@ elf32_arm_final_link_relocate (reloc_howto_type *           howto,
        if (h && h->root.type == bfd_link_hash_undefweak
            && plt_offset == (bfd_vma) -1)
          {
-           if (arch_has_thumb2_nop (globals))
+           if (thumb2)
              {
                bfd_put_16 (input_bfd, 0xf3af, hit_data);
                bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
@@ -8896,7 +10721,9 @@ elf32_arm_final_link_relocate (reloc_howto_type *           howto,
                     + splt->output_offset
                     + plt_offset);
 
-           if (globals->use_blx && r_type == R_ARM_THM_CALL)
+           if (globals->use_blx
+               && r_type == R_ARM_THM_CALL
+               && ! using_thumb_only (globals))
              {
                /* If the Thumb BLX instruction is available, convert
                   the BL to a BLX instruction to call the ARM-mode
@@ -8906,8 +10733,9 @@ elf32_arm_final_link_relocate (reloc_howto_type *           howto,
              }
            else
              {
-               /* Target the Thumb stub before the ARM PLT entry.  */
-               value -= PLT_THUMB_STUB_SIZE;
+               if (! using_thumb_only (globals))
+                 /* Target the Thumb stub before the ARM PLT entry.  */
+                 value -= PLT_THUMB_STUB_SIZE;
                branch_type = ST_BRANCH_TO_THUMB;
              }
            *unresolved_reloc_p = FALSE;
@@ -8932,7 +10760,7 @@ elf32_arm_final_link_relocate (reloc_howto_type *           howto,
           this relocation according to whether we're relocating for
           Thumb-2 or not.  */
        bitsize = howto->bitsize;
-       if (!thumb2)
+       if (!thumb2_bl)
          bitsize -= 2;
        reloc_signed_max = (1 << (bitsize - 1)) - 1;
        reloc_signed_min = ~reloc_signed_max;
@@ -8978,6 +10806,9 @@ elf32_arm_final_link_relocate (reloc_howto_type *           howto,
        bfd_signed_vma reloc_signed_max = 0xffffe;
        bfd_signed_vma reloc_signed_min = -0x100000;
        bfd_signed_vma signed_check;
+       enum elf32_arm_stub_type stub_type = arm_stub_none;
+       struct elf32_arm_stub_hash_entry *stub_entry;
+       struct elf32_arm_link_hash_entry *hash;
 
        /* Need to refetch the addend, reconstruct the top three bits,
           and squish the two 11 bit pieces together.  */
@@ -9009,8 +10840,25 @@ elf32_arm_final_link_relocate (reloc_howto_type *           howto,
            *unresolved_reloc_p = FALSE;
          }
 
-       /* ??? Should handle interworking?  GCC might someday try to
-          use this for tail calls.  */
+       hash = (struct elf32_arm_link_hash_entry *) h;
+
+       stub_type = arm_type_of_stub (info, input_section, rel,
+                                     st_type, &branch_type,
+                                     hash, value, sym_sec,
+                                     input_bfd, sym_name);
+       if (stub_type != arm_stub_none)
+         {
+           stub_entry = elf32_arm_get_stub_entry (input_section,
+                                                  sym_sec, h,
+                                                  rel, globals,
+                                                  stub_type);
+           if (stub_entry != NULL)
+             {
+               value = (stub_entry->stub_offset
+                        + stub_entry->stub_sec->output_offset
+                        + stub_entry->stub_sec->output_section->vma);
+             }
+         }
 
        relocation = value + signed_addend;
        relocation -= (input_section->output_section->vma
@@ -9219,7 +11067,7 @@ elf32_arm_final_link_relocate (reloc_howto_type *           howto,
                {
                  if (dynreloc_st_type == STT_GNU_IFUNC)
                    outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
-                 else if (info->shared &&
+                 else if (bfd_link_pic (info) &&
                           (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
                            || h->root.type != bfd_link_hash_undefweak))
                    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
@@ -9268,7 +11116,7 @@ elf32_arm_final_link_relocate (reloc_howto_type *           howto,
              if (globals->use_rel)
                bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off);
 
-             if (info->shared || dynreloc_st_type == STT_GNU_IFUNC)
+             if (bfd_link_pic (info) || dynreloc_st_type == STT_GNU_IFUNC)
                {
                  Elf_Internal_Rela outrel;
 
@@ -9317,7 +11165,7 @@ elf32_arm_final_link_relocate (reloc_howto_type *           howto,
          {
            /* If we don't know the module number, create a relocation
               for it.  */
-           if (info->shared)
+           if (bfd_link_pic (info))
              {
                Elf_Internal_Rela outrel;
 
@@ -9367,8 +11215,10 @@ elf32_arm_final_link_relocate (reloc_howto_type *           howto,
          {
            bfd_boolean dyn;
            dyn = globals->root.dynamic_sections_created;
-           if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
-               && (!info->shared
+           if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
+                                                bfd_link_pic (info),
+                                                h)
+               && (!bfd_link_pic (info)
                    || !SYMBOL_REFERENCES_LOCAL (info, h)))
              {
                *unresolved_reloc_p = FALSE;
@@ -9405,7 +11255,7 @@ elf32_arm_final_link_relocate (reloc_howto_type *           howto,
               now, and emit any relocations.  If both an IE GOT and a
               GD GOT are necessary, we emit the GD first.  */
 
-           if ((info->shared || indx != 0)
+           if ((bfd_link_pic (info) || indx != 0)
                && (h == NULL
                    || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
                    || h->root.type != bfd_link_hash_undefweak))
@@ -9421,7 +11271,7 @@ elf32_arm_final_link_relocate (reloc_howto_type *           howto,
                /* We should have relaxed, unless this is an undefined
                   weak symbol.  */
                BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak))
-                           || info->shared);
+                           || bfd_link_pic (info));
                BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8
                            <= globals->root.sgotplt->size);
 
@@ -9602,7 +11452,7 @@ elf32_arm_final_link_relocate (reloc_howto_type *           howto,
                else
                  {
                    lower_insn = 0xc000;
-                   /* Round up the offset to a word boundary */
+                   /* Round up the offset to a word boundary.  */
                    offset = (offset + 2) & ~2;
                  }
 
@@ -9621,7 +11471,7 @@ elf32_arm_final_link_relocate (reloc_howto_type *           howto,
       /* These relocations need special care, as besides the fact
          they point somewhere in .gotplt, the addend must be
          adjusted accordingly depending on the type of instruction
-          we refer to */
+          we refer to.  */
        else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC))
          {
            unsigned long data, insn;
@@ -9696,7 +11546,7 @@ elf32_arm_final_link_relocate (reloc_howto_type *           howto,
       }
 
     case R_ARM_TLS_LE32:
-      if (info->shared && !info->pie)
+      if (bfd_link_dll (info))
        {
          (*_bfd_error_handler)
            (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"),
@@ -9940,8 +11790,8 @@ elf32_arm_final_link_relocate (reloc_howto_type *           howto,
 
        /* Calculate the value of the relevant G_n, in encoded
           constant-with-rotation format.  */
-       g_n = calculate_group_reloc_mask (abs (signed_value), group,
-                                         &residual);
+       g_n = calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
+                                         group, &residual);
 
        /* Check for overflow if required.  */
        if ((r_type == R_ARM_ALU_PC_G0
@@ -9954,7 +11804,8 @@ elf32_arm_final_link_relocate (reloc_howto_type *           howto,
            (*_bfd_error_handler)
              (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
              input_bfd, input_section,
-             (long) rel->r_offset, abs (signed_value), howto->name);
+              (long) rel->r_offset, signed_value < 0 ? - signed_value : signed_value,
+              howto->name);
            return bfd_reloc_overflow;
          }
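
/* Aside, not part of the patch: the conditional expression replaces
   abs(), whose int argument would truncate a 64-bit bfd_signed_vma.
   For reference, a simplified standalone sketch of the G_n splitting
   that calculate_group_reloc_mask performs (unencoded; the real helper
   also returns the ARM constant-with-rotation encoding).  */
static unsigned long
group_field (unsigned long value, int n, unsigned long *residual)
{
  unsigned long g = 0;
  int k, msb, shift;

  for (k = 0; k <= n; k++)
    {
      /* Most significant 8-bit field aligned to a 2-bit boundary.  */
      for (msb = 30; msb >= 0 && !(value & (3ul << msb)); msb -= 2)
        continue;
      shift = msb > 6 ? msb - 6 : 0;
      g = value & (0xfful << shift);
      value &= ~g;
    }
  *residual = value;
  return g;
}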
 
@@ -10034,15 +11885,16 @@ elf32_arm_final_link_relocate (reloc_howto_type *           howto,
 
        /* Calculate the value of the relevant G_{n-1} to obtain
           the residual at that stage.  */
-       calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
+       calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
+                                   group - 1, &residual);
 
        /* Check for overflow.  */
        if (residual >= 0x1000)
          {
            (*_bfd_error_handler)
              (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
-             input_bfd, input_section,
-             (long) rel->r_offset, abs (signed_value), howto->name);
+              input_bfd, input_section,
+              (long) rel->r_offset, labs (signed_value), howto->name);
            return bfd_reloc_overflow;
          }
 
@@ -10118,15 +11970,16 @@ elf32_arm_final_link_relocate (reloc_howto_type *           howto,
 
        /* Calculate the value of the relevant G_{n-1} to obtain
           the residual at that stage.  */
-       calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
+       calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
+                                   group - 1, &residual);
 
        /* Check for overflow.  */
        if (residual >= 0x100)
          {
            (*_bfd_error_handler)
              (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
-             input_bfd, input_section,
-             (long) rel->r_offset, abs (signed_value), howto->name);
+              input_bfd, input_section,
+              (long) rel->r_offset, labs (signed_value), howto->name);
            return bfd_reloc_overflow;
          }
 
@@ -10202,7 +12055,8 @@ elf32_arm_final_link_relocate (reloc_howto_type *           howto,
 
        /* Calculate the value of the relevant G_{n-1} to obtain
           the residual at that stage.  */
-       calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
+       calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
+                                   group - 1, &residual);
 
        /* Check for overflow.  (The absolute value to go in the place must be
           divisible by four and, after having been divided by four, must
@@ -10212,7 +12066,7 @@ elf32_arm_final_link_relocate (reloc_howto_type *           howto,
            (*_bfd_error_handler)
              (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
              input_bfd, input_section,
-             (long) rel->r_offset, abs (signed_value), howto->name);
+             (long) rel->r_offset, labs (signed_value), howto->name);
            return bfd_reloc_overflow;
          }
 
@@ -10230,6 +12084,33 @@ elf32_arm_final_link_relocate (reloc_howto_type *           howto,
       }
       return bfd_reloc_ok;
 
+    case R_ARM_THM_ALU_ABS_G0_NC:
+    case R_ARM_THM_ALU_ABS_G1_NC:
+    case R_ARM_THM_ALU_ABS_G2_NC:
+    case R_ARM_THM_ALU_ABS_G3_NC:
+      {
+       const int shift_array[4] = {0, 8, 16, 24};
+       bfd_vma insn = bfd_get_16 (input_bfd, hit_data);
+       bfd_vma addr = value;
+       int shift = shift_array[r_type - R_ARM_THM_ALU_ABS_G0_NC];
+
+       /* Compute the address, refetching the addend for REL targets.  */
+       if (globals->use_rel)
+         signed_addend = insn & 0xff;
+       addr += signed_addend;
+       if (branch_type == ST_BRANCH_TO_THUMB)
+         addr |= 1;
+       /* Clear the imm8 field...  */
+       insn &= 0xff00;
+       /* ...and insert the selected byte of the address.  */
+       insn |= (addr >> shift) & 0xff;
+       bfd_put_16 (input_bfd, insn, hit_data);
+      }
+
+      *unresolved_reloc_p = FALSE;
+      return bfd_reloc_ok;
+
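
#include <stdint.h>

/* Aside, not part of the patch: the field update above in isolation
   (helper name hypothetical).  Byte GROUP of ADDR replaces the imm8
   field of a 16-bit Thumb ALU instruction.  */
static uint16_t
patch_thm_alu_abs_imm8 (uint16_t insn, uint32_t addr, int group)
{
  return (uint16_t) ((insn & 0xff00) | ((addr >> (8 * group)) & 0xff));
}
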
     default:
       return bfd_reloc_notsupported;
     }
@@ -10403,21 +12284,18 @@ elf32_arm_relocate_section (bfd *                  output_bfd,
              && r_symndx != STN_UNDEF
              && bfd_is_und_section (sec)
              && ELF_ST_BIND (sym->st_info) != STB_WEAK)
-           {
-             if (!info->callbacks->undefined_symbol
-                 (info, bfd_elf_string_from_elf_section
-                  (input_bfd, symtab_hdr->sh_link, sym->st_name),
-                  input_bfd, input_section,
-                  rel->r_offset, TRUE))
-               return FALSE;
-           }
+           (*info->callbacks->undefined_symbol)
+             (info, bfd_elf_string_from_elf_section
+              (input_bfd, symtab_hdr->sh_link, sym->st_name),
+              input_bfd, input_section,
+              rel->r_offset, TRUE);
 
          if (globals->use_rel)
            {
              relocation = (sec->output_section->vma
                            + sec->output_offset
                            + sym->st_value);
-             if (!info->relocatable
+             if (!bfd_link_relocatable (info)
                  && (sec->flags & SEC_MERGE)
                  && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
                {
@@ -10524,7 +12402,7 @@ elf32_arm_relocate_section (bfd *                  output_bfd,
        RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
                                         rel, 1, relend, howto, 0, contents);
 
-      if (info->relocatable)
+      if (bfd_link_relocatable (info))
        {
          /* This is a relocatable link.  We don't have to change
             anything, unless the reloc is against a section symbol,
@@ -10573,29 +12451,35 @@ elf32_arm_relocate_section (bfd *                  output_bfd,
         done, i.e., the relaxation produced the final output we want,
         and we won't let anybody mess with it. Also, we have to do
         addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
-        both in relaxed and non-relaxed cases */
-     if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
-        || (IS_ARM_TLS_GNU_RELOC (r_type)
-            && !((h ? elf32_arm_hash_entry (h)->tls_type :
-                  elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
-                 & GOT_TLS_GDESC)))
-       {
-        r = elf32_arm_tls_relax (globals, input_bfd, input_section,
-                                 contents, rel, h == NULL);
-        /* This may have been marked unresolved because it came from
-           a shared library.  But we've just dealt with that.  */
-        unresolved_reloc = 0;
-       }
-     else
-       r = bfd_reloc_continue;
+        both in relaxed and non-relaxed cases.  */
+      if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
+         || (IS_ARM_TLS_GNU_RELOC (r_type)
+             && !((h ? elf32_arm_hash_entry (h)->tls_type :
+                   elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
+                  & GOT_TLS_GDESC)))
+       {
+         r = elf32_arm_tls_relax (globals, input_bfd, input_section,
+                                  contents, rel, h == NULL);
+         /* This may have been marked unresolved because it came from
+            a shared library.  But we've just dealt with that.  */
+         unresolved_reloc = 0;
+       }
+      else
+       r = bfd_reloc_continue;
 
-     if (r == bfd_reloc_continue)
-       r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
-                                         input_section, contents, rel,
-                                         relocation, info, sec, name, sym_type,
-                                         (h ? h->target_internal
-                                          : ARM_SYM_BRANCH_TYPE (sym)), h,
-                                         &unresolved_reloc, &error_message);
+      if (r == bfd_reloc_continue)
+       {
+         unsigned char branch_type =
+           h ? ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
+             : ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
+
+         r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
+                                            input_section, contents, rel,
+                                            relocation, info, sec, name,
+                                            sym_type, branch_type, h,
+                                            &unresolved_reloc,
+                                            &error_message);
+       }
 
       /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
         because such sections are not SEC_ALLOC and thus ld.so will
@@ -10624,20 +12508,15 @@ elf32_arm_relocate_section (bfd *                  output_bfd,
              /* If the overflowing reloc was to an undefined symbol,
                 we have already printed one error message and there
                 is no point complaining again.  */
-             if ((! h ||
-                  h->root.type != bfd_link_hash_undefined)
-                 && (!((*info->callbacks->reloc_overflow)
-                       (info, (h ? &h->root : NULL), name, howto->name,
-                        (bfd_vma) 0, input_bfd, input_section,
-                        rel->r_offset))))
-                 return FALSE;
+             if (!h || h->root.type != bfd_link_hash_undefined)
+               (*info->callbacks->reloc_overflow)
+                 (info, (h ? &h->root : NULL), name, howto->name,
+                  (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
              break;
 
            case bfd_reloc_undefined:
-             if (!((*info->callbacks->undefined_symbol)
-                   (info, name, input_bfd, input_section,
-                    rel->r_offset, TRUE)))
-               return FALSE;
+             (*info->callbacks->undefined_symbol)
+               (info, name, input_bfd, input_section, rel->r_offset, TRUE);
              break;
 
            case bfd_reloc_outofrange:
@@ -10658,10 +12537,8 @@ elf32_arm_relocate_section (bfd *                  output_bfd,
 
            common_error:
              BFD_ASSERT (error_message != NULL);
-             if (!((*info->callbacks->reloc_dangerous)
-                   (info, error_message, input_bfd, input_section,
-                    rel->r_offset)))
-               return FALSE;
+             (*info->callbacks->reloc_dangerous)
+               (info, error_message, input_bfd, input_section, rel->r_offset);
              break;
            }
        }
@@ -10741,6 +12618,8 @@ insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
     &exidx_arm_data->u.exidx.unwind_edit_tail,
     INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
 
+  exidx_arm_data->additional_reloc_count++;
+
   adjust_exidx_size(exidx_sec, 8);
 }
 
@@ -10770,7 +12649,7 @@ elf32_arm_fix_exidx_coverage (asection **text_section_order,
 
   /* Walk over all EXIDX sections, and create backlinks from the corresponding
      text sections.  */
-  for (inp = info->input_bfds; inp != NULL; inp = inp->link_next)
+  for (inp = info->input_bfds; inp != NULL; inp = inp->link.next)
     {
       asection *sec;
 
@@ -10856,6 +12735,18 @@ elf32_arm_fix_exidx_coverage (asection **text_section_order,
        /* An error?  */
        continue;
 
+      if (last_unwind_type > 0)
+       {
+         unsigned int first_word = bfd_get_32 (ibfd, contents);
+         /* Add cantunwind if first unwind item does not match section
+            start.  */
+         if (first_word != sec->vma)
+           {
+             insert_cantunwind_after (last_text_sec, last_exidx_sec);
+             last_unwind_type = 0;
+           }
+       }
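
#include <stdint.h>

/* Aside, not part of the patch: word 0 of each .ARM.exidx entry is a
   prel31 (31-bit place-relative) reference to the code it covers.  A
   minimal sketch of decoding such a field, assuming a two's complement
   int32_t.  */
static int32_t
prel31_to_offset (uint32_t word)
{
  return (int32_t) (word << 1) >> 1;   /* Sign-extend bit 30.  */
}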
+
       for (j = 0; j < hdr->sh_size; j += 8)
        {
          unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
@@ -10883,7 +12774,7 @@ elf32_arm_fix_exidx_coverage (asection **text_section_order,
          else
            unwind_type = 2;
 
-         if (elide)
+         if (elide && !bfd_link_relocatable (info))
            {
              add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
                                     DELETE_EXIDX_ENTRY, NULL, j / 8);
@@ -10910,7 +12801,8 @@ elf32_arm_fix_exidx_coverage (asection **text_section_order,
     }
 
   /* Add terminating CANTUNWIND entry.  */
-  if (last_exidx_sec && last_unwind_type != 0)
+  if (!bfd_link_relocatable (info) && last_exidx_sec
+      && last_unwind_type != 0)
     insert_cantunwind_after(last_text_sec, last_exidx_sec);
 
   return TRUE;
@@ -10952,7 +12844,7 @@ elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
 
   /* Process stub sections (e.g. BE8 encoding, ...).  */
   struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
-  int i;
+  unsigned int i;
   for (i=0; i<htab->top_id; i++)
     {
       sec = htab->stub_group[i].stub_sec;
@@ -10986,6 +12878,11 @@ elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
                                           VFP11_ERRATUM_VENEER_SECTION_NAME))
        return FALSE;
 
+      if (! elf32_arm_output_glue_section (info, abfd,
+                                          globals->bfd_of_glue_owner,
+                                          STM32L4XX_ERRATUM_VENEER_SECTION_NAME))
+       return FALSE;
+
       if (! elf32_arm_output_glue_section (info, abfd,
                                           globals->bfd_of_glue_owner,
                                           ARM_BX_GLUE_SECTION_NAME))
@@ -11142,14 +13039,7 @@ elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
   elf_elfheader (obfd)->e_flags = in_flags;
   elf_flags_init (obfd) = TRUE;
 
-  /* Also copy the EI_OSABI field.  */
-  elf_elfheader (obfd)->e_ident[EI_OSABI] =
-    elf_elfheader (ibfd)->e_ident[EI_OSABI];
-
-  /* Copy object attributes.  */
-  _bfd_elf_copy_obj_attributes (ibfd, obfd);
-
-  return TRUE;
+  return _bfd_elf_copy_private_bfd_data (ibfd, obfd);
 }
 
 /* Values for Tag_ABI_PCS_R9_use.  */
@@ -11395,6 +13285,47 @@ tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
       T(V8),           /* V7E_M.  */
       T(V8)            /* V8.  */
     };
+  const int v8m_baseline[] =
+    {
+      -1,              /* PRE_V4.  */
+      -1,              /* V4.  */
+      -1,              /* V4T.  */
+      -1,              /* V5T.  */
+      -1,              /* V5TE.  */
+      -1,              /* V5TEJ.  */
+      -1,              /* V6.  */
+      -1,              /* V6KZ.  */
+      -1,              /* V6T2.  */
+      -1,              /* V6K.  */
+      -1,              /* V7.  */
+      T(V8M_BASE),     /* V6_M.  */
+      T(V8M_BASE),     /* V6S_M.  */
+      -1,              /* V7E_M.  */
+      -1,              /* V8.  */
+      -1,              /* Unused.  */
+      T(V8M_BASE)      /* V8-M BASELINE.  */
+    };
+  const int v8m_mainline[] =
+    {
+      -1,              /* PRE_V4.  */
+      -1,              /* V4.  */
+      -1,              /* V4T.  */
+      -1,              /* V5T.  */
+      -1,              /* V5TE.  */
+      -1,              /* V5TEJ.  */
+      -1,              /* V6.  */
+      -1,              /* V6KZ.  */
+      -1,              /* V6T2.  */
+      -1,              /* V6K.  */
+      T(V8M_MAIN),     /* V7.  */
+      T(V8M_MAIN),     /* V6_M.  */
+      T(V8M_MAIN),     /* V6S_M.  */
+      T(V8M_MAIN),     /* V7E_M.  */
+      -1,              /* V8.  */
+      -1,              /* Unused.  */
+      T(V8M_MAIN),     /* V8-M BASELINE.  */
+      T(V8M_MAIN)      /* V8-M MAINLINE.  */
+    };
   const int v4t_plus_v6_m[] =
     {
       -1,              /* PRE_V4.  */
@@ -11412,6 +13343,9 @@ tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
       T(V6S_M),                /* V6S_M.  */
       T(V7E_M),                /* V7E_M.  */
       T(V8),           /* V8.  */
+      -1,              /* Unused.  */
+      T(V8M_BASE),     /* V8-M BASELINE.  */
+      T(V8M_MAIN),     /* V8-M MAINLINE.  */
       T(V4T_PLUS_V6_M) /* V4T plus V6_M.  */
     };
   const int *comb[] =
@@ -11423,6 +13357,9 @@ tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
       v6s_m,
       v7e_m,
       v8,
+      NULL,
+      v8m_baseline,
+      v8m_mainline,
       /* Pseudo-architecture.  */
       v4t_plus_v6_m
     };
@@ -11455,7 +13392,7 @@ tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
   if (tagh <= TAG_CPU_ARCH_V6KZ)
     return result;
 
-  result = comb[tagh - T(V6T2)][tagl];
+  result = comb[tagh - T(V6T2)] ? comb[tagh - T(V6T2)][tagl] : -1;
 
   /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
      as the canonical version.  */
@@ -11531,6 +13468,7 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
   static const int order_021[3] = {0, 2, 1};
   int i;
   bfd_boolean result = TRUE;
+  const char *sec_name = get_elf_backend_data (ibfd)->obj_attrs_section;
 
   /* Skip the linker stubs file.  This preserves previous behavior
      of accepting unknown attributes in the first input file - but
@@ -11538,6 +13476,12 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
   if (ibfd->flags & BFD_LINKER_CREATED)
     return TRUE;
 
+  /* Skip any input that does not have an attribute section.
+     This makes it possible to link object files without an attribute
+     section with any others.  */
+  if (bfd_get_section_by_name (ibfd, sec_name) == NULL)
+    return TRUE;
+
   if (!elf_known_obj_attributes_proc (obfd)[0].i)
     {
       /* This is the first object.  Copy the attributes.  */
@@ -11577,10 +13521,14 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
   /* This needs to happen before Tag_ABI_FP_number_model is merged.  */
   if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
     {
-      /* Ignore mismatches if the object doesn't use floating point.  */
-      if (out_attr[Tag_ABI_FP_number_model].i == 0)
+      /* Ignore mismatches if the object doesn't use floating point or is
+        floating point ABI independent.  */
+      if (out_attr[Tag_ABI_FP_number_model].i == AEABI_FP_number_model_none
+         || (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
+             && out_attr[Tag_ABI_VFP_args].i == AEABI_VFP_args_compatible))
        out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
-      else if (in_attr[Tag_ABI_FP_number_model].i != 0)
+      else if (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
+              && in_attr[Tag_ABI_VFP_args].i != AEABI_VFP_args_compatible)
        {
          _bfd_error_handler
            (_("error: %B uses VFP register arguments, %B does not"),
@@ -11597,7 +13545,7 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
        {
        case Tag_CPU_raw_name:
        case Tag_CPU_name:
-         /* These are merged after Tag_CPU_arch. */
+         /* These are merged after Tag_CPU_arch.  */
          break;
 
        case Tag_ABI_optimization_goals:
@@ -11609,7 +13557,9 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
          {
            int secondary_compat = -1, secondary_compat_out = -1;
            unsigned int saved_out_attr = out_attr[i].i;
-           static const char *name_table[] = {
+           int arch_attr;
+           static const char *name_table[] =
+             {
                /* These aren't real CPU names, but we can't guess
                   that from the architecture version alone.  */
                "Pre v4",
@@ -11625,16 +13575,26 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
                "ARM v7",
                "ARM v6-M",
                "ARM v6S-M",
-               "ARM v8"
+               "ARM v8",
+               "",
+               "ARM v8-M.baseline",
+               "ARM v8-M.mainline",
            };
 
            /* Merge Tag_CPU_arch and Tag_also_compatible_with.  */
            secondary_compat = get_secondary_compatible_arch (ibfd);
            secondary_compat_out = get_secondary_compatible_arch (obfd);
-           out_attr[i].i = tag_cpu_arch_combine (ibfd, out_attr[i].i,
-                                                 &secondary_compat_out,
-                                                 in_attr[i].i,
-                                                 secondary_compat);
+           arch_attr = tag_cpu_arch_combine (ibfd, out_attr[i].i,
+                                             &secondary_compat_out,
+                                             in_attr[i].i,
+                                             secondary_compat);
+
+           /* Return with error if failed to merge.  */
+           if (arch_attr == -1)
+             return FALSE;
+
+           out_attr[i].i = arch_attr;
+
            set_secondary_compatible_arch (obfd, secondary_compat_out);
 
            /* Merge Tag_CPU_name and Tag_CPU_raw_name.  */
@@ -11751,7 +13711,7 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
              else if (in_attr[i].i == 0
                       || (in_attr[i].i == 'S'
                           && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
-               ; /* Do nothing. */
+               ; /* Do nothing.  */
              else
                {
                  _bfd_error_handler
@@ -11763,14 +13723,39 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
                }
            }
          break;
+
+       case Tag_DSP_extension:
+         /* No need to change the output value if any of:
+            - pre (<=) ARMv5T input architecture (does not have DSP);
+            - M profile input that is not ARMv7E-M and does not have
+              DSP.  */
+         if (in_attr[Tag_CPU_arch].i <= 3
+             || (in_attr[Tag_CPU_arch_profile].i == 'M'
+                 && in_attr[Tag_CPU_arch].i != 13
+                 && in_attr[i].i == 0))
+           ; /* Do nothing.  */
+         /* The output value should be 0 if DSP is part of the output
+            architecture, i.e.
+            - post (>=) ARMv5TE architecture output;
+            - A, R or S profile output, or ARMv7E-M output
+              architecture.  */
+         else if (out_attr[Tag_CPU_arch].i >= 4
+                  && (out_attr[Tag_CPU_arch_profile].i == 'A'
+                      || out_attr[Tag_CPU_arch_profile].i == 'R'
+                      || out_attr[Tag_CPU_arch_profile].i == 'S'
+                      || out_attr[Tag_CPU_arch].i == 13))
+           out_attr[i].i = 0;
+         /* Otherwise, the DSP instructions are added and are not part
+            of the output architecture.  */
+         else
+           out_attr[i].i = 1;
+         break;
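
/* Aside, not part of the patch: the merge rule above restated as a
   standalone sketch (helper hypothetical; arch numbers follow
   Tag_CPU_arch, where 3 is v5T and 13 is v7E-M).  Returns the new
   output value, or -1 for "leave the output unchanged".  */
static int
merge_dsp_extension (int in_arch, int in_profile, int in_dsp,
                     int out_arch, int out_profile)
{
  if (in_arch <= 3
      || (in_profile == 'M' && in_arch != 13 && in_dsp == 0))
    return -1;
  if (out_arch >= 4
      && (out_profile == 'A' || out_profile == 'R'
          || out_profile == 'S' || out_arch == 13))
    return 0;
  return 1;
}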
+
        case Tag_FP_arch:
            {
              /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
                 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
                 when it's 0.  It might mean absence of FP hardware if
-                Tag_FP_arch is zero, otherwise it is effectively SP + DP.  */
+                Tag_FP_arch is zero.  */
 
-#define VFP_VERSION_COUNT 8
+#define VFP_VERSION_COUNT 9
              static const struct
              {
                  int ver;
@@ -11784,7 +13769,8 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
                  {3, 16},
                  {4, 32},
                  {4, 16},
-                 {8, 32}
+                 {8, 32},
+                 {8, 16}
                };
              int ver;
              int regs;
@@ -11809,7 +13795,7 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
                }
 
              /* Both the input and the output have nonzero Tag_FP_arch.
-                So Tag_ABI_HardFP_use is (SP & DP) when it's zero.  */
+                So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it
+                is zero.  */
 
              /* If both the input and the output have zero Tag_ABI_HardFP_use,
                 do nothing.  */
@@ -11817,10 +13803,10 @@ elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
                  && out_attr[Tag_ABI_HardFP_use].i == 0)
                ;
              /* If the input and the output have different Tag_ABI_HardFP_use,
-                the combination of them is 3 (SP & DP).  */
+                the combination of them is 0 (implied by Tag_FP_arch).  */
              else if (in_attr[Tag_ABI_HardFP_use].i
                       != out_attr[Tag_ABI_HardFP_use].i)
-               out_attr[Tag_ABI_HardFP_use].i = 3;
+               out_attr[Tag_ABI_HardFP_use].i = 0;
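+             /* (Illustration, not from the original patch: an input
+                with Tag_ABI_HardFP_use == 1, single precision only,
+                merged against an output with 0 thus ends up as 0,
+                i.e. whatever the merged Tag_FP_arch permits.)  */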
 
              /* Now we can handle Tag_FP_arch.  */
 
@@ -12189,10 +14175,7 @@ elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
   if (flags & EF_ARM_RELEXEC)
     fprintf (file, _(" [relocatable executable]"));
 
-  if (flags & EF_ARM_HASENTRY)
-    fprintf (file, _(" [has entry point]"));
-
-  flags &= ~ (EF_ARM_RELEXEC | EF_ARM_HASENTRY);
+  flags &= ~EF_ARM_RELEXEC;
 
   if (flags)
     fprintf (file, _("<Unrecognised flag bits set>"));
@@ -12258,7 +14241,7 @@ elf32_arm_gc_sweep_hook (bfd *                     abfd,
   const Elf_Internal_Rela *rel, *relend;
   struct elf32_arm_link_hash_table * globals;
 
-  if (info->relocatable)
+  if (bfd_link_relocatable (info))
     return TRUE;
 
   globals = elf32_arm_hash_table (info);
@@ -12356,11 +14339,11 @@ elf32_arm_gc_sweep_hook (bfd *                     abfd,
        case R_ARM_THM_MOVW_PREL_NC:
        case R_ARM_THM_MOVT_PREL:
          /* Should the interworking branches be here also?  */
-         if ((info->shared || globals->root.is_relocatable_executable)
+         if ((bfd_link_pic (info) || globals->root.is_relocatable_executable)
              && (sec->flags & SEC_ALLOC) != 0)
            {
              if (h == NULL
-                 && (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI))
+                 && elf32_arm_howto_from_type (r_type)->pc_relative)
                {
                  call_reloc_p = TRUE;
                  may_need_local_target_p = TRUE;
@@ -12377,7 +14360,8 @@ elf32_arm_gc_sweep_hook (bfd *                     abfd,
        }
 
       if (may_need_local_target_p
-         && elf32_arm_get_plt_info (abfd, eh, r_symndx, &root_plt, &arm_plt))
+         && elf32_arm_get_plt_info (abfd, globals, eh, r_symndx, &root_plt,
+                                    &arm_plt))
        {
          /* If PLT refcount book-keeping is wrong and too low, we'll
             see a zero value (going to -1) for the root PLT reference
@@ -12454,7 +14438,7 @@ elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
   bfd_boolean may_need_local_target_p;
   unsigned long nsyms;
 
-  if (info->relocatable)
+  if (bfd_link_relocatable (info))
     return TRUE;
 
   BFD_ASSERT (is_arm_elf (abfd));
@@ -12571,6 +14555,9 @@ elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
                default: tls_type = GOT_NORMAL; break;
                }
 
+             if (!bfd_link_executable (info) && (tls_type & GOT_TLS_IE))
+               info->flags |= DF_STATIC_TLS;
+
              if (h != NULL)
                {
                  h->got.refcount++;
@@ -12601,7 +14588,7 @@ elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
              /* If the symbol is accessed in both IE and GDESC
                 method, we're able to relax. Turn off the GDESC flag,
                 without messing up with any other kind of tls types
-                that may be involved */
+                that may be involved.  */
              if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
                tls_type &= ~GOT_TLS_GDESC;
 
@@ -12647,13 +14634,15 @@ elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
                may_need_local_target_p = TRUE;
                break;
              }
+           else
+             goto jump_over;
+
            /* Fall through.  */
 
          case R_ARM_MOVW_ABS_NC:
          case R_ARM_MOVT_ABS:
          case R_ARM_THM_MOVW_ABS_NC:
          case R_ARM_THM_MOVT_ABS:
-           if (info->shared)
+           if (bfd_link_pic (info))
              {
                (*_bfd_error_handler)
                  (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
@@ -12666,6 +14655,12 @@ elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
            /* Fall through.  */
          case R_ARM_ABS32:
          case R_ARM_ABS32_NOI:
+       jump_over:
+           if (h != NULL && bfd_link_executable (info))
+             h->pointer_equality_needed = 1;
+           /* Fall through.  */
          case R_ARM_REL32:
          case R_ARM_REL32_NOI:
          case R_ARM_MOVW_PREL_NC:
@@ -12674,11 +14669,11 @@ elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
          case R_ARM_THM_MOVT_PREL:
 
            /* Should the interworking branches be listed here?  */
-           if ((info->shared || htab->root.is_relocatable_executable)
+           if ((bfd_link_pic (info) || htab->root.is_relocatable_executable)
                && (sec->flags & SEC_ALLOC) != 0)
              {
                if (h == NULL
-                   && (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI))
+                   && elf32_arm_howto_from_type (r_type)->pc_relative)
                  {
                    /* In shared libraries and relocatable executables,
                       we treat local relative references as calls;
@@ -12824,7 +14819,7 @@ elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
              p->pc_count = 0;
            }
 
-         if (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
+         if (elf32_arm_howto_from_type (r_type)->pc_relative)
            p->pc_count += 1;
          p->count += 1;
        }
@@ -12834,7 +14829,11 @@ elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
 }
 
 /* Unwinding tables are not referenced directly.  This pass marks them as
-   required if the corresponding code section is marked.  */
+   required if the corresponding code section is marked.  Similarly, ARMv8-M
+   secure entry functions can only be referenced by SG veneers which are
+   created after the GC process.  They need to be marked in case they reside in
+   their own section (as would be the case if code was compiled with
+   -ffunction-sections).  */
 
 static bfd_boolean
 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
@@ -12842,17 +14841,28 @@ elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
 {
   bfd *sub;
   Elf_Internal_Shdr **elf_shdrp;
-  bfd_boolean again;
+  asection *cmse_sec;
+  obj_attribute *out_attr;
+  Elf_Internal_Shdr *symtab_hdr;
+  unsigned i, sym_count, ext_start;
+  const struct elf_backend_data *bed;
+  struct elf_link_hash_entry **sym_hashes;
+  struct elf32_arm_link_hash_entry *cmse_hash;
+  bfd_boolean again, is_v8m, first_bfd_browse = TRUE;
 
   _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);
 
+  out_attr = elf_known_obj_attributes_proc (info->output_bfd);
+  is_v8m = out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
+          && out_attr[Tag_CPU_arch_profile].i == 'M';
+
   /* Marking EH data may cause additional code sections to be marked,
      requiring multiple passes.  */
   again = TRUE;
   while (again)
     {
       again = FALSE;
-      for (sub = info->input_bfds; sub != NULL; sub = sub->link_next)
+      for (sub = info->input_bfds; sub != NULL; sub = sub->link.next)
        {
          asection *o;
 
@@ -12876,7 +14886,34 @@ elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
                    return FALSE;
                }
            }
+
+         /* Mark sections holding ARMv8-M secure entry functions.  We mark
+            all of them during the first browse of the input BFDs, so
+            there is no need for a second pass.  */
+         if (is_v8m && first_bfd_browse)
+           {
+             sym_hashes = elf_sym_hashes (sub);
+             bed = get_elf_backend_data (sub);
+             symtab_hdr = &elf_tdata (sub)->symtab_hdr;
+             sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
+             ext_start = symtab_hdr->sh_info;
+
+             /* Scan symbols.  */
+             for (i = ext_start; i < sym_count; i++)
+               {
+                 cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);
+
+                 /* Assume it is a special symbol.  If not, cmse_scan will
+                    warn about it and the user can do something about it.  */
+                 if (ARM_GET_SYM_CMSE_SPCL (cmse_hash->root.target_internal))
+                   {
+                     cmse_sec = cmse_hash->root.root.u.def.section;
+                     if (!_bfd_elf_gc_mark (info, cmse_sec, gc_mark_hook))
+                       return FALSE;
+                   }
+               }
+           }
        }
+      first_bfd_browse = FALSE;
     }
 
   return TRUE;
@@ -12897,8 +14934,8 @@ elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
 
 static bfd_boolean
 arm_elf_find_function (bfd *         abfd ATTRIBUTE_UNUSED,
-                      asection *    section,
                       asymbol **    symbols,
+                      asection *    section,
                       bfd_vma       offset,
                       const char ** filename_ptr,
                       const char ** functionname_ptr)
@@ -12959,31 +14996,33 @@ arm_elf_find_function (bfd *         abfd ATTRIBUTE_UNUSED,
 
 static bfd_boolean
 elf32_arm_find_nearest_line (bfd *          abfd,
-                            asection *     section,
                             asymbol **     symbols,
+                            asection *     section,
                             bfd_vma        offset,
                             const char **  filename_ptr,
                             const char **  functionname_ptr,
-                            unsigned int * line_ptr)
+                            unsigned int * line_ptr,
+                            unsigned int * discriminator_ptr)
 {
   bfd_boolean found = FALSE;
 
-  /* We skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain uses it.  */
-
-  if (_bfd_dwarf2_find_nearest_line (abfd, dwarf_debug_sections,
-                                    section, symbols, offset,
+  if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
                                     filename_ptr, functionname_ptr,
-                                    line_ptr, NULL, 0,
+                                    line_ptr, discriminator_ptr,
+                                    dwarf_debug_sections, 0,
                                     & elf_tdata (abfd)->dwarf2_find_line_info))
     {
       if (!*functionname_ptr)
-       arm_elf_find_function (abfd, section, symbols, offset,
+       arm_elf_find_function (abfd, symbols, section, offset,
                               *filename_ptr ? NULL : filename_ptr,
                               functionname_ptr);
 
       return TRUE;
     }
 
+  /* Skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain
+     uses DWARF1.  */
+
   if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
                                             & found, filename_ptr,
                                             functionname_ptr, line_ptr,
@@ -12996,7 +15035,7 @@ elf32_arm_find_nearest_line (bfd *          abfd,
   if (symbols == NULL)
     return FALSE;
 
-  if (! arm_elf_find_function (abfd, section, symbols, offset,
+  if (! arm_elf_find_function (abfd, symbols, section, offset,
                               filename_ptr, functionname_ptr))
     return FALSE;
 
@@ -13115,7 +15154,7 @@ elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
      be handled correctly by relocate_section.  Relocatable executables
      can reference data in shared objects directly, so we don't need to
      do anything here.  */
-  if (info->shared || globals->root.is_relocatable_executable)
+  if (bfd_link_pic (info) || globals->root.is_relocatable_executable)
     return TRUE;
 
   /* We must allocate the symbol in our .dynbss section, which will
@@ -13130,11 +15169,13 @@ elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
   s = bfd_get_linker_section (dynobj, ".dynbss");
   BFD_ASSERT (s != NULL);
 
-  /* We must generate a R_ARM_COPY reloc to tell the dynamic linker to
-     copy the initial value out of the dynamic object and into the
-     runtime process image.  We need to remember the offset into the
+  /* If allowed, we must generate a R_ARM_COPY reloc to tell the dynamic
+     linker to copy the initial value out of the dynamic object and into
+     the runtime process image.  We need to remember the offset into the
      .rel(a).bss section we are going to use.  */
-  if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
+  if (info->nocopyreloc == 0
+      && (h->root.u.def.section->flags & SEC_ALLOC) != 0
+      && h->size != 0)
     {
       asection *srel;
 
@@ -13143,7 +15184,7 @@ elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
       h->needs_copy = 1;
     }
 
-  return _bfd_elf_adjust_dynamic_copy (h, s);
+  return _bfd_elf_adjust_dynamic_copy (info, h, s);
 }
 
 /* Allocate space in .plt, .got and associated reloc sections for
@@ -13197,7 +15238,7 @@ allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
            h->got.refcount = 0;
        }
 
-      if (info->shared
+      if (bfd_link_pic (info)
          || eh->is_iplt
          || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
        {
@@ -13208,7 +15249,7 @@ allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
             location in the .plt.  This is required to make function
             pointers compare as equal between the normal executable and
             the shared library.  */
-         if (! info->shared
+         if (! bfd_link_pic (info)
              && !h->def_regular)
            {
              h->root.u.def.section = htab->root.splt;
@@ -13217,15 +15258,13 @@ allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
              /* Make sure the function is not marked as Thumb, in case
                 it is the target of an ABS32 relocation, which will
                 point to the PLT entry.  */
-             h->target_internal = ST_BRANCH_TO_ARM;
+             ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
            }
 
-         htab->next_tls_desc_index++;
-
          /* VxWorks executables have a second set of relocations for
             each PLT entry.  They go in a separate relocation section,
             which is processed by the kernel loader.  */
-         if (htab->vxworks_p && !info->shared)
+         if (htab->vxworks_p && !bfd_link_pic (info))
            {
              /* There is a relocation for the initial PLT entry:
                 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_.  */
@@ -13312,13 +15351,15 @@ allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
          dyn = htab->root.dynamic_sections_created;
 
          indx = 0;
-         if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
-             && (!info->shared
+         if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
+                                              bfd_link_pic (info),
+                                              h)
+             && (!bfd_link_pic (info)
                  || !SYMBOL_REFERENCES_LOCAL (info, h)))
            indx = h->dynindx;
 
          if (tls_type != GOT_NORMAL
-             && (info->shared || indx != 0)
+             && (bfd_link_pic (info) || indx != 0)
              && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
                  || h->root.type != bfd_link_hash_undefweak))
            {
@@ -13352,8 +15393,9 @@ allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
               they all resolve dynamically instead.  Reserve room for the
               GOT entry's R_ARM_IRELATIVE relocation.  */
            elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
-         else if (info->shared && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
-                                   || h->root.type != bfd_link_hash_undefweak))
+         else if (bfd_link_pic (info)
+                  && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
+                      || h->root.type != bfd_link_hash_undefweak))
            /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation.  */
            elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
        }
@@ -13364,7 +15406,7 @@ allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
   /* Allocate stubs for exported Thumb functions on v4t.  */
   if (!htab->use_blx && h->dynindx != -1
       && h->def_regular
-      && h->target_internal == ST_BRANCH_TO_THUMB
+      && ARM_GET_SYM_BRANCH_TYPE (h->target_internal) == ST_BRANCH_TO_THUMB
       && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
     {
       struct elf_link_hash_entry * th;
@@ -13384,12 +15426,12 @@ allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
       myh = (struct elf_link_hash_entry *) bh;
       myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
       myh->forced_local = 1;
-      myh->target_internal = ST_BRANCH_TO_THUMB;
+      ARM_SET_SYM_BRANCH_TYPE (myh->target_internal, ST_BRANCH_TO_THUMB);
       eh->export_glue = myh;
       th = record_arm_to_thumb_glue (info, h);
       /* Point the symbol at the stub.  */
       h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
-      h->target_internal = ST_BRANCH_TO_ARM;
+      ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
       h->root.u.def.section = th->root.u.def.section;
       h->root.u.def.value = th->root.u.def.value & ~1;
     }
@@ -13403,14 +15445,14 @@ allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
      space for pc-relative relocs that have become local due to symbol
      visibility changes.  */
 
-  if (info->shared || htab->root.is_relocatable_executable)
+  if (bfd_link_pic (info) || htab->root.is_relocatable_executable)
     {
-      /* The only relocs that use pc_count are R_ARM_REL32 and
-        R_ARM_REL32_NOI, which will appear on something like
-        ".long foo - .".  We want calls to protected symbols to resolve
-        directly to the function rather than going via the plt.  If people
-        want function pointer comparisons to work as expected then they
-        should avoid writing assembly like ".long foo - .".  */
+      /* Relocs that use pc_count are PC-relative forms, which will appear
+        on something like ".long foo - ." or "movw REG, foo - .".  We want
+        calls to protected symbols to resolve directly to the function
+        rather than going via the plt.  If people want function pointer
+        comparisons to work as expected then they should avoid writing
+        assembly like ".long foo - .".  */
       if (SYMBOL_CALLS_LOCAL (info, h))
        {
          struct elf_dyn_relocs **pp;
@@ -13579,7 +15621,7 @@ elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
   if (elf_hash_table (info)->dynamic_sections_created)
     {
       /* Set the contents of the .interp section to the interpreter.  */
-      if (info->executable)
+      if (bfd_link_executable (info) && !info->nointerp)
        {
          s = bfd_get_linker_section (dynobj, ".interp");
          BFD_ASSERT (s != NULL);
@@ -13590,7 +15632,7 @@ elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
 
   /* Set up .got offsets for local syms, and space for local dynamic
      relocs.  */
-  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
+  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
     {
       bfd_signed_vma *local_got;
       bfd_signed_vma *end_local_got;
@@ -13731,13 +15773,13 @@ elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
                  && (local_iplt == NULL
                      || local_iplt->arm.noncall_refcount == 0))
                elf32_arm_allocate_irelocs (info, srel, 1);
-             else if (info->shared || output_bfd->flags & DYNAMIC)
+             else if (bfd_link_pic (info) || output_bfd->flags & DYNAMIC)
                {
-                 if ((info->shared && !(*local_tls_type & GOT_TLS_GDESC))
+                 if ((bfd_link_pic (info) && !(*local_tls_type & GOT_TLS_GDESC))
                      || *local_tls_type & GOT_TLS_GD)
                    elf32_arm_allocate_dynrelocs (info, srel, 1);
 
-                 if (info->shared && *local_tls_type & GOT_TLS_GDESC)
+                 if (bfd_link_pic (info) && *local_tls_type & GOT_TLS_GDESC)
                    {
                      elf32_arm_allocate_dynrelocs (info,
                                                    htab->root.srelplt, 1);
@@ -13756,7 +15798,7 @@ elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
         for R_ARM_TLS_LDM32 relocations.  */
       htab->tls_ldm_got.offset = htab->root.sgot->size;
       htab->root.sgot->size += 8;
-      if (info->shared)
+      if (bfd_link_pic (info))
        elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
     }
   else
@@ -13767,7 +15809,7 @@ elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
   elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info);
 
   /* Here we rummage through the found bfds to collect glue information.  */
-  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
+  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
     {
       if (! is_arm_elf (ibfd))
        continue;
@@ -13776,7 +15818,8 @@ elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
       bfd_elf32_arm_init_maps (ibfd);
 
       if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
-         || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info))
+         || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info)
+         || !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd, info))
        /* xgettext:c-format */
        _bfd_error_handler (_("Errors encountered processing file %s"),
                            ibfd->filename);
@@ -13892,7 +15935,7 @@ elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
 #define add_dynamic_entry(TAG, VAL) \
   _bfd_elf_add_dynamic_entry (info, TAG, VAL)
 
-     if (info->executable)
+     if (bfd_link_executable (info))
        {
          if (!add_dynamic_entry (DT_DEBUG, 0))
            return FALSE;
@@ -13960,7 +16003,7 @@ elf32_arm_always_size_sections (bfd *output_bfd,
 {
   asection *tls_sec;
 
-  if (info->relocatable)
+  if (bfd_link_relocatable (info))
     return TRUE;
 
   tls_sec = elf_hash_table (info)->tls_sec;
@@ -14017,20 +16060,25 @@ elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
       if (!eh->is_iplt)
        {
          BFD_ASSERT (h->dynindx != -1);
-         elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
-                                       h->dynindx, 0);
+         if (! elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
+                                             h->dynindx, 0))
+           return FALSE;
        }
 
       if (!h->def_regular)
        {
          /* Mark the symbol as undefined, rather than as defined in
-            the .plt section.  Leave the value alone.  */
+            the .plt section.  */
          sym->st_shndx = SHN_UNDEF;
-         /* If the symbol is weak, we do need to clear the value.
+         /* If the symbol is weak, we need to clear the value.
             Otherwise, the PLT entry would provide a definition for
             the symbol even if the symbol wasn't defined anywhere,
-            and so the symbol would never be NULL.  */
-         if (!h->ref_regular_nonweak)
+            and so the symbol would never be NULL.  Leave the value if
+            there were any relocations where pointer equality matters
+            (this is a clue for the dynamic linker, to make function
+            pointer comparisons work between an application and a shared
+            library).  */
+         if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
            sym->st_value = 0;
        }
       else if (eh->is_iplt && eh->plt.noncall_refcount != 0)
@@ -14038,7 +16086,7 @@ elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
          /* At least one non-call relocation references this .iplt entry,
             so the .iplt entry is the function's canonical address.  */
          sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC);
-         sym->st_target_internal = ST_BRANCH_TO_ARM;
+         ARM_SET_SYM_BRANCH_TYPE (sym->st_target_internal, ST_BRANCH_TO_ARM);
          sym->st_shndx = (_bfd_elf_section_from_bfd_section
                           (output_bfd, htab->root.iplt->output_section));
          sym->st_value = (h->plt.offset
@@ -14201,27 +16249,26 @@ elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info
              goto get_vma_if_bpabi;
 
            case DT_PLTGOT:
-             name = ".got";
+             name = htab->symbian_p ? ".got" : ".got.plt";
              goto get_vma;
            case DT_JMPREL:
              name = RELOC_SECTION (htab, ".plt");
            get_vma:
-             s = bfd_get_section_by_name (output_bfd, name);
+             s = bfd_get_linker_section (dynobj, name);
              if (s == NULL)
                {
-                 /* PR ld/14397: Issue an error message if a required section is missing.  */
                  (*_bfd_error_handler)
-                   (_("error: required section '%s' not found in the linker script"), name);
+                   (_("could not find section %s"), name);
                  bfd_set_error (bfd_error_invalid_operation);
                  return FALSE;
                }
              if (!htab->symbian_p)
-               dyn.d_un.d_ptr = s->vma;
+               dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
              else
                /* In the BPABI, tags in the PT_DYNAMIC section point
                   at the file offset, not the memory address, for the
                   convenience of the post linker.  */
-               dyn.d_un.d_ptr = s->filepos;
+               dyn.d_un.d_ptr = s->output_section->filepos + s->output_offset;
              bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
              break;
 
@@ -14322,7 +16369,9 @@ elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info
 
                  eh = elf_link_hash_lookup (elf_hash_table (info), name,
                                             FALSE, FALSE, TRUE);
-                 if (eh != NULL && eh->target_internal == ST_BRANCH_TO_THUMB)
+                 if (eh != NULL
+                     && ARM_GET_SYM_BRANCH_TYPE (eh->target_internal)
+                        == ST_BRANCH_TO_THUMB)
                    {
                      dyn.d_un.d_val |= 1;
                      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
@@ -14368,6 +16417,20 @@ elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info
          else if (htab->nacl_p)
            arm_nacl_put_plt0 (htab, output_bfd, splt,
                               got_address + 8 - (plt_address + 16));
+         else if (using_thumb_only (htab))
+           {
+             got_displacement = got_address - (plt_address + 12);
+
+             plt0_entry = elf32_thumb2_plt0_entry;
+             put_arm_insn (htab, output_bfd, plt0_entry[0],
+                           splt->contents + 0);
+             put_arm_insn (htab, output_bfd, plt0_entry[1],
+                           splt->contents + 4);
+             put_arm_insn (htab, output_bfd, plt0_entry[2],
+                           splt->contents + 8);
+
+             bfd_put_32 (output_bfd, got_displacement, splt->contents + 12);
+           }
          else
            {
              got_displacement = got_address - (plt_address + 16);
@@ -14432,7 +16495,9 @@ elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info
 #endif
        }
 
-      if (htab->vxworks_p && !info->shared && htab->root.splt->size > 0)
+      if (htab->vxworks_p
+         && !bfd_link_pic (info)
+         && htab->root.splt->size > 0)
        {
          /* Correct the .rel(a).plt.unloaded relocations.  They will have
             incorrect symbol indexes.  */
@@ -14490,13 +16555,14 @@ elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATT
 {
   Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form.  */
   struct elf32_arm_link_hash_table *globals;
+  struct elf_segment_map *m;
 
   i_ehdrp = elf_elfheader (abfd);
 
   if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
     i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
   else
-    i_ehdrp->e_ident[EI_OSABI] = 0;
+    _bfd_elf_post_process_headers (abfd, link_info);
   i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
 
   if (link_info)
@@ -14510,11 +16576,31 @@ elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATT
       && ((i_ehdrp->e_type == ET_DYN) || (i_ehdrp->e_type == ET_EXEC)))
     {
       int abi = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_ABI_VFP_args);
-      if (abi)
+      if (abi == AEABI_VFP_args_vfp)
        i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_HARD;
       else
        i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_SOFT;
     }
+
+  /* Scan segments and set the p_flags attribute on those that contain
+     only sections with the SHF_ARM_PURECODE flag.  */
+  for (m = elf_seg_map (abfd); m != NULL; m = m->next)
+    {
+      unsigned int j;
+
+      if (m->count == 0)
+       continue;
+      for (j = 0; j < m->count; j++)
+       {
+         if (!(elf_section_flags (m->sections[j]) & SHF_ARM_PURECODE))
+           break;
+       }
+      if (j == m->count)
+       {
+         m->p_flags = PF_X;
+         m->p_flags_valid = 1;
+       }
+    }
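+
+  /* The net effect (illustration): a segment made up solely of
+     SHF_ARM_PURECODE sections gets p_flags == PF_X, i.e. it is mapped
+     execute-only, with no read permission.  */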
 }
 
 static enum elf_reloc_type_class
@@ -14530,6 +16616,8 @@ elf32_arm_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
       return reloc_class_plt;
     case R_ARM_COPY:
       return reloc_class_copy;
+    case R_ARM_IRELATIVE:
+      return reloc_class_ifunc;
     default:
       return reloc_class_normal;
     }
@@ -14566,6 +16654,10 @@ elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
       hdr->sh_type = SHT_ARM_EXIDX;
       hdr->sh_flags |= SHF_LINK_ORDER;
     }
+
+  if (sec->flags & SEC_ELF_PURECODE)
+    hdr->sh_flags |= SHF_ARM_PURECODE;
+
   return TRUE;
 }
 
@@ -14706,6 +16798,11 @@ elf32_arm_output_plt_map_1 (output_arch_syminfo *osi,
       if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
        return FALSE;
     }
+  else if (using_thumb_only (htab))
+    {
+      if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr))
+       return FALSE;
+    }
   else
     {
       bfd_boolean thumb_stub_p;
@@ -14758,6 +16855,20 @@ elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
                                     &h->plt, &eh->plt);
 }
 
+/* Bind a veneered symbol to its veneer identified by its hash entry
+   STUB_ENTRY.  The veneered location thus loses its symbol.  */
+
+static void
+arm_stub_claim_sym (struct elf32_arm_stub_hash_entry *stub_entry)
+{
+  struct elf32_arm_link_hash_entry *hash = stub_entry->h;
+
+  BFD_ASSERT (hash);
+  hash->root.root.u.def.section = stub_entry->stub_sec;
+  hash->root.root.u.def.value = stub_entry->stub_offset;
+  hash->root.size = stub_entry->stub_size;
+}
+
 /* Output a single local symbol for a generated stub.  */
 
 static bfd_boolean
@@ -14804,24 +16915,30 @@ arm_map_one_stub (struct bfd_hash_entry * gen_entry,
     return TRUE;
 
   addr = (bfd_vma) stub_entry->stub_offset;
-  stub_name = stub_entry->output_name;
-
   template_sequence = stub_entry->stub_template;
-  switch (template_sequence[0].type)
+
+  if (arm_stub_sym_claimed (stub_entry->stub_type))
+    arm_stub_claim_sym (stub_entry);
+  else
     {
-    case ARM_TYPE:
-      if (!elf32_arm_output_stub_sym (osi, stub_name, addr, stub_entry->stub_size))
-       return FALSE;
-      break;
-    case THUMB16_TYPE:
-    case THUMB32_TYPE:
-      if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
-                                     stub_entry->stub_size))
-       return FALSE;
-      break;
-    default:
-      BFD_FAIL ();
-      return 0;
+      stub_name = stub_entry->output_name;
+      switch (template_sequence[0].type)
+       {
+       case ARM_TYPE:
+         if (!elf32_arm_output_stub_sym (osi, stub_name, addr,
+                                         stub_entry->stub_size))
+           return FALSE;
+         break;
+       case THUMB16_TYPE:
+       case THUMB32_TYPE:
+         if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
+                                         stub_entry->stub_size))
+           return FALSE;
+         break;
+       default:
+         BFD_FAIL ();
+         return FALSE;
+       }
     }
 
   prev_type = DATA_TYPE;
@@ -14913,7 +17030,7 @@ elf32_arm_output_arch_local_syms (bfd *output_bfd,
      mapping symbols.  */
   for (input_bfd = info->input_bfds;
        input_bfd != NULL;
-       input_bfd = input_bfd->link_next)
+       input_bfd = input_bfd->link.next)
     {
       if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
        for (osi.sec = input_bfd->sections;
@@ -14946,7 +17063,7 @@ elf32_arm_output_arch_local_syms (bfd *output_bfd,
 
       osi.sec_shndx = _bfd_elf_section_from_bfd_section
          (output_bfd, osi.sec->output_section);
-      if (info->shared || htab->root.is_relocatable_executable
+      if (bfd_link_pic (info) || htab->root.is_relocatable_executable
          || htab->pic_veneer)
        size = ARM2THUMB_PIC_GLUE_SIZE;
       else if (htab->use_blx)
@@ -15024,7 +17141,7 @@ elf32_arm_output_arch_local_syms (bfd *output_bfd,
       if (htab->vxworks_p)
        {
          /* VxWorks shared libraries have no PLT header.  */
-         if (!info->shared)
+         if (!bfd_link_pic (info))
            {
              if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
                return FALSE;
@@ -15037,6 +17154,15 @@ elf32_arm_output_arch_local_syms (bfd *output_bfd,
          if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
            return FALSE;
        }
+      else if (using_thumb_only (htab))
+       {
+         if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 0))
+           return FALSE;
+         if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
+           return FALSE;
+         if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 16))
+           return FALSE;
+       }
       else if (!htab->symbian_p)
        {
          if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
@@ -15062,7 +17188,7 @@ elf32_arm_output_arch_local_syms (bfd *output_bfd,
       elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi);
       for (input_bfd = info->input_bfds;
           input_bfd != NULL;
-          input_bfd = input_bfd->link_next)
+          input_bfd = input_bfd->link.next)
        {
          struct arm_local_iplt_info **local_iplt;
          unsigned int i, num_syms;
@@ -15105,6 +17231,95 @@ elf32_arm_output_arch_local_syms (bfd *output_bfd,
   return TRUE;
 }
 
+/* Filter normal symbols of CMSE entry functions of ABFD to include in
+   the import library.  All SYMCOUNT symbols of ABFD can be examined
+   from their pointers in SYMS.  Pointers of symbols to keep should be
+   stored contiguously at the beginning of that array.
+
+   Returns the number of symbols to keep.  */
+
+static unsigned int
+elf32_arm_filter_cmse_symbols (bfd *abfd ATTRIBUTE_UNUSED,
+                              struct bfd_link_info *info,
+                              asymbol **syms, long symcount)
+{
+  size_t maxnamelen;
+  char *cmse_name;
+  long src_count, dst_count = 0;
+  struct elf32_arm_link_hash_table *htab;
+
+  htab = elf32_arm_hash_table (info);
+  if (!htab->stub_bfd || !htab->stub_bfd->sections)
+    symcount = 0;
+
+  maxnamelen = 128;
+  cmse_name = (char *) bfd_malloc (maxnamelen);
+  for (src_count = 0; src_count < symcount; src_count++)
+    {
+      struct elf32_arm_link_hash_entry *cmse_hash;
+      asymbol *sym;
+      flagword flags;
+      char *name;
+      size_t namelen;
+
+      sym = syms[src_count];
+      flags = sym->flags;
+      name = (char *) bfd_asymbol_name (sym);
+
+      if ((flags & BSF_FUNCTION) != BSF_FUNCTION)
+       continue;
+      if (!(flags & (BSF_GLOBAL | BSF_WEAK)))
+       continue;
+
+      namelen = strlen (name) + sizeof (CMSE_PREFIX) + 1;
+      if (namelen > maxnamelen)
+       {
+         cmse_name = (char *) bfd_realloc (cmse_name, namelen);
+         maxnamelen = namelen;
+       }
+      snprintf (cmse_name, maxnamelen, "%s%s", CMSE_PREFIX, name);
+      cmse_hash = (struct elf32_arm_link_hash_entry *)
+       elf_link_hash_lookup (&htab->root, cmse_name, FALSE, FALSE, TRUE);
+
+      if (!cmse_hash
+         || (cmse_hash->root.root.type != bfd_link_hash_defined
+             && cmse_hash->root.root.type != bfd_link_hash_defweak)
+         || cmse_hash->root.type != STT_FUNC)
+       continue;
+
+      if (!ARM_GET_SYM_CMSE_SPCL (cmse_hash->root.target_internal))
+       continue;
+
+      syms[dst_count++] = sym;
+    }
+  free (cmse_name);
+
+  syms[dst_count] = NULL;
+
+  return dst_count;
+}
+
+/* Filter symbols of ABFD to include in the import library.  All
+   SYMCOUNT symbols of ABFD can be examined from their pointers in
+   SYMS.  Pointers of symbols to keep should be stored contiguously at
+   the beginning of that array.
+
+   Returns the number of symbols to keep.  */
+
+static unsigned int
+elf32_arm_filter_implib_symbols (bfd *abfd ATTRIBUTE_UNUSED,
+                                struct bfd_link_info *info,
+                                asymbol **syms, long symcount)
+{
+  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
+
+  if (globals->cmse_implib)
+    return elf32_arm_filter_cmse_symbols (abfd, info, syms, symcount);
+  else
+    return _bfd_elf_filter_global_symbols (abfd, info, syms, symcount);
+}
+
 /* Allocate target specific section data.  */
 
 static bfd_boolean
@@ -15201,7 +17416,7 @@ make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
   bfd_vma veneered_insn_loc, veneer_entry_loc;
   bfd_signed_vma branch_offset;
   bfd *abfd;
-  unsigned int target;
+  unsigned int loc;
 
   stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
   data = (struct a8_branch_to_stub_data *) in_arg;
@@ -15212,9 +17427,11 @@ make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
 
   contents = data->contents;
 
+  /* We can use target_section here because Cortex-A8 erratum workaround
+     stubs are only generated when the branch and its target are in the
+     same section.  */
   veneered_insn_loc = stub_entry->target_section->output_section->vma
                      + stub_entry->target_section->output_offset
-                     + stub_entry->target_value;
+                     + stub_entry->source_value;
 
   veneer_entry_loc = stub_entry->stub_sec->output_section->vma
                     + stub_entry->stub_sec->output_offset
@@ -15223,74 +17440,840 @@ make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
   if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
     veneered_insn_loc &= ~3u;
 
-  branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
+  branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
+
+  abfd = stub_entry->target_section->owner;
+  loc = stub_entry->source_value;
+
+  /* We attempt to avoid this condition by setting stubs_always_after_branch
+     in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
+     This check is just to be on the safe side...  */
+  if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
+    {
+      (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
+                              "allocated in unsafe location"), abfd);
+      return FALSE;
+    }
+
+  switch (stub_entry->stub_type)
+    {
+    case arm_stub_a8_veneer_b:
+    case arm_stub_a8_veneer_b_cond:
+      branch_insn = 0xf0009000;
+      goto jump24;
+
+    case arm_stub_a8_veneer_blx:
+      branch_insn = 0xf000e800;
+      goto jump24;
+
+    case arm_stub_a8_veneer_bl:
+      {
+       unsigned int i1, j1, i2, j2, s;
+
+       branch_insn = 0xf000d000;
+
+      jump24:
+       if (branch_offset < -16777216 || branch_offset > 16777214)
+         {
+           /* There's not much we can do apart from complain if this
+              happens.  */
+           (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
+                                    "of range (input file too large)"), abfd);
+           return FALSE;
+         }
+
+       /* i1 = not(j1 eor s), so:
+          not i1 = j1 eor s
+          j1 = (not i1) eor s.  */
+
+       branch_insn |= (branch_offset >> 1) & 0x7ff;
+       branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
+       i2 = (branch_offset >> 22) & 1;
+       i1 = (branch_offset >> 23) & 1;
+       s = (branch_offset >> 24) & 1;
+       j1 = (!i1) ^ s;
+       j2 = (!i2) ^ s;
+       branch_insn |= j2 << 11;
+       branch_insn |= j1 << 13;
+       branch_insn |= s << 26;
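+
+       /* Worked example (illustrative): branch_offset == -4 gives
+          s = 1, i1 = i2 = 1 and hence j1 = j2 = 1; together with
+          imm10 = 0x3ff and imm11 = 0x7fe this encodes a branch back
+          to the instruction itself.  */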
+      }
+      break;
+
+    default:
+      BFD_FAIL ();
+      return FALSE;
+    }
+
+  bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[loc]);
+  bfd_put_16 (abfd, branch_insn & 0xffff, &contents[loc + 2]);
+
+  return TRUE;
+}
+
+/* Beginning of stm32l4xx work-around.  */
+
+/* Functions encoding the instructions necessary for the emission of the
+   fix-stm32l4xx-629360 work-around.
+   Encodings are taken from the ARM Architecture Reference Manual,
+   ARMv7-A and ARMv7-R edition (ARM DDI 0406C.b, ID072512).  */
+
+static inline bfd_vma
+create_instruction_branch_absolute (int branch_offset)
+{
+  /* A8.8.18 B (A8-334)
+     B target_address (Encoding T4).  */
+  /* 1111 - 0Sii - iiii - iiii - 10J1 - Jiii - iiii - iiii.  */
+  /* jump offset is:  S:I1:I2:imm10:imm11:0.  */
+  /* with : I1 = NOT (J1 EOR S) I2 = NOT (J2 EOR S).  */
+
+  int s = ((branch_offset & 0x1000000) >> 24);
+  int j1 = s ^ !((branch_offset & 0x800000) >> 23);
+  int j2 = s ^ !((branch_offset & 0x400000) >> 22);
+
+  if (branch_offset < -(1 << 24) || branch_offset >= (1 << 24))
+    BFD_ASSERT (0 && "Error: branch out of range.  Cannot create branch.");
+
+  bfd_vma patched_inst = 0xf0009000
+    | s << 26 /* S.  */
+    | (((unsigned long) (branch_offset) >> 12) & 0x3ff) << 16 /* imm10.  */
+    | j1 << 13 /* J1.  */
+    | j2 << 11 /* J2.  */
+    | (((unsigned long) (branch_offset) >> 1) & 0x7ff); /* imm11.  */
+
+  return patched_inst;
+}
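+
+/* Sanity check (illustrative, not part of the original patch):
+   create_instruction_branch_absolute (-16) returns 0xf7ffbff8, the
+   Thumb-2 (T4) encoding of "B.W" with a branch offset of -16.  */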
+
+static inline bfd_vma
+create_instruction_ldmia (int base_reg, int wback, int reg_mask)
+{
+  /* A8.8.57 LDM/LDMIA/LDMFD (A8-396)
+     LDMIA Rn!, {Ra, Rb, Rc, ...} (Encoding T2).  */
+  bfd_vma patched_inst = 0xe8900000
+    | (/*W=*/wback << 21)
+    | (base_reg << 16)
+    | (reg_mask & 0x0000ffff);
+
+  return patched_inst;
+}
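+
+/* Sanity check (illustrative, not part of the original patch):
+   create_instruction_ldmia (9, /*wback=*/1, 0x007f) returns 0xe8b9007f,
+   the Thumb-2 encoding of "LDMIA r9!, {r0-r6}".  */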
+
+static inline bfd_vma
+create_instruction_ldmdb (int base_reg, int wback, int reg_mask)
+{
+  /* A8.8.60 LDMDB/LDMEA (A8-402)
+     LDMDB Rn!, {Ra, Rb, Rc, ...} (Encoding T1).  */
+  bfd_vma patched_inst = 0xe9100000
+    | (/*W=*/wback << 21)
+    | (base_reg << 16)
+    | (reg_mask & 0x0000ffff);
+
+  return patched_inst;
+}
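+
+/* Sanity check (illustrative, not part of the original patch):
+   create_instruction_ldmdb (9, /*wback=*/1, 0x007f) returns 0xe939007f,
+   the Thumb-2 encoding of "LDMDB r9!, {r0-r6}".  */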
+
+static inline bfd_vma
+create_instruction_mov (int target_reg, int source_reg)
+{
+  /* A8.8.103 MOV (register) (A8-486)
+     MOV Rd, Rm (Encoding T1).  */
+  bfd_vma patched_inst = 0x4600
+    | (target_reg & 0x7)
+    | ((target_reg & 0x8) >> 3) << 7
+    | (source_reg << 3);
+
+  return patched_inst;
+}
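+
+/* Sanity check (illustrative, not part of the original patch):
+   create_instruction_mov (10, 9) returns 0x46ca, the Thumb encoding
+   of "MOV r10, r9".  */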
+
+static inline bfd_vma
+create_instruction_sub (int target_reg, int source_reg, int value)
+{
+  /* A8.8.221 SUB (immediate) (A8-708)
+     SUB Rd, Rn, #value (Encoding T3).  */
+  bfd_vma patched_inst = 0xf1a00000
+    | (target_reg << 8)
+    | (source_reg << 16)
+    | (/*S=*/0 << 20)
+    | ((value & 0x800) >> 11) << 26
+    | ((value & 0x700) >>  8) << 12
+    | (value & 0x0ff);
+
+  return patched_inst;
+}
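+
+/* Sanity check (illustrative, not part of the original patch):
+   create_instruction_sub (1, 9, 40) returns 0xf1a90128, the Thumb-2
+   encoding of "SUB.W r1, r9, #40".  */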
+
+static inline bfd_vma
+create_instruction_vldmia (int base_reg, int is_dp, int wback, int num_words,
+                          int first_reg)
+{
+  /* A8.8.332 VLDM (A8-922)
+     VLDM{MODE} Rn{!}, {list} (Encoding T1 or T2).  */
+  bfd_vma patched_inst = (is_dp ? 0xec900b00 : 0xec900a00)
+    | (/*W=*/wback << 21)
+    | (base_reg << 16)
+    | (num_words & 0x000000ff)
+    | (((unsigned) first_reg >> 1) & 0x0000000f) << 12
+    | (first_reg & 0x00000001) << 22;
+
+  return patched_inst;
+}
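+
+/* Sanity check (illustrative, not part of the original patch):
+   create_instruction_vldmia (0, /*is_dp=*/0, /*wback=*/1, 4, 1)
+   returns 0xecf00a04, the Thumb-2 encoding of "VLDMIA r0!, {s1-s4}".  */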
+
+static inline bfd_vma
+create_instruction_vldmdb (int base_reg, int is_dp, int num_words,
+                          int first_reg)
+{
+  /* A8.8.332 VLDM (A8-922)
+     VLDM{MODE} Rn!, {list} (Encoding T1 or T2).  */
+  bfd_vma patched_inst = (is_dp ? 0xed300b00 : 0xed300a00)
+    | (base_reg << 16)
+    | (num_words & 0x000000ff)
+    | (((unsigned) first_reg >> 1) & 0x0000000f) << 12
+    | (first_reg & 0x00000001) << 22;
+
+  return patched_inst;
+}
+
+static inline bfd_vma
+create_instruction_udf_w (int value)
+{
+  /* A8.8.247 UDF (A8-758)
+     Undefined (Encoding T2).  */
+  bfd_vma patched_inst = 0xf7f0a000
+    | (value & 0x00000fff)
+    | (value & 0x000f0000) << 16;
+
+  return patched_inst;
+}
+
+static inline bfd_vma
+create_instruction_udf (int value)
+{
+  /* A8.8.247 UDF (A8-758)
+     Undefined (Encoding T1).  */
+  bfd_vma patched_inst = 0xde00
+    | (value & 0xff);
+
+  return patched_inst;
+}
+
+/* Functions writing an instruction in memory, returning the next
+   memory position to write to.  */
+
+static inline bfd_byte *
+push_thumb2_insn32 (struct elf32_arm_link_hash_table * htab,
+                   bfd * output_bfd, bfd_byte *pt, insn32 insn)
+{
+  put_thumb2_insn (htab, output_bfd, insn, pt);
+  return pt + 4;
+}
+
+static inline bfd_byte *
+push_thumb2_insn16 (struct elf32_arm_link_hash_table * htab,
+                   bfd * output_bfd, bfd_byte *pt, insn32 insn)
+{
+  put_thumb_insn (htab, output_bfd, insn, pt);
+  return pt + 2;
+}
+
+/* Function filling a region of memory with T1 and T2 UDFs, taking
+   care of alignment.  */
+
+static bfd_byte *
+stm32l4xx_fill_stub_udf (struct elf32_arm_link_hash_table * htab,
+                        bfd *                   output_bfd,
+                        const bfd_byte * const  base_stub_contents,
+                        bfd_byte * const        from_stub_contents,
+                        const bfd_byte * const  end_stub_contents)
+{
+  bfd_byte *current_stub_contents = from_stub_contents;
+
+  /* Fill the remainder of the stub with deterministic contents: UDF
+     instructions.
+     If the current position is only 2-byte aligned, emit one 16-bit (T1)
+     UDF first to realign on a 4-byte boundary, then pad with 32-bit (T2)
+     UDFs.  */
+  if ((current_stub_contents < end_stub_contents)
+      && !((current_stub_contents - base_stub_contents) % 2)
+      && ((current_stub_contents - base_stub_contents) % 4))
+    current_stub_contents =
+      push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
+                         create_instruction_udf (0));
+
+  while (current_stub_contents < end_stub_contents)
+    current_stub_contents =
+      push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+                         create_instruction_udf_w (0));
+
+  return current_stub_contents;
+}
+
+/* Functions writing the stream of instructions equivalent to the
+   derived sequence for ldmia, ldmdb, vldm respectively.  */
+
+static void
+stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table * htab,
+                                      bfd * output_bfd,
+                                      const insn32 initial_insn,
+                                      const bfd_byte *const initial_insn_addr,
+                                      bfd_byte *const base_stub_contents)
+{
+  int wback = (initial_insn & 0x00200000) >> 21;
+  int ri, rn = (initial_insn & 0x000F0000) >> 16;
+  int insn_all_registers = initial_insn & 0x0000ffff;
+  int insn_low_registers, insn_high_registers;
+  int usable_register_mask;
+  int nb_registers = popcount (insn_all_registers);
+  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
+  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
+  bfd_byte *current_stub_contents = base_stub_contents;
+
+  BFD_ASSERT (is_thumb2_ldmia (initial_insn));
+
+  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with load
+     sequences of 8 registers or fewer, which do not trigger the
+     hardware issue.  */
+  if (nb_registers <= 8)
+    {
+      /* UNTOUCHED: LDMIA Rn{!}, {R-all-register-list}.  */
+      current_stub_contents =
+       push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+                           initial_insn);
+
+      /* B initial_insn_addr+4.  */
+      if (!restore_pc)
+       current_stub_contents =
+         push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+                             create_instruction_branch_absolute
+                             (initial_insn_addr - current_stub_contents));
+
+      /* Fill the remainder of the stub with deterministic contents.  */
+      current_stub_contents =
+       stm32l4xx_fill_stub_udf (htab, output_bfd,
+                                base_stub_contents, current_stub_contents,
+                                base_stub_contents +
+                                STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
+
+      return;
+    }
+
+  /* - reg_list[13] == 0.  */
+  BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);
+
+  /* - reg_list[14] & reg_list[15] != 1.  */
+  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);
+
+  /* - if (wback==1) reg_list[rn] == 0.  */
+  BFD_ASSERT (!wback || !restore_rn);
+
+  /* - nb_registers > 8.  */
+  BFD_ASSERT (popcount (insn_all_registers) > 8);
+
+  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */
+
+  /* In the following algorithm, we split this wide LDM using 2 LDM insns:
+    - One with the 7 lowest registers (register mask 0x007F).
+      This LDM will finally contain between 2 and 7 registers.
+    - One with the 7 highest registers (register mask 0xDF80).
+      This LDM will finally contain between 2 and 7 registers.  */
+  insn_low_registers = insn_all_registers & 0x007F;
+  insn_high_registers = insn_all_registers & 0xDF80;
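+
+  /* For example (illustration, not from the original patch): a
+     12-register LDMIA with register mask 0x1dff (r0-r8, r10-r12) is
+     split into a low part 0x007f (r0-r6) and a high part 0x1d80
+     (r7, r8, r10-r12).  */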
+
+  /* A spare register may be needed during this veneer to temporarily
+     handle the base register.  This register will be restored with the
+     last LDM operation.
+     The usable register may be any general purpose register excluding
+     PC, SP and LR: register mask 0x1FFF.  */
+  usable_register_mask = 0x1FFF;
+
+  /* Generate the stub function.  */
+  if (wback)
+    {
+      /* LDMIA Rn!, {R-low-register-list} : (Encoding T2).  */
+      current_stub_contents =
+       push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+                           create_instruction_ldmia
+                           (rn, /*wback=*/1, insn_low_registers));
+
+      /* LDMIA Rn!, {R-high-register-list} : (Encoding T2).  */
+      current_stub_contents =
+       push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+                           create_instruction_ldmia
+                           (rn, /*wback=*/1, insn_high_registers));
+      if (!restore_pc)
+       {
+         /* B initial_insn_addr+4.  */
+         current_stub_contents =
+           push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+                               create_instruction_branch_absolute
+                               (initial_insn_addr - current_stub_contents));
+       }
+    }
+  else /* if (!wback).  */
+    {
+      ri = rn;
+
+      /* If Rn is not part of the high-register-list, move it there.  */
+      if (!(insn_high_registers & (1 << rn)))
+       {
+         /* Choose a Ri in the high-register-list that will be restored.  */
+         ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
+
+         /* MOV Ri, Rn.  */
+         current_stub_contents =
+           push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
+                               create_instruction_mov (ri, rn));
+       }
+
+      /* LDMIA Ri!, {R-low-register-list} : (Encoding T2).  */
+      current_stub_contents =
+       push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+                           create_instruction_ldmia
+                           (ri, /*wback=*/1, insn_low_registers));
+
+      /* LDMIA Ri, {R-high-register-list} : (Encoding T2).  */
+      current_stub_contents =
+       push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+                           create_instruction_ldmia
+                           (ri, /*wback=*/0, insn_high_registers));
+
+      if (!restore_pc)
+       {
+         /* B initial_insn_addr+4.  */
+         current_stub_contents =
+           push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+                               create_instruction_branch_absolute
+                               (initial_insn_addr - current_stub_contents));
+       }
+    }
+
+  /* Fill the remainder of the stub with deterministic contents.  */
+  current_stub_contents =
+    stm32l4xx_fill_stub_udf (htab, output_bfd,
+                            base_stub_contents, current_stub_contents,
+                            base_stub_contents +
+                            STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
+}
+
+static void
+stm32l4xx_create_replacing_stub_ldmdb (struct elf32_arm_link_hash_table * htab,
+                                      bfd * output_bfd,
+                                      const insn32 initial_insn,
+                                      const bfd_byte *const initial_insn_addr,
+                                      bfd_byte *const base_stub_contents)
+{
+  int wback = (initial_insn & 0x00200000) >> 21;
+  int ri, rn = (initial_insn & 0x000f0000) >> 16;
+  int insn_all_registers = initial_insn & 0x0000ffff;
+  int insn_low_registers, insn_high_registers;
+  int usable_register_mask;
+  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
+  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
+  int nb_registers = popcount (insn_all_registers);
+  bfd_byte *current_stub_contents = base_stub_contents;
+
+  BFD_ASSERT (is_thumb2_ldmdb (initial_insn));
+
+  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with load
+     sequences of 8 registers or fewer, which do not trigger the
+     hardware issue.  */
+  if (nb_registers <= 8)
+    {
+      /* UNTOUCHED: LDMDB Rn{!}, {R-all-register-list}.  */
+      current_stub_contents =
+       push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+                           initial_insn);
+
+      /* B initial_insn_addr+4.  */
+      current_stub_contents =
+       push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+                           create_instruction_branch_absolute
+                           (initial_insn_addr - current_stub_contents));
+
+      /* Fill the remainder of the stub with deterministic contents.  */
+      current_stub_contents =
+       stm32l4xx_fill_stub_udf (htab, output_bfd,
+                                base_stub_contents, current_stub_contents,
+                                base_stub_contents +
+                                STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
+
+      return;
+    }
+
+  /* - reg_list[13] == 0.  */
+  BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);
+
+  /* - reg_list[14] & reg_list[15] != 1.  */
+  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);
+
+  /* - if (wback==1) reg_list[rn] == 0.  */
+  BFD_ASSERT (!wback || !restore_rn);
+
+  /* - nb_registers > 8.  */
+  BFD_ASSERT (popcount (insn_all_registers) > 8);
+
+  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */
+
+  /* In the following algorithm, we split this wide LDM using 2 LDM insns:
+    - One with the 7 lowest registers (register mask 0x007F).
+      This LDM will finally contain between 2 and 7 registers.
+    - One with the 7 highest registers (register mask 0xDF80).
+      This LDM will finally contain between 2 and 7 registers.  */
+  insn_low_registers = insn_all_registers & 0x007F;
+  insn_high_registers = insn_all_registers & 0xDF80;
+
+  /* A spare register may be needed during this veneer to temporarily
+     handle the base register.  This register will be restored with
+     the last LDM operation.
+     The usable register may be any general purpose register excluding
+     PC, SP and LR: register mask 0x1FFF.  */
+  usable_register_mask = 0x1FFF;
+
+  /* Generate the stub function.  */
+  if (!wback && !restore_pc && !restore_rn)
+    {
+      /* Choose a Ri in the low-register-list that will be restored.  */
+      ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));
+
+      /* MOV Ri, Rn.  */
+      current_stub_contents =
+       push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
+                           create_instruction_mov (ri, rn));
+
+      /* LDMDB Ri!, {R-high-register-list}.  */
+      current_stub_contents =
+       push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+                           create_instruction_ldmdb
+                           (ri, /*wback=*/1, insn_high_registers));
+
+      /* LDMDB Ri, {R-low-register-list}.  */
+      current_stub_contents =
+       push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+                           create_instruction_ldmdb
+                           (ri, /*wback=*/0, insn_low_registers));
+
+      /* B initial_insn_addr+4.  */
+      current_stub_contents =
+       push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+                           create_instruction_branch_absolute
+                           (initial_insn_addr - current_stub_contents));
+    }
+  else if (wback && !restore_pc && !restore_rn)
+    {
+      /* LDMDB Rn!, {R-high-register-list}.  */
+      current_stub_contents =
+       push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+                           create_instruction_ldmdb
+                           (rn, /*wback=*/1, insn_high_registers));
+
+      /* LDMDB Rn!, {R-low-register-list}.  */
+      current_stub_contents =
+       push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+                           create_instruction_ldmdb
+                           (rn, /*wback=*/1, insn_low_registers));
+
+      /* B initial_insn_addr+4.  */
+      current_stub_contents =
+       push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+                           create_instruction_branch_absolute
+                           (initial_insn_addr - current_stub_contents));
+    }
+  else if (!wback && restore_pc && !restore_rn)
+    {
+      /* Choose a Ri in the high-register-list that will be restored.  */
+      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
+
+      /* SUB Ri, Rn, #(4*nb_registers).  */
+      current_stub_contents =
+       push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+                           create_instruction_sub (ri, rn, (4 * nb_registers)));
+
+      /* LDMIA Ri!, {R-low-register-list}.  */
+      current_stub_contents =
+       push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+                           create_instruction_ldmia
+                           (ri, /*wback=*/1, insn_low_registers));
+
+      /* LDMIA Ri, {R-high-register-list}.  */
+      current_stub_contents =
+       push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+                           create_instruction_ldmia
+                           (ri, /*wback=*/0, insn_high_registers));
+    }
+  else if (wback && restore_pc && !restore_rn)
+    {
+      /* Choose a Ri in the high-register-list that will be restored.  */
+      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
+
+      /* SUB Rn, Rn, #(4*nb_registers).  */
+      current_stub_contents =
+       push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+                           create_instruction_sub (rn, rn, (4 * nb_registers)));
+
+      /* MOV Ri, Rn.  */
+      current_stub_contents =
+       push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
+                           create_instruction_mov (ri, rn));
+
+      /* LDMIA Ri!, {R-low-register-list}.  */
+      current_stub_contents =
+       push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+                           create_instruction_ldmia
+                           (ri, /*wback=*/1, insn_low_registers));
+
+      /* LDMIA Ri, {R-high-register-list}.  */
+      current_stub_contents =
+       push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+                           create_instruction_ldmia
+                           (ri, /*wback=*/0, insn_high_registers));
+    }
+  else if (!wback && !restore_pc && restore_rn)
+    {
+      ri = rn;
+      if (!(insn_low_registers & (1 << rn)))
+       {
+         /* Choose a Ri in the low-register-list that will be restored.  */
+         ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));
+
+         /* MOV Ri, Rn.  */
+         current_stub_contents =
+           push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
+                               create_instruction_mov (ri, rn));
+       }
+
+      /* LDMDB Ri!, {R-high-register-list}.  */
+      current_stub_contents =
+       push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+                           create_instruction_ldmdb
+                           (ri, /*wback=*/1, insn_high_registers));
+
+      /* LDMDB Ri, {R-low-register-list}.  */
+      current_stub_contents =
+       push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+                           create_instruction_ldmdb
+                           (ri, /*wback=*/0, insn_low_registers));
+
+      /* B initial_insn_addr+4.  */
+      current_stub_contents =
+       push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+                           create_instruction_branch_absolute
+                           (initial_insn_addr - current_stub_contents));
+    }
+  else if (!wback && restore_pc && restore_rn)
+    {
+      ri = rn;
+      if (!(insn_high_registers & (1 << rn)))
+       {
+         /* Choose a Ri in the high-register-list that will be restored.  */
+         ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
+       }
+
+      /* SUB Ri, Rn, #(4*nb_registers).  */
+      current_stub_contents =
+       push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+                           create_instruction_sub (ri, rn, (4 * nb_registers)));
+
+      /* LDMIA Ri!, {R-low-register-list}.  */
+      current_stub_contents =
+       push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+                           create_instruction_ldmia
+                           (ri, /*wback=*/1, insn_low_registers));
+
+      /* LDMIA Ri, {R-high-register-list}.  */
+      current_stub_contents =
+       push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+                           create_instruction_ldmia
+                           (ri, /*wback=*/0, insn_high_registers));
+    }
+  else if (wback && restore_rn)
+    {
+      /* The assembler should not have accepted this encoding.  */
+      BFD_ASSERT (0 && "Cannot patch an instruction that has "
+       "undefined behavior.\n");
+    }
+
+  /* Fill the remainder of the stub with deterministic contents.  */
+  current_stub_contents =
+    stm32l4xx_fill_stub_udf (htab, output_bfd,
+                            base_stub_contents, current_stub_contents,
+                            base_stub_contents +
+                            STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
+}
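/* Editor's note: a minimal, self-contained sketch (not part of this
   patch) of the register-list split performed above.  It is written
   under the assumption that the file's static ctz/popcount helpers
   behave like the GCC builtins used here; my_* names, the example
   register list and Rn are hypothetical.  */

#include <stdio.h>

static int my_popcount (unsigned int v) { return __builtin_popcount (v); }
static int my_ctz (unsigned int v)      { return __builtin_ctz (v); }

int
main (void)
{
  unsigned int reg_list = 0x5FF3;	 /* r0-r1, r4-r12, lr: 12 registers.  */
  unsigned int low  = reg_list & 0x007F; /* 7 lowest registers.  */
  unsigned int high = reg_list & 0xDF80; /* 7 highest registers (SP excluded).  */
  int rn = 3;				 /* Hypothetical base register.  */
  /* Spare register: lowest usable GPR in the low list, excluding Rn
     (usable mask 0x1FFF rules out SP, LR and PC).  */
  int ri = my_ctz (low & 0x1FFF & ~(1u << rn));

  printf ("regs=%d low=%#06x high=%#06x ri=r%d\n",
	  my_popcount (reg_list), low, high, ri);
  return 0;
}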
+
+static void
+stm32l4xx_create_replacing_stub_vldm (struct elf32_arm_link_hash_table * htab,
+                                     bfd * output_bfd,
+                                     const insn32 initial_insn,
+                                     const bfd_byte *const initial_insn_addr,
+                                     bfd_byte *const base_stub_contents)
+{
+  int num_words = ((unsigned int) initial_insn << 24) >> 24;
+  bfd_byte *current_stub_contents = base_stub_contents;
+
+  BFD_ASSERT (is_thumb2_vldm (initial_insn));
+
+  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with load
+     sequences of fewer than 8 words, which do not trigger the
+     hardware issue.  */
+  if (num_words <= 8)
+    {
+      /* Untouched instruction.  */
+      current_stub_contents =
+       push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+                           initial_insn);
+
+      /* B initial_insn_addr+4.  */
+      current_stub_contents =
+       push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+                           create_instruction_branch_absolute
+                           (initial_insn_addr - current_stub_contents));
+    }
+  else
+    {
+      bfd_boolean is_dp = /* DP encoding. */
+       (initial_insn & 0xfe100f00) == 0xec100b00;
+      bfd_boolean is_ia_nobang = /* (IA without !).  */
+       (((initial_insn << 7) >> 28) & 0xd) == 0x4;
+      bfd_boolean is_ia_bang = /* (IA with !) - includes VPOP.  */
+       (((initial_insn << 7) >> 28) & 0xd) == 0x5;
+      bfd_boolean is_db_bang = /* (DB with !).  */
+       (((initial_insn << 7) >> 28) & 0xd) == 0x9;
+      int base_reg = ((unsigned int) initial_insn << 12) >> 28;
+      /* d = UInt (Vd:D).  */
+      int first_reg = ((((unsigned int) initial_insn << 16) >> 28) << 1)
+       | (((unsigned int)initial_insn << 9) >> 31);
+
+      /* Compute the number of 8-word chunks needed to split.  */
+      int chunks = (num_words % 8) ? (num_words / 8 + 1) : (num_words / 8);
+      int chunk;
+
+      /* The test coverage has been done assuming that exactly one of
+        the previous is_ predicates is true.  */
+      BFD_ASSERT (    (is_ia_nobang ^ is_ia_bang ^ is_db_bang)
+                 && !(is_ia_nobang & is_ia_bang & is_db_bang));
+
+      /* We split the words in a single pass for all cases, then we
+        emit the adjustments:
+
+        vldm rx, {...}
+        -> vldm rx!, {8_words_or_less} for each needed 8_word
+        -> sub rx, rx, #size (list)
+
+        vldm rx!, {...}
+        -> vldm rx!, {8_words_or_less} for each needed 8_word
+        This also handles the VPOP instruction (when rx is SP).
+
+        vldmdb rx!, {...}
+        -> vldmdb rx!, {8_words_or_less} for each needed 8_word.  */
+      for (chunk = 0; chunk < chunks; ++chunk)
+       {
+         bfd_vma new_insn = 0;
+
+         if (is_ia_nobang || is_ia_bang)
+           {
+             new_insn = create_instruction_vldmia
+               (base_reg,
+                is_dp,
+                /*wback=*/1,
+                chunks - (chunk + 1) ?
+                8 : num_words - chunk * 8,
+                first_reg + chunk * 8);
+           }
+         else if (is_db_bang)
+           {
+             new_insn = create_instruction_vldmdb
+               (base_reg,
+                is_dp,
+                chunks - (chunk + 1) ?
+                8 : num_words - chunk * 8,
+                first_reg + chunk * 8);
+           }
+
+         if (new_insn)
+           current_stub_contents =
+             push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+                                 new_insn);
+       }
 
-  abfd = stub_entry->target_section->owner;
-  target = stub_entry->target_value;
+      /* Only the no-writeback IA case requires the compensating
+        subtract on the base register.  */
+      if (is_ia_nobang)
+       {
+         current_stub_contents =
+           push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+                               create_instruction_sub
+                               (base_reg, base_reg, 4*num_words));
+       }
 
-  /* We attempt to avoid this condition by setting stubs_always_after_branch
-     in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
-     This check is just to be on the safe side...  */
-  if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
-    {
-      (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
-                              "allocated in unsafe location"), abfd);
-      return FALSE;
+      /* B initial_insn_addr+4.  */
+      current_stub_contents =
+       push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
+                           create_instruction_branch_absolute
+                           (initial_insn_addr - current_stub_contents));
     }
 
-  switch (stub_entry->stub_type)
-    {
-    case arm_stub_a8_veneer_b:
-    case arm_stub_a8_veneer_b_cond:
-      branch_insn = 0xf0009000;
-      goto jump24;
-
-    case arm_stub_a8_veneer_blx:
-      branch_insn = 0xf000e800;
-      goto jump24;
+  /* Fill the remainder of the stub with deterministic contents.  */
+  current_stub_contents =
+    stm32l4xx_fill_stub_udf (htab, output_bfd,
+                            base_stub_contents, current_stub_contents,
+                            base_stub_contents +
+                            STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
+}
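/* Editor's note: an illustrative sketch (not part of this patch) of
   the VLDM field extraction and 8-word chunking used above.  The
   shifts isolate imm8 (transfer size in words), Rn and Vd:D exactly
   as the code does; the example encoding is a hypothetical
   single-precision VLDMIA r3, {s0-s15}, chosen so that imm8 = 16.  */

#include <stdio.h>

int
main (void)
{
  unsigned int insn = 0xec930a10;	/* VLDMIA r3, {s0-s15} (SP encoding).  */
  int num_words = (insn << 24) >> 24;	/* imm8: 16 words here.  */
  int base_reg  = (insn << 12) >> 28;	/* Rn: r3.  */
  int first_reg = (((insn << 16) >> 28) << 1)
		  | ((insn << 9) >> 31); /* Vd:D: s0.  */
  int chunks = (num_words % 8) ? num_words / 8 + 1 : num_words / 8;
  int chunk;

  printf ("rn=r%d first=s%d words=%d chunks=%d\n",
	  base_reg, first_reg, num_words, chunks);
  for (chunk = 0; chunk < chunks; ++chunk)
    printf ("chunk %d: %d words starting at s%d\n", chunk,
	    chunks - (chunk + 1) ? 8 : num_words - chunk * 8,
	    first_reg + chunk * 8);
  return 0;
}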
 
-    case arm_stub_a8_veneer_bl:
-      {
-       unsigned int i1, j1, i2, j2, s;
+static void
+stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table * htab,
+                                bfd * output_bfd,
+                                const insn32 wrong_insn,
+                                const bfd_byte *const wrong_insn_addr,
+                                bfd_byte *const stub_contents)
+{
+  if (is_thumb2_ldmia (wrong_insn))
+    stm32l4xx_create_replacing_stub_ldmia (htab, output_bfd,
+                                          wrong_insn, wrong_insn_addr,
+                                          stub_contents);
+  else if (is_thumb2_ldmdb (wrong_insn))
+    stm32l4xx_create_replacing_stub_ldmdb (htab, output_bfd,
+                                          wrong_insn, wrong_insn_addr,
+                                          stub_contents);
+  else if (is_thumb2_vldm (wrong_insn))
+    stm32l4xx_create_replacing_stub_vldm (htab, output_bfd,
+                                         wrong_insn, wrong_insn_addr,
+                                         stub_contents);
+}
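/* Editor's note: a hedged sketch (not part of this patch) of the
   classification the dispatcher above relies on.  The opcode masks
   below follow the Thumb-2 LDM encodings (T2 LDM.W and T1 LDMDB) and
   are assumptions modelled on the file's is_thumb2_ldmia and
   is_thumb2_ldmdb helpers, which are defined earlier and not shown
   in this hunk.  */

#include <stdio.h>

static int
looks_like_ldmia (unsigned int insn)	/* LDM<c>.W Rn{!}, {...}.  */
{
  return (insn & 0xffd02000) == 0xe8900000;
}

static int
looks_like_ldmdb (unsigned int insn)	/* LDMDB<c> Rn{!}, {...}.  */
{
  return (insn & 0xffd02000) == 0xe9100000;
}

int
main (void)
{
  unsigned int ldmia = 0xe8b103fc;	/* LDMIA r1!, {r2-r9}.  */
  unsigned int ldmdb = 0xe93103fc;	/* LDMDB r1!, {r2-r9}.  */

  printf ("%d %d %d %d\n",		/* Expect: 1 0 0 1.  */
	  looks_like_ldmia (ldmia), looks_like_ldmdb (ldmia),
	  looks_like_ldmia (ldmdb), looks_like_ldmdb (ldmdb));
  return 0;
}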
 
-       branch_insn = 0xf000d000;
+/* End of stm32l4xx work-around.  */
 
-      jump24:
-       if (branch_offset < -16777216 || branch_offset > 16777214)
-         {
-           /* There's not much we can do apart from complain if this
-              happens.  */
-           (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
-                                    "of range (input file too large)"), abfd);
-           return FALSE;
-         }
 
-       /* i1 = not(j1 eor s), so:
-          not i1 = j1 eor s
-          j1 = (not i1) eor s.  */
+static void
+elf32_arm_add_relocation (bfd *output_bfd, struct bfd_link_info *info,
+                         asection *output_sec, Elf_Internal_Rela *rel)
+{
+  struct bfd_elf_section_reloc_data *output_reldata;
+  struct elf32_arm_link_hash_table *htab;
+  struct bfd_elf_section_data *oesd = elf_section_data (output_sec);
+  Elf_Internal_Shdr *rel_hdr;
+  bfd_byte *erel;
+
+  BFD_ASSERT (output_sec && rel);
 
-       branch_insn |= (branch_offset >> 1) & 0x7ff;
-       branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
-       i2 = (branch_offset >> 22) & 1;
-       i1 = (branch_offset >> 23) & 1;
-       s = (branch_offset >> 24) & 1;
-       j1 = (!i1) ^ s;
-       j2 = (!i2) ^ s;
-       branch_insn |= j2 << 11;
-       branch_insn |= j1 << 13;
-       branch_insn |= s << 26;
-      }
-      break;
 
-    default:
-      BFD_FAIL ();
-      return FALSE;
+  if (oesd->rel.hdr)
+    {
+      rel_hdr = oesd->rel.hdr;
+      output_reldata = &(oesd->rel);
+    }
+  else if (oesd->rela.hdr)
+    {
+      rel_hdr = oesd->rela.hdr;
+      output_reldata = &(oesd->rela);
+    }
+  else
+    {
+      abort ();
     }
 
-  bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[target]);
-  bfd_put_16 (abfd, branch_insn & 0xffff, &contents[target + 2]);
-
-  return TRUE;
+  erel = rel_hdr->contents;
+  erel += output_reldata->count * rel_hdr->sh_entsize;
+  htab = elf32_arm_hash_table (info);
+  SWAP_RELOC_OUT (htab) (output_bfd, rel, erel);
+  output_reldata->count++;
 }
 
 /* Do code byteswapping.  Return FALSE afterwards so that the section is
@@ -15307,6 +18290,7 @@ elf32_arm_write_section (bfd *output_bfd,
   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
   elf32_arm_section_map *map;
   elf32_vfp11_erratum_list *errnode;
+  elf32_stm32l4xx_erratum_list *stm32l4xx_errnode;
   bfd_vma ptr;
   bfd_vma end;
   bfd_vma offset = sec->output_section->vma + sec->output_offset;
@@ -15401,6 +18385,89 @@ elf32_arm_write_section (bfd *output_bfd,
        }
     }
 
+  if (arm_data->stm32l4xx_erratumcount != 0)
+    {
+      for (stm32l4xx_errnode = arm_data->stm32l4xx_erratumlist;
+          stm32l4xx_errnode != 0;
+          stm32l4xx_errnode = stm32l4xx_errnode->next)
+       {
+         bfd_vma target = stm32l4xx_errnode->vma - offset;
+
+         switch (stm32l4xx_errnode->type)
+           {
+           case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
+             {
+               unsigned int insn;
+               bfd_vma branch_to_veneer =
+                 stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma;
+
+               if ((signed) branch_to_veneer < -(1 << 24)
+                   || (signed) branch_to_veneer >= (1 << 24))
+                 {
+                   bfd_vma out_of_range =
+                     ((signed) branch_to_veneer < -(1 << 24)) ?
+                     - branch_to_veneer - (1 << 24) :
+                     ((signed) branch_to_veneer >= (1 << 24)) ?
+                     branch_to_veneer - (1 << 24) : 0;
+
+                   (*_bfd_error_handler)
+                     (_("%B(%#x): error: Cannot create STM32L4XX veneer. "
+                        "Jump out of range by %ld bytes. "
+                        "Cannot encode branch instruction."),
+                      output_bfd,
+                      (long) (stm32l4xx_errnode->vma - 4),
+                      (long) out_of_range);
+                   continue;
+                 }
+
+               insn = create_instruction_branch_absolute
+                 (stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma);
+
+               /* The instruction is before the label.  */
+               target -= 4;
+
+               put_thumb2_insn (globals, output_bfd,
+                                (bfd_vma) insn, contents + target);
+             }
+             break;
+
+           case STM32L4XX_ERRATUM_VENEER:
+             {
+               bfd_byte * veneer;
+               bfd_byte * veneer_r;
+               unsigned int insn;
+
+               veneer = contents + target;
+               veneer_r = veneer
+                 + stm32l4xx_errnode->u.b.veneer->vma
+                 - stm32l4xx_errnode->vma - 4;
+
+               if ((signed) (veneer_r - veneer -
+                             STM32L4XX_ERRATUM_VLDM_VENEER_SIZE >
+                             STM32L4XX_ERRATUM_LDM_VENEER_SIZE ?
+                             STM32L4XX_ERRATUM_VLDM_VENEER_SIZE :
+                             STM32L4XX_ERRATUM_LDM_VENEER_SIZE) < -(1 << 24)
+                   || (signed) (veneer_r - veneer) >= (1 << 24))
+                 {
+                   (*_bfd_error_handler) (_("%B: error: Cannot create STM32L4XX "
+                                            "veneer."), output_bfd);
+                    continue;
+                 }
+
+               /* Original instruction.  */
+               insn = stm32l4xx_errnode->u.v.branch->u.b.insn;
+
+               stm32l4xx_create_replacing_stub
+                 (globals, output_bfd, insn, (void*)veneer_r, (void*)veneer);
+             }
+             break;
+
+           default:
+             abort ();
+           }
+       }
+    }
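/* Editor's note: a small standalone sketch (not part of this patch)
   of the range check used in the branch-to-veneer diagnostic above.
   A Thumb-2 B.W reaches [-2^24, 2^24), and the overshoot reported is
   the distance past whichever bound was violated; the offset value
   is a hypothetical example.  */

#include <stdio.h>

int
main (void)
{
  long branch = 17000000;		/* Hypothetical offset in bytes.  */
  long limit = 1L << 24;		/* 16 MiB branch reach.  */

  if (branch < -limit || branch >= limit)
    {
      long overshoot = branch < -limit ? -branch - limit : branch - limit;
      printf ("out of range by %ld bytes\n", overshoot);	/* 222784.  */
    }
  return 0;
}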
+
   if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
     {
       arm_unwind_table_edit *edit_node
@@ -15453,6 +18520,26 @@ elf32_arm_write_section (bfd *output_bfd,
                           usual BFD method.  */
                        prel31_offset = (text_offset - exidx_offset)
                                        & 0x7ffffffful;
+                       if (bfd_link_relocatable (link_info))
+                         {
+                           /* A relocation is created here for the new
+                              EXIDX_CANTUNWIND entry, so there is no
+                              need to adjust the offset by hand.  */
+                           prel31_offset = text_sec->output_offset
+                                           + text_sec->size;
+
+                           /* New relocation entry.  */
+                           asection *text_out = text_sec->output_section;
+                           Elf_Internal_Rela rel;
+                           rel.r_addend = 0;
+                           rel.r_offset = exidx_offset;
+                           rel.r_info = ELF32_R_INFO (text_out->target_index,
+                                                      R_ARM_PREL31);
+
+                           elf32_arm_add_relocation (output_bfd, link_info,
+                                                     sec->output_section,
+                                                     &rel);
+                         }
 
                        /* First address we can't unwind.  */
                        bfd_put_32 (output_bfd, prel31_offset,
@@ -15497,8 +18584,8 @@ elf32_arm_write_section (bfd *output_bfd,
       data.writing_section = sec;
       data.contents = contents;
 
-      bfd_hash_traverse (&globals->stub_hash_table, make_branch_to_a8_stub,
-                        &data);
+      bfd_hash_traverse (& globals->stub_hash_table, make_branch_to_a8_stub,
+                        & data);
     }
 
   if (mapcount == 0)
@@ -15567,8 +18654,12 @@ elf32_arm_swap_symbol_in (bfd * abfd,
                          const void *pshn,
                          Elf_Internal_Sym *dst)
 {
+  Elf_Internal_Shdr *symtab_hdr;
+  const char *name = NULL;
+
   if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
     return FALSE;
+  dst->st_target_internal = 0;
 
   /* New EABI objects mark thumb function symbols by setting the low bit of
      the address.  */
@@ -15578,20 +18669,28 @@ elf32_arm_swap_symbol_in (bfd * abfd,
       if (dst->st_value & 1)
        {
          dst->st_value &= ~(bfd_vma) 1;
-         dst->st_target_internal = ST_BRANCH_TO_THUMB;
+         ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal,
+                                  ST_BRANCH_TO_THUMB);
        }
       else
-       dst->st_target_internal = ST_BRANCH_TO_ARM;
+       ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_ARM);
     }
   else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
     {
       dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
-      dst->st_target_internal = ST_BRANCH_TO_THUMB;
+      ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_THUMB);
     }
   else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
-    dst->st_target_internal = ST_BRANCH_LONG;
+    ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_LONG);
   else
-    dst->st_target_internal = ST_BRANCH_UNKNOWN;
+    ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_UNKNOWN);
+
+  /* Mark CMSE special symbols.  */
+  symtab_hdr = & elf_symtab_hdr (abfd);
+  if (symtab_hdr->sh_size)
+    name = bfd_elf_sym_name (abfd, symtab_hdr, dst, NULL);
+  if (name && CONST_STRNEQ (name, CMSE_PREFIX))
+    ARM_SET_SYM_CMSE_SPCL (dst->st_target_internal);
 
   return TRUE;
 }
@@ -15611,7 +18710,7 @@ elf32_arm_swap_symbol_out (bfd *abfd,
      of the address set, as per the new EABI.  We do this unconditionally
      because objcopy does not set the elf header flags until after
      it writes out the symbol table.  */
-  if (src->st_target_internal == ST_BRANCH_TO_THUMB)
+  if (ARM_GET_SYM_BRANCH_TYPE (src->st_target_internal) == ST_BRANCH_TO_THUMB)
     {
       newsym = *src;
       if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
@@ -15693,10 +18792,13 @@ elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
                           Elf_Internal_Sym *sym, const char **namep,
                           flagword *flagsp, asection **secp, bfd_vma *valp)
 {
-  if ((abfd->flags & DYNAMIC) == 0
-      && (ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
-         || ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE))
-    elf_tdata (info->output_bfd)->has_gnu_symbols = TRUE;
+  if (ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
+      && (abfd->flags & DYNAMIC) == 0
+      && bfd_get_flavour (info->output_bfd) == bfd_target_elf_flavour)
+    elf_tdata (info->output_bfd)->has_gnu_symbols |= elf_gnu_symbol_ifunc;
+
+  if (elf32_arm_hash_table (info) == NULL)
+    return FALSE;
 
   if (elf32_arm_hash_table (info)->vxworks_p
       && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
@@ -15737,13 +18839,368 @@ const struct elf_size_info elf32_arm_size_info =
   bfd_elf32_swap_reloca_out
 };
 
+static bfd_vma
+read_code32 (const bfd *abfd, const bfd_byte *addr)
+{
+  /* V7 BE8 code is always little endian.  */
+  if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
+    return bfd_getl32 (addr);
+
+  return bfd_get_32 (abfd, addr);
+}
+
+static bfd_vma
+read_code16 (const bfd *abfd, const bfd_byte *addr)
+{
+  /* V7 BE8 code is always little endian.  */
+  if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
+    return bfd_getl16 (addr);
+
+  return bfd_get_16 (abfd, addr);
+}
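/* Editor's note: an illustrative sketch (not part of this patch) of
   why read_code32/read_code16 exist: in an ARMv7 BE8 image the data
   sections are big-endian while instructions stay little-endian, so
   code bytes must always be decoded LE.  The getl32 helper below is
   a stand-in for bfd_getl32 so the example runs without BFD.  */

#include <stdio.h>

static unsigned int
getl32 (const unsigned char *p)		/* Little-endian fetch, any host.  */
{
  return p[0] | (p[1] << 8) | ((unsigned int) p[2] << 16)
	 | ((unsigned int) p[3] << 24);
}

int
main (void)
{
  /* BX LR (0xe12fff1e) as its bytes appear in a BE8 image.  */
  unsigned char code[4] = { 0x1e, 0xff, 0x2f, 0xe1 };

  printf ("insn = %#010x\n", getl32 (code));
  return 0;
}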
+
+/* Return size of plt0 entry starting at ADDR,
+   or (bfd_vma) -1 if the size cannot be determined.  */
+
+static bfd_vma
+elf32_arm_plt0_size (const bfd *abfd, const bfd_byte *addr)
+{
+  bfd_vma first_word;
+  bfd_vma plt0_size;
+
+  first_word = read_code32 (abfd, addr);
+
+  if (first_word == elf32_arm_plt0_entry[0])
+    plt0_size = 4 * ARRAY_SIZE (elf32_arm_plt0_entry);
+  else if (first_word == elf32_thumb2_plt0_entry[0])
+    plt0_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
+  else
+    /* We don't yet handle this PLT format.  */
+    return (bfd_vma) -1;
+
+  return plt0_size;
+}
+
+/* Return size of plt entry starting at offset OFFSET
+   of plt section located at address START,
+   or (bfd_vma) -1 if the size cannot be determined.  */
+
+static bfd_vma
+elf32_arm_plt_size (const bfd *abfd, const bfd_byte *start, bfd_vma offset)
+{
+  bfd_vma first_insn;
+  bfd_vma plt_size = 0;
+  const bfd_byte *addr = start + offset;
+
+  /* PLT entry size is fixed on Thumb-only platforms.  */
+  if (read_code32 (abfd, start) == elf32_thumb2_plt0_entry[0])
+    return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
+
+  /* Respect Thumb stub if necessary.  */
+  if (read_code16 (abfd, addr) == elf32_arm_plt_thumb_stub[0])
+    plt_size += 2 * ARRAY_SIZE (elf32_arm_plt_thumb_stub);
+
+  /* Strip immediate from first add.  */
+  first_insn = read_code32 (abfd, addr + plt_size) & 0xffffff00;
+
+#ifdef FOUR_WORD_PLT
+  if (first_insn == elf32_arm_plt_entry[0])
+    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry);
+#else
+  if (first_insn == elf32_arm_plt_entry_long[0])
+    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_long);
+  else if (first_insn == elf32_arm_plt_entry_short[0])
+    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_short);
+#endif
+  else
+    /* We don't yet handle this PLT format.  */
+    return (bfd_vma) -1;
+
+  return plt_size;
+}
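/* Editor's note: a self-contained sketch (not part of this patch) of
   how the two size helpers above combine to walk a .plt section: skip
   the header using the plt0 size, then advance entry by entry,
   stopping on an unrecognised format.  The fake_* functions and the
   byte counts are hypothetical stand-ins for
   elf32_arm_plt0_size/elf32_arm_plt_size.  */

#include <stdio.h>

static long fake_plt0_size (void) { return 20; }
static long fake_plt_entry_size (long offset) { return offset < 80 ? 12 : -1; }

int
main (void)
{
  long plt_size = 56;			/* 20-byte header + 3 entries.  */
  long offset = fake_plt0_size ();

  while (offset < plt_size)
    {
      long entry = fake_plt_entry_size (offset);
      if (entry < 0)
	break;				/* Unrecognised format: stop.  */
      printf ("PLT entry at offset %ld\n", offset);
      offset += entry;			/* One synthetic symbol per entry.  */
    }
  return 0;
}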
+
+/* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab.  */
+
+static long
+elf32_arm_get_synthetic_symtab (bfd *abfd,
+                              long symcount ATTRIBUTE_UNUSED,
+                              asymbol **syms ATTRIBUTE_UNUSED,
+                              long dynsymcount,
+                              asymbol **dynsyms,
+                              asymbol **ret)
+{
+  asection *relplt;
+  asymbol *s;
+  arelent *p;
+  long count, i, n;
+  size_t size;
+  Elf_Internal_Shdr *hdr;
+  char *names;
+  asection *plt;
+  bfd_vma offset;
+  bfd_byte *data;
+
+  *ret = NULL;
+
+  if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
+    return 0;
+
+  if (dynsymcount <= 0)
+    return 0;
+
+  relplt = bfd_get_section_by_name (abfd, ".rel.plt");
+  if (relplt == NULL)
+    return 0;
+
+  hdr = &elf_section_data (relplt)->this_hdr;
+  if (hdr->sh_link != elf_dynsymtab (abfd)
+      || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA))
+    return 0;
+
+  plt = bfd_get_section_by_name (abfd, ".plt");
+  if (plt == NULL)
+    return 0;
+
+  if (!elf32_arm_size_info.slurp_reloc_table (abfd, relplt, dynsyms, TRUE))
+    return -1;
+
+  data = plt->contents;
+  if (data == NULL)
+    {
+      if (!bfd_get_full_section_contents(abfd, (asection *) plt, &data) || data == NULL)
+       return -1;
+      bfd_cache_section_contents((asection *) plt, data);
+    }
+
+  count = relplt->size / hdr->sh_entsize;
+  size = count * sizeof (asymbol);
+  p = relplt->relocation;
+  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
+    {
+      size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
+      if (p->addend != 0)
+       size += sizeof ("+0x") - 1 + 8;
+    }
+
+  s = *ret = (asymbol *) bfd_malloc (size);
+  if (s == NULL)
+    return -1;
+
+  offset = elf32_arm_plt0_size (abfd, data);
+  if (offset == (bfd_vma) -1)
+    return -1;
+
+  names = (char *) (s + count);
+  p = relplt->relocation;
+  n = 0;
+  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
+    {
+      size_t len;
+
+      bfd_vma plt_size = elf32_arm_plt_size (abfd, data, offset);
+      if (plt_size == (bfd_vma) -1)
+       break;
+
+      *s = **p->sym_ptr_ptr;
+      /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set.  Since
+        we are defining a symbol, ensure one of them is set.  */
+      if ((s->flags & BSF_LOCAL) == 0)
+       s->flags |= BSF_GLOBAL;
+      s->flags |= BSF_SYNTHETIC;
+      s->section = plt;
+      s->value = offset;
+      s->name = names;
+      s->udata.p = NULL;
+      len = strlen ((*p->sym_ptr_ptr)->name);
+      memcpy (names, (*p->sym_ptr_ptr)->name, len);
+      names += len;
+      if (p->addend != 0)
+       {
+         char buf[30], *a;
+
+         memcpy (names, "+0x", sizeof ("+0x") - 1);
+         names += sizeof ("+0x") - 1;
+         bfd_sprintf_vma (abfd, buf, p->addend);
+         for (a = buf; *a == '0'; ++a)
+           ;
+         len = strlen (a);
+         memcpy (names, a, len);
+         names += len;
+       }
+      memcpy (names, "@plt", sizeof ("@plt"));
+      names += sizeof ("@plt");
+      ++s, ++n;
+      offset += plt_size;
+    }
+
+  return n;
+}
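/* Editor's note: a hedged usage sketch (not part of this patch)
   showing how a client such as objdump obtains the synthetic
   "foo@plt" symbols through the generic bfd_get_synthetic_symtab
   entry point, which dispatches to elf32_arm_get_synthetic_symtab
   for ARM targets.  Error handling is minimal and "input.so" is a
   placeholder file name.  */

#include <stdio.h>
#include <stdlib.h>
#include "bfd.h"

int
main (void)
{
  bfd *abfd;
  asymbol **dynsyms, *synth = NULL;
  long dyncount, storage, synthcount, i;

  bfd_init ();
  abfd = bfd_openr ("input.so", NULL);
  if (abfd == NULL || !bfd_check_format (abfd, bfd_object))
    return 1;

  storage = bfd_get_dynamic_symtab_upper_bound (abfd);
  if (storage <= 0)
    return 1;
  dynsyms = malloc (storage);
  dyncount = bfd_canonicalize_dynamic_symtab (abfd, dynsyms);

  /* Synthetic PLT symbols are derived from the dynamic relocations.  */
  synthcount = bfd_get_synthetic_symtab (abfd, 0, NULL,
					 dyncount, dynsyms, &synth);
  for (i = 0; i < synthcount; i++)
    printf ("%s at %#lx\n", synth[i].name,
	    (unsigned long) synth[i].value);
  return 0;
}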
+
+static bfd_boolean
+elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr * hdr)
+{
+  if (hdr->sh_flags & SHF_ARM_PURECODE)
+    *flags |= SEC_ELF_PURECODE;
+  return TRUE;
+}
+
+static flagword
+elf32_arm_lookup_section_flags (char *flag_name)
+{
+  if (!strcmp (flag_name, "SHF_ARM_PURECODE"))
+    return SHF_ARM_PURECODE;
+
+  return SEC_NO_FLAGS;
+}
+
+static unsigned int
+elf32_arm_count_additional_relocs (asection *sec)
+{
+  struct _arm_elf_section_data *arm_data;
+  arm_data = get_arm_elf_section_data (sec);
+  return arm_data == NULL ? 0 : arm_data->additional_reloc_count;
+}
+
+/* Called to set the sh_flags, sh_link and sh_info fields of OSECTION which
+   has a type >= SHT_LOOS.  Returns TRUE if these fields were initialised,
+   FALSE otherwise.  ISECTION is the best guess matching section from the
+   input bfd IBFD, but it might be NULL.  */
+
+static bfd_boolean
+elf32_arm_copy_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
+                                      bfd *obfd ATTRIBUTE_UNUSED,
+                                      const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
+                                      Elf_Internal_Shdr *osection)
+{
+  switch (osection->sh_type)
+    {
+    case SHT_ARM_EXIDX:
+      {
+       Elf_Internal_Shdr **oheaders = elf_elfsections (obfd);
+       Elf_Internal_Shdr **iheaders = elf_elfsections (ibfd);
+       unsigned i = 0;
+
+       osection->sh_flags = SHF_ALLOC | SHF_LINK_ORDER;
+       osection->sh_info = 0;
+
+       /* The sh_link field must be set to the text section associated with
+          this index section.  Unfortunately the ARM EHABI does not specify
+          exactly how to determine this association.  Our caller does try
+          to match up OSECTION with its corresponding input section however
+          so that is a good first guess.  */
+       if (isection != NULL
+           && osection->bfd_section != NULL
+           && isection->bfd_section != NULL
+           && isection->bfd_section->output_section != NULL
+           && isection->bfd_section->output_section == osection->bfd_section
+           && iheaders != NULL
+           && isection->sh_link > 0
+           && isection->sh_link < elf_numsections (ibfd)
+           && iheaders[isection->sh_link]->bfd_section != NULL
+           && iheaders[isection->sh_link]->bfd_section->output_section != NULL
+           )
+         {
+           for (i = elf_numsections (obfd); i-- > 0;)
+             if (oheaders[i]->bfd_section
+                 == iheaders[isection->sh_link]->bfd_section->output_section)
+               break;
+         }
+
+       if (i == 0)
+         {
+           /* Failing that we have to find a matching section ourselves.  If
+              we had the output section name available we could compare that
+              with input section names.  Unfortunately we don't.  So instead
+              we use a simple heuristic and look for the nearest executable
+              section before this one.  */
+           for (i = elf_numsections (obfd); i-- > 0;)
+             if (oheaders[i] == osection)
+               break;
+           if (i == 0)
+             break;
+
+           while (i-- > 0)
+             if (oheaders[i]->sh_type == SHT_PROGBITS
+                 && (oheaders[i]->sh_flags & (SHF_ALLOC | SHF_EXECINSTR))
+                 == (SHF_ALLOC | SHF_EXECINSTR))
+               break;
+         }
+
+       if (i)
+         {
+           osection->sh_link = i;
+           /* If the text section was part of a group
+              then the index section should be too.  */
+           if (oheaders[i]->sh_flags & SHF_GROUP)
+             osection->sh_flags |= SHF_GROUP;
+           return TRUE;
+         }
+      }
+      break;
+
+    case SHT_ARM_PREEMPTMAP:
+      osection->sh_flags = SHF_ALLOC;
+      break;
+
+    case SHT_ARM_ATTRIBUTES:
+    case SHT_ARM_DEBUGOVERLAY:
+    case SHT_ARM_OVERLAYSECTION:
+    default:
+      break;
+    }
+
+  return FALSE;
+}
+
+/* Returns TRUE if NAME is an ARM mapping symbol.
+   Traditionally the symbols $a, $d and $t have been used.
+   The ARM ELF standard also defines $x (for A64 code).  It also allows a
+   period-initiated suffix to be added to the symbol: "$[adtx]\.[:sym_char]+".
+   Other tools might also produce $b (Thumb BL), $f, $p, $m and $v, but we do
+   not support them here.  $t.x indicates the start of ThumbEE instructions.  */
+
+static bfd_boolean
+is_arm_mapping_symbol (const char * name)
+{
+  return name != NULL /* Paranoia.  */
+    && name[0] == '$' /* Note: if objcopy --prefix-symbols has been used then
+                        the mapping symbols could have acquired a prefix.
+                        We do not support this here, since such symbols no
+                        longer conform to the ARM ELF ABI.  */
+    && (name[1] == 'a' || name[1] == 'd' || name[1] == 't' || name[1] == 'x')
+    && (name[2] == 0 || name[2] == '.');
+  /* FIXME: Strictly speaking the symbol is only a valid mapping symbol if
+     any characters that follow the period are legal characters for the body
+     of a symbol's name.  For now we just assume that this is the case.  */
+}
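/* Editor's note: a few self-contained checks (not part of this patch)
   exercising the predicate above.  my_is_arm_mapping_symbol repeats
   the same tests for demonstration so the example runs on its own.  */

#include <stdio.h>

static int
my_is_arm_mapping_symbol (const char *name)
{
  return name != NULL
    && name[0] == '$'
    && (name[1] == 'a' || name[1] == 'd' || name[1] == 't' || name[1] == 'x')
    && (name[2] == 0 || name[2] == '.');
}

int
main (void)
{
  /* "$t.x" (ThumbEE) matches; "$m" and "$t2" do not.  */
  const char *tests[] = { "$a", "$d", "$t.x", "$m", "$t2", "foo" };
  unsigned int i;

  for (i = 0; i < sizeof tests / sizeof tests[0]; i++)
    printf ("%-5s -> %d\n", tests[i], my_is_arm_mapping_symbol (tests[i]));
  return 0;
}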
+
+/* Make sure that mapping symbols in object files are not removed via the
+   "strip --strip-unneeded" tool.  These symbols are needed in order to
+   correctly generate interworking veneers, and for byte swapping code
+   regions.  Once an object file has been linked, it is safe to remove the
+   symbols as they will no longer be needed.  */
+
+static void
+elf32_arm_backend_symbol_processing (bfd *abfd, asymbol *sym)
+{
+  if (((abfd->flags & (EXEC_P | DYNAMIC)) == 0)
+      && sym->section != bfd_abs_section_ptr
+      && is_arm_mapping_symbol (sym->name))
+    sym->flags |= BSF_KEEP;
+}
+
+#undef  elf_backend_copy_special_section_fields
+#define elf_backend_copy_special_section_fields elf32_arm_copy_special_section_fields
+
 #define ELF_ARCH                       bfd_arch_arm
 #define ELF_TARGET_ID                  ARM_ELF_DATA
 #define ELF_MACHINE_CODE               EM_ARM
 #ifdef __QNXTARGET__
 #define ELF_MAXPAGESIZE                        0x1000
 #else
-#define ELF_MAXPAGESIZE                        0x8000
+#define ELF_MAXPAGESIZE                        0x10000
 #endif
 #define ELF_MINPAGESIZE                        0x1000
 #define ELF_COMMONPAGESIZE             0x1000
@@ -15755,7 +19212,6 @@ const struct elf_size_info elf32_arm_size_info =
 #define bfd_elf32_bfd_set_private_flags                elf32_arm_set_private_flags
 #define bfd_elf32_bfd_print_private_bfd_data   elf32_arm_print_private_bfd_data
 #define bfd_elf32_bfd_link_hash_table_create    elf32_arm_link_hash_table_create
-#define bfd_elf32_bfd_link_hash_table_free      elf32_arm_hash_table_free
 #define bfd_elf32_bfd_reloc_type_lookup                elf32_arm_reloc_type_lookup
 #define bfd_elf32_bfd_reloc_name_lookup                elf32_arm_reloc_name_lookup
 #define bfd_elf32_find_nearest_line            elf32_arm_find_nearest_line
@@ -15763,6 +19219,7 @@ const struct elf_size_info elf32_arm_size_info =
 #define bfd_elf32_new_section_hook             elf32_arm_new_section_hook
 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
 #define bfd_elf32_bfd_final_link               elf32_arm_final_link
+#define bfd_elf32_get_synthetic_symtab  elf32_arm_get_synthetic_symtab
 
 #define elf_backend_get_symbol_type             elf32_arm_get_symbol_type
 #define elf_backend_gc_mark_hook                elf32_arm_gc_mark_hook
@@ -15789,8 +19246,11 @@ const struct elf_size_info elf32_arm_size_info =
 #define elf_backend_modify_segment_map         elf32_arm_modify_segment_map
 #define elf_backend_additional_program_headers  elf32_arm_additional_program_headers
 #define elf_backend_output_arch_local_syms      elf32_arm_output_arch_local_syms
+#define elf_backend_filter_implib_symbols      elf32_arm_filter_implib_symbols
 #define elf_backend_begin_write_processing      elf32_arm_begin_write_processing
 #define elf_backend_add_symbol_hook            elf32_arm_add_symbol_hook
+#define elf_backend_count_additional_relocs    elf32_arm_count_additional_relocs
+#define elf_backend_symbol_processing          elf32_arm_backend_symbol_processing
 
 #define elf_backend_can_refcount       1
 #define elf_backend_can_gc_sections    1
@@ -15802,6 +19262,7 @@ const struct elf_size_info elf32_arm_size_info =
 #define elf_backend_default_use_rela_p 0
 
 #define elf_backend_got_header_size    12
+#define elf_backend_extern_protected_data 1
 
 #undef  elf_backend_obj_attrs_vendor
 #define elf_backend_obj_attrs_vendor           "aeabi"
@@ -15814,16 +19275,21 @@ const struct elf_size_info elf32_arm_size_info =
 #define elf_backend_obj_attrs_order            elf32_arm_obj_attrs_order
 #define elf_backend_obj_attrs_handle_unknown   elf32_arm_obj_attrs_handle_unknown
 
+#undef elf_backend_section_flags
+#define elf_backend_section_flags              elf32_arm_section_flags
+#undef elf_backend_lookup_section_flags_hook
+#define elf_backend_lookup_section_flags_hook   elf32_arm_lookup_section_flags
+
 #include "elf32-target.h"
 
 /* Native Client targets.  */
 
 #undef TARGET_LITTLE_SYM
-#define TARGET_LITTLE_SYM              bfd_elf32_littlearm_nacl_vec
+#define TARGET_LITTLE_SYM              arm_elf32_nacl_le_vec
 #undef TARGET_LITTLE_NAME
 #define TARGET_LITTLE_NAME             "elf32-littlearm-nacl"
 #undef TARGET_BIG_SYM
-#define TARGET_BIG_SYM                 bfd_elf32_bigarm_nacl_vec
+#define TARGET_BIG_SYM                 arm_elf32_nacl_be_vec
 #undef TARGET_BIG_NAME
 #define TARGET_BIG_NAME                        "elf32-bigarm-nacl"
 
@@ -15867,23 +19333,33 @@ elf32_arm_nacl_final_write_processing (bfd *abfd, bfd_boolean linker)
   nacl_final_write_processing (abfd, linker);
 }
 
+static bfd_vma
+elf32_arm_nacl_plt_sym_val (bfd_vma i, const asection *plt,
+                           const arelent *rel ATTRIBUTE_UNUSED)
+{
+  return plt->vma
+    + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry) +
+          i * ARRAY_SIZE (elf32_arm_nacl_plt_entry));
+}
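/* Editor's note: a worked example (not part of this patch) of the
   address computation above.  The plt0/plt entry word counts (16 and
   4) and the .plt address are purely illustrative; the real sizes
   come from elf32_arm_nacl_plt0_entry/elf32_arm_nacl_plt_entry
   defined earlier in this file.  */

#include <stdio.h>

int
main (void)
{
  unsigned long plt_vma = 0x10400;	/* Hypothetical .plt address.  */
  unsigned long plt0_words = 16, entry_words = 4, i = 2;

  /* Entry i sits after the header and i preceding entries: 0x10460.  */
  printf ("PLT symbol %lu at %#lx\n", i,
	  plt_vma + 4 * (plt0_words + i * entry_words));
  return 0;
}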
 
 #undef elf32_bed
-#define elf32_bed                      elf32_arm_nacl_bed
+#define elf32_bed                              elf32_arm_nacl_bed
 #undef  bfd_elf32_bfd_link_hash_table_create
 #define bfd_elf32_bfd_link_hash_table_create   \
   elf32_arm_nacl_link_hash_table_create
 #undef elf_backend_plt_alignment
-#define elf_backend_plt_alignment      4
+#define elf_backend_plt_alignment              4
 #undef elf_backend_modify_segment_map
 #define        elf_backend_modify_segment_map          elf32_arm_nacl_modify_segment_map
 #undef elf_backend_modify_program_headers
 #define        elf_backend_modify_program_headers      nacl_modify_program_headers
 #undef  elf_backend_final_write_processing
 #define elf_backend_final_write_processing     elf32_arm_nacl_final_write_processing
+#undef bfd_elf32_get_synthetic_symtab
+#undef  elf_backend_plt_sym_val
+#define elf_backend_plt_sym_val                        elf32_arm_nacl_plt_sym_val
+#undef  elf_backend_copy_special_section_fields
 
-#undef ELF_MAXPAGESIZE
-#define ELF_MAXPAGESIZE                        0x10000
 #undef ELF_MINPAGESIZE
 #undef ELF_COMMONPAGESIZE
 
@@ -15906,11 +19382,11 @@ elf32_arm_nacl_final_write_processing (bfd *abfd, bfd_boolean linker)
 /* VxWorks Targets.  */
 
 #undef  TARGET_LITTLE_SYM
-#define TARGET_LITTLE_SYM               bfd_elf32_littlearm_vxworks_vec
+#define TARGET_LITTLE_SYM               arm_elf32_vxworks_le_vec
 #undef  TARGET_LITTLE_NAME
 #define TARGET_LITTLE_NAME              "elf32-littlearm-vxworks"
 #undef  TARGET_BIG_SYM
-#define TARGET_BIG_SYM                  bfd_elf32_bigarm_vxworks_vec
+#define TARGET_BIG_SYM                  arm_elf32_vxworks_be_vec
 #undef  TARGET_BIG_NAME
 #define TARGET_BIG_NAME                 "elf32-bigarm-vxworks"
 
@@ -16192,11 +19668,11 @@ elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
 /* Symbian OS Targets.  */
 
 #undef  TARGET_LITTLE_SYM
-#define TARGET_LITTLE_SYM               bfd_elf32_littlearm_symbian_vec
+#define TARGET_LITTLE_SYM               arm_elf32_symbian_le_vec
 #undef  TARGET_LITTLE_NAME
 #define TARGET_LITTLE_NAME              "elf32-littlearm-symbian"
 #undef  TARGET_BIG_SYM
-#define TARGET_BIG_SYM                  bfd_elf32_bigarm_symbian_vec
+#define TARGET_BIG_SYM                  arm_elf32_symbian_be_vec
 #undef  TARGET_BIG_NAME
 #define TARGET_BIG_NAME                 "elf32-bigarm-symbian"
 
@@ -16302,7 +19778,6 @@ elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
   return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
 }
 
-
 #undef  elf32_bed
 #define elf32_bed elf32_arm_symbian_bed
 