1 /* SPU specific support for 32-bit ELF
2
3 Copyright 2006, 2007 Free Software Foundation, Inc.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
20
21 #include "bfd.h"
22 #include "sysdep.h"
23 #include "bfdlink.h"
24 #include "libbfd.h"
25 #include "elf-bfd.h"
26 #include "elf/spu.h"
27 #include "elf32-spu.h"
28
29 /* We use RELA style relocs. Don't define USE_REL. */
30
31 static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
32 void *, asection *,
33 bfd *, char **);
34
35 /* Values of type 'enum elf_spu_reloc_type' are used to index this
36 array, so it must be declared in the order of that type. */
37
38 static reloc_howto_type elf_howto_table[] = {
39 HOWTO (R_SPU_NONE, 0, 0, 0, FALSE, 0, complain_overflow_dont,
40 bfd_elf_generic_reloc, "SPU_NONE",
41 FALSE, 0, 0x00000000, FALSE),
42 HOWTO (R_SPU_ADDR10, 4, 2, 10, FALSE, 14, complain_overflow_bitfield,
43 bfd_elf_generic_reloc, "SPU_ADDR10",
44 FALSE, 0, 0x00ffc000, FALSE),
45 HOWTO (R_SPU_ADDR16, 2, 2, 16, FALSE, 7, complain_overflow_bitfield,
46 bfd_elf_generic_reloc, "SPU_ADDR16",
47 FALSE, 0, 0x007fff80, FALSE),
48 HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE, 7, complain_overflow_bitfield,
49 bfd_elf_generic_reloc, "SPU_ADDR16_HI",
50 FALSE, 0, 0x007fff80, FALSE),
51 HOWTO (R_SPU_ADDR16_LO, 0, 2, 16, FALSE, 7, complain_overflow_dont,
52 bfd_elf_generic_reloc, "SPU_ADDR16_LO",
53 FALSE, 0, 0x007fff80, FALSE),
54 HOWTO (R_SPU_ADDR18, 0, 2, 18, FALSE, 7, complain_overflow_bitfield,
55 bfd_elf_generic_reloc, "SPU_ADDR18",
56 FALSE, 0, 0x01ffff80, FALSE),
57 HOWTO (R_SPU_ADDR32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
58 bfd_elf_generic_reloc, "SPU_ADDR32",
59 FALSE, 0, 0xffffffff, FALSE),
60 HOWTO (R_SPU_REL16, 2, 2, 16, TRUE, 7, complain_overflow_bitfield,
61 bfd_elf_generic_reloc, "SPU_REL16",
62 FALSE, 0, 0x007fff80, TRUE),
63 HOWTO (R_SPU_ADDR7, 0, 2, 7, FALSE, 14, complain_overflow_dont,
64 bfd_elf_generic_reloc, "SPU_ADDR7",
65 FALSE, 0, 0x001fc000, FALSE),
66 HOWTO (R_SPU_REL9, 2, 2, 9, TRUE, 0, complain_overflow_signed,
67 spu_elf_rel9, "SPU_REL9",
68 FALSE, 0, 0x0180007f, TRUE),
69 HOWTO (R_SPU_REL9I, 2, 2, 9, TRUE, 0, complain_overflow_signed,
70 spu_elf_rel9, "SPU_REL9I",
71 FALSE, 0, 0x0000c07f, TRUE),
72 HOWTO (R_SPU_ADDR10I, 0, 2, 10, FALSE, 14, complain_overflow_signed,
73 bfd_elf_generic_reloc, "SPU_ADDR10I",
74 FALSE, 0, 0x00ffc000, FALSE),
75 HOWTO (R_SPU_ADDR16I, 0, 2, 16, FALSE, 7, complain_overflow_signed,
76 bfd_elf_generic_reloc, "SPU_ADDR16I",
77 FALSE, 0, 0x007fff80, FALSE),
78 HOWTO (R_SPU_REL32, 0, 2, 32, TRUE, 0, complain_overflow_dont,
79 bfd_elf_generic_reloc, "SPU_REL32",
80 FALSE, 0, 0xffffffff, TRUE),
81 };
82
83 static struct bfd_elf_special_section const spu_elf_special_sections[] = {
84 { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
85 { NULL, 0, 0, 0, 0 }
86 };
87
88 static enum elf_spu_reloc_type
89 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
90 {
91 switch (code)
92 {
93 default:
94 return R_SPU_NONE;
95 case BFD_RELOC_SPU_IMM10W:
96 return R_SPU_ADDR10;
97 case BFD_RELOC_SPU_IMM16W:
98 return R_SPU_ADDR16;
99 case BFD_RELOC_SPU_LO16:
100 return R_SPU_ADDR16_LO;
101 case BFD_RELOC_SPU_HI16:
102 return R_SPU_ADDR16_HI;
103 case BFD_RELOC_SPU_IMM18:
104 return R_SPU_ADDR18;
105 case BFD_RELOC_SPU_PCREL16:
106 return R_SPU_REL16;
107 case BFD_RELOC_SPU_IMM7:
108 return R_SPU_ADDR7;
109 case BFD_RELOC_SPU_IMM8:
110 return R_SPU_NONE;
111 case BFD_RELOC_SPU_PCREL9a:
112 return R_SPU_REL9;
113 case BFD_RELOC_SPU_PCREL9b:
114 return R_SPU_REL9I;
115 case BFD_RELOC_SPU_IMM10:
116 return R_SPU_ADDR10I;
117 case BFD_RELOC_SPU_IMM16:
118 return R_SPU_ADDR16I;
119 case BFD_RELOC_32:
120 return R_SPU_ADDR32;
121 case BFD_RELOC_32_PCREL:
122 return R_SPU_REL32;
123 }
124 }
125
126 static void
127 spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
128 arelent *cache_ptr,
129 Elf_Internal_Rela *dst)
130 {
131 enum elf_spu_reloc_type r_type;
132
133 r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
134 BFD_ASSERT (r_type < R_SPU_max);
135 cache_ptr->howto = &elf_howto_table[(int) r_type];
136 }
137
138 static reloc_howto_type *
139 spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
140 bfd_reloc_code_real_type code)
141 {
142 return elf_howto_table + spu_elf_bfd_to_reloc_type (code);
143 }
144
145 /* Apply R_SPU_REL9 and R_SPU_REL9I relocs. */
146
147 static bfd_reloc_status_type
148 spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
149 void *data, asection *input_section,
150 bfd *output_bfd, char **error_message)
151 {
152 bfd_size_type octets;
153 bfd_vma val;
154 long insn;
155
156 /* If this is a relocatable link (output_bfd test tells us), just
157 call the generic function. Any adjustment will be done at final
158 link time. */
159 if (output_bfd != NULL)
160 return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
161 input_section, output_bfd, error_message);
162
163 if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
164 return bfd_reloc_outofrange;
165 octets = reloc_entry->address * bfd_octets_per_byte (abfd);
166
167 /* Get symbol value. */
168 val = 0;
169 if (!bfd_is_com_section (symbol->section))
170 val = symbol->value;
171 if (symbol->section->output_section)
172 val += symbol->section->output_section->vma;
173
174 val += reloc_entry->addend;
175
176 /* Make it pc-relative. */
177 val -= input_section->output_section->vma + input_section->output_offset;
178
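      /* Branch offsets count words, so convert the byte offset and
	 check the signed 9-bit range.  */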
179 val >>= 2;
180 if (val + 256 >= 512)
181 return bfd_reloc_overflow;
182
183 insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);
184
185 /* Move two high bits of value to REL9I and REL9 position.
186 The mask will take care of selecting the right field. */
187 val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
188 insn &= ~reloc_entry->howto->dst_mask;
189 insn |= val & reloc_entry->howto->dst_mask;
190 bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
191 return bfd_reloc_ok;
192 }
193
194 static bfd_boolean
195 spu_elf_new_section_hook (bfd *abfd, asection *sec)
196 {
197 if (!sec->used_by_bfd)
198 {
199 struct _spu_elf_section_data *sdata;
200
201 sdata = bfd_zalloc (abfd, sizeof (*sdata));
202 if (sdata == NULL)
203 return FALSE;
204 sec->used_by_bfd = sdata;
205 }
206
207 return _bfd_elf_new_section_hook (abfd, sec);
208 }
209
210 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
211 strip --strip-unneeded will not remove them. */
212
213 static void
214 spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
215 {
216 if (sym->name != NULL
217 && sym->section != bfd_abs_section_ptr
218 && strncmp (sym->name, "_EAR_", 5) == 0)
219 sym->flags |= BSF_KEEP;
220 }
221
222 /* SPU ELF linker hash table. */
223
224 struct spu_link_hash_table
225 {
226 struct elf_link_hash_table elf;
227
228 /* The stub hash table. */
229 struct bfd_hash_table stub_hash_table;
230
231 /* Shortcuts to overlay sections. */
232 asection *stub;
233 asection *ovtab;
234
235 struct elf_link_hash_entry *ovly_load;
236
237 /* An array of two output sections per overlay region, chosen such that
238 the first section vma is the overlay buffer vma (ie. the section has
 239      the lowest vma in the group that occupies the region), and the second
240 section vma+size specifies the end of the region. We keep pointers
241 to sections like this because section vmas may change when laying
242 them out. */
243 asection **ovl_region;
244
245 /* Number of overlay buffers. */
246 unsigned int num_buf;
247
248 /* Total number of overlays. */
249 unsigned int num_overlays;
250
251 /* Set if we should emit symbols for stubs. */
252 unsigned int emit_stub_syms:1;
253
254 /* Set if we want stubs on calls out of overlay regions to
255 non-overlay regions. */
256 unsigned int non_overlay_stubs : 1;
257
258 /* Set on error. */
259 unsigned int stub_overflow : 1;
260 };
261
262 #define spu_hash_table(p) \
263 ((struct spu_link_hash_table *) ((p)->hash))
264
265 struct spu_stub_hash_entry
266 {
267 struct bfd_hash_entry root;
268
269 /* Destination of this stub. */
270 asection *target_section;
271 bfd_vma target_off;
272
273 /* Offset of entry in stub section. */
274 bfd_vma off;
275
276 /* Offset from this stub to stub that loads the overlay index. */
277 bfd_vma delta;
278 };
279
280 /* Create an entry in a spu stub hash table. */
281
282 static struct bfd_hash_entry *
283 stub_hash_newfunc (struct bfd_hash_entry *entry,
284 struct bfd_hash_table *table,
285 const char *string)
286 {
287 /* Allocate the structure if it has not already been allocated by a
288 subclass. */
289 if (entry == NULL)
290 {
291 entry = bfd_hash_allocate (table, sizeof (struct spu_stub_hash_entry));
292 if (entry == NULL)
293 return entry;
294 }
295
296 /* Call the allocation method of the superclass. */
297 entry = bfd_hash_newfunc (entry, table, string);
298 if (entry != NULL)
299 {
300 struct spu_stub_hash_entry *sh = (struct spu_stub_hash_entry *) entry;
301
302 sh->target_section = NULL;
303 sh->target_off = 0;
304 sh->off = 0;
305 sh->delta = 0;
306 }
307
308 return entry;
309 }
310
311 /* Create a spu ELF linker hash table. */
312
313 static struct bfd_link_hash_table *
314 spu_elf_link_hash_table_create (bfd *abfd)
315 {
316 struct spu_link_hash_table *htab;
317
318 htab = bfd_malloc (sizeof (*htab));
319 if (htab == NULL)
320 return NULL;
321
322 if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
323 _bfd_elf_link_hash_newfunc,
324 sizeof (struct elf_link_hash_entry)))
325 {
326 free (htab);
327 return NULL;
328 }
329
330 /* Init the stub hash table too. */
331 if (!bfd_hash_table_init (&htab->stub_hash_table, stub_hash_newfunc,
332 sizeof (struct spu_stub_hash_entry)))
333 return NULL;
334
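   /* Zero everything from the stub section pointer to the end of
      the structure.  */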
335 memset (&htab->stub, 0,
336 sizeof (*htab) - offsetof (struct spu_link_hash_table, stub));
337
338 return &htab->elf.root;
339 }
340
341 /* Free the derived linker hash table. */
342
343 static void
344 spu_elf_link_hash_table_free (struct bfd_link_hash_table *hash)
345 {
346 struct spu_link_hash_table *ret = (struct spu_link_hash_table *) hash;
347
348 bfd_hash_table_free (&ret->stub_hash_table);
349 _bfd_generic_link_hash_table_free (hash);
350 }
351
352 /* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
353 to (hash, NULL) for global symbols, and (NULL, sym) for locals. Set
354 *SYMSECP to the symbol's section. *LOCSYMSP caches local syms. */
355
356 static bfd_boolean
357 get_sym_h (struct elf_link_hash_entry **hp,
358 Elf_Internal_Sym **symp,
359 asection **symsecp,
360 Elf_Internal_Sym **locsymsp,
361 unsigned long r_symndx,
362 bfd *ibfd)
363 {
364 Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
365
366 if (r_symndx >= symtab_hdr->sh_info)
367 {
368 struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
369 struct elf_link_hash_entry *h;
370
371 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
372 while (h->root.type == bfd_link_hash_indirect
373 || h->root.type == bfd_link_hash_warning)
374 h = (struct elf_link_hash_entry *) h->root.u.i.link;
375
376 if (hp != NULL)
377 *hp = h;
378
379 if (symp != NULL)
380 *symp = NULL;
381
382 if (symsecp != NULL)
383 {
384 asection *symsec = NULL;
385 if (h->root.type == bfd_link_hash_defined
386 || h->root.type == bfd_link_hash_defweak)
387 symsec = h->root.u.def.section;
388 *symsecp = symsec;
389 }
390 }
391 else
392 {
393 Elf_Internal_Sym *sym;
394 Elf_Internal_Sym *locsyms = *locsymsp;
395
396 if (locsyms == NULL)
397 {
398 locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
399 if (locsyms == NULL)
400 locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr,
401 symtab_hdr->sh_info,
402 0, NULL, NULL, NULL);
403 if (locsyms == NULL)
404 return FALSE;
405 *locsymsp = locsyms;
406 }
407 sym = locsyms + r_symndx;
408
409 if (hp != NULL)
410 *hp = NULL;
411
412 if (symp != NULL)
413 *symp = sym;
414
415 if (symsecp != NULL)
416 {
417 asection *symsec = NULL;
418 if ((sym->st_shndx != SHN_UNDEF
419 && sym->st_shndx < SHN_LORESERVE)
420 || sym->st_shndx > SHN_HIRESERVE)
421 symsec = bfd_section_from_elf_index (ibfd, sym->st_shndx);
422 *symsecp = symsec;
423 }
424 }
425 return TRUE;
426 }
427
428 /* Build a name for an entry in the stub hash table. The input section
429 id isn't really necessary but we add that in for consistency with
430 ppc32 and ppc64 stub names. We can't use a local symbol name
431 because ld -r might generate duplicate local symbols. */
432
433 static char *
434 spu_stub_name (const asection *input_sec,
435 const asection *sym_sec,
436 const struct elf_link_hash_entry *h,
437 const Elf_Internal_Rela *rel)
438 {
439 char *stub_name;
440 bfd_size_type len;
441
442 if (h)
443 {
444 len = 8 + 1 + strlen (h->root.root.string) + 1 + 8 + 1;
445 stub_name = bfd_malloc (len);
446 if (stub_name == NULL)
447 return stub_name;
448
449 sprintf (stub_name, "%08x.%s+%x",
450 input_sec->id & 0xffffffff,
451 h->root.root.string,
452 (int) rel->r_addend & 0xffffffff);
453 len -= 8;
454 }
455 else
456 {
457 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1;
458 stub_name = bfd_malloc (len);
459 if (stub_name == NULL)
460 return stub_name;
461
462 sprintf (stub_name, "%08x.%x:%x+%x",
463 input_sec->id & 0xffffffff,
464 sym_sec->id & 0xffffffff,
465 (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
466 (int) rel->r_addend & 0xffffffff);
467 len = strlen (stub_name);
468 }
469
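   /* Strip a trailing "+0" so that the common zero-addend case gets a
      shorter name.  */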
470 if (stub_name[len - 2] == '+'
471 && stub_name[len - 1] == '0'
472 && stub_name[len] == 0)
473 stub_name[len - 2] = 0;
474
475 return stub_name;
476 }
477
478 /* Create the note section if not already present. This is done early so
479 that the linker maps the sections to the right place in the output. */
480
481 bfd_boolean
482 spu_elf_create_sections (bfd *output_bfd, struct bfd_link_info *info)
483 {
484 bfd *ibfd;
485
486 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->next)
487 if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
488 break;
489
490 if (ibfd == NULL)
491 {
492 /* Make SPU_PTNOTE_SPUNAME section. */
493 asection *s;
494 size_t name_len;
495 size_t size;
496 bfd_byte *data;
497 flagword flags;
498
499 ibfd = info->input_bfds;
500 flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
501 s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
502 if (s == NULL
503 || !bfd_set_section_alignment (ibfd, s, 4))
504 return FALSE;
505
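       /* Lay out a standard ELF note: namesz, descsz, type, then the
	  name (SPU_PLUGIN_NAME) and the descriptor (the output file
	  name), each padded to a multiple of four bytes.  */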
506 name_len = strlen (bfd_get_filename (output_bfd)) + 1;
507 size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
508 size += (name_len + 3) & -4;
509
510 if (!bfd_set_section_size (ibfd, s, size))
511 return FALSE;
512
513 data = bfd_zalloc (ibfd, size);
514 if (data == NULL)
515 return FALSE;
516
517 bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
518 bfd_put_32 (ibfd, name_len, data + 4);
519 bfd_put_32 (ibfd, 1, data + 8);
520 memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
521 memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
522 bfd_get_filename (output_bfd), name_len);
523 s->contents = data;
524 }
525
526 return TRUE;
527 }
528
529 /* Return the section that should be marked against GC for a given
530 relocation. */
531
532 static asection *
533 spu_elf_gc_mark_hook (asection *sec,
534 struct bfd_link_info *info ATTRIBUTE_UNUSED,
535 Elf_Internal_Rela *rel ATTRIBUTE_UNUSED,
536 struct elf_link_hash_entry *h,
537 Elf_Internal_Sym *sym)
538 {
539 if (h != NULL)
540 {
541 switch (h->root.type)
542 {
543 case bfd_link_hash_defined:
544 case bfd_link_hash_defweak:
545 return h->root.u.def.section;
546
547 case bfd_link_hash_common:
548 return h->root.u.c.p->section;
549
550 default:
551 break;
552 }
553 }
554 else
555 return bfd_section_from_elf_index (sec->owner, sym->st_shndx);
556
557 return NULL;
558 }
559
560 /* qsort predicate to sort sections by vma. */
561
562 static int
563 sort_sections (const void *a, const void *b)
564 {
565 const asection *const *s1 = a;
566 const asection *const *s2 = b;
567 bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;
568
569 if (delta != 0)
570 return delta < 0 ? -1 : 1;
571
572 return (*s1)->index - (*s2)->index;
573 }
574
575 /* Identify overlays in the output bfd, and number them. */
576
577 bfd_boolean
578 spu_elf_find_overlays (bfd *output_bfd, struct bfd_link_info *info)
579 {
580 struct spu_link_hash_table *htab = spu_hash_table (info);
581 asection **alloc_sec;
582 unsigned int i, n, ovl_index, num_buf;
583 asection *s;
584 bfd_vma ovl_end;
585
586 if (output_bfd->section_count < 2)
587 return FALSE;
588
589 alloc_sec = bfd_malloc (output_bfd->section_count * sizeof (*alloc_sec));
590 if (alloc_sec == NULL)
591 return FALSE;
592
593 /* Pick out all the alloced sections. */
594 for (n = 0, s = output_bfd->sections; s != NULL; s = s->next)
595 if ((s->flags & SEC_ALLOC) != 0
596 && (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
597 && s->size != 0)
598 alloc_sec[n++] = s;
599
600 if (n == 0)
601 {
602 free (alloc_sec);
603 return FALSE;
604 }
605
606 /* Sort them by vma. */
607 qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);
608
609 /* Look for overlapping vmas. Any with overlap must be overlays.
610 Count them. Also count the number of overlay regions and for
611 each region save a section from that region with the lowest vma
612 and another section with the highest end vma. */
613 ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
614 for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
615 {
616 s = alloc_sec[i];
617 if (s->vma < ovl_end)
618 {
619 asection *s0 = alloc_sec[i - 1];
620
621 if (spu_elf_section_data (s0)->ovl_index == 0)
622 {
623 spu_elf_section_data (s0)->ovl_index = ++ovl_index;
624 alloc_sec[num_buf * 2] = s0;
625 alloc_sec[num_buf * 2 + 1] = s0;
626 num_buf++;
627 }
628 spu_elf_section_data (s)->ovl_index = ++ovl_index;
629 if (ovl_end < s->vma + s->size)
630 {
631 ovl_end = s->vma + s->size;
632 alloc_sec[num_buf * 2 - 1] = s;
633 }
634 }
635 else
636 ovl_end = s->vma + s->size;
637 }
638
639 htab->num_overlays = ovl_index;
640 htab->num_buf = num_buf;
641 if (ovl_index == 0)
642 {
643 free (alloc_sec);
644 return FALSE;
645 }
646
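   /* Trim the array down to the 2 * num_buf region boundary sections
      recorded above, and keep it as htab->ovl_region.  */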
647 alloc_sec = bfd_realloc (alloc_sec, num_buf * 2 * sizeof (*alloc_sec));
648 if (alloc_sec == NULL)
649 return FALSE;
650
651 htab->ovl_region = alloc_sec;
652 return TRUE;
653 }
654
655 /* One of these per stub. */
656 #define SIZEOF_STUB1 8
657 #define ILA_79 0x4200004f /* ila $79,function_address */
658 #define BR 0x32000000 /* br stub2 */
659
660 /* One of these per overlay. */
661 #define SIZEOF_STUB2 8
662 #define ILA_78 0x4200004e /* ila $78,overlay_number */
663 /* br __ovly_load */
664 #define NOP 0x40200000
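 /* The last stub1 of a group has its br replaced by NOP; the group's
    stub2 (ila $78 followed by br __ovly_load) follows immediately.  */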
665
666 /* Return true for all relative and absolute branch and hint instructions.
667 bra 00110000 0..
668 brasl 00110001 0..
669 br 00110010 0..
670 brsl 00110011 0..
671 brz 00100000 0..
672 brnz 00100001 0..
673 brhz 00100010 0..
674 brhnz 00100011 0..
675 hbra 0001000..
676 hbrr 0001001.. */
677
678 static bfd_boolean
679 is_branch (const unsigned char *insn)
680 {
681 return (((insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0)
682 || (insn[0] & 0xfc) == 0x10);
683 }
684
685 struct stubarr {
686 struct spu_stub_hash_entry **sh;
687 unsigned int count;
688 };
689
690 /* Called via bfd_hash_traverse to set up pointers to all symbols
691 in the stub hash table. */
692
693 static bfd_boolean
694 populate_stubs (struct bfd_hash_entry *bh, void *inf)
695 {
696 struct stubarr *stubs = inf;
697
698 stubs->sh[--stubs->count] = (struct spu_stub_hash_entry *) bh;
699 return TRUE;
700 }
701
702 /* qsort predicate to sort stubs by overlay number. */
703
704 static int
705 sort_stubs (const void *a, const void *b)
706 {
707 const struct spu_stub_hash_entry *const *sa = a;
708 const struct spu_stub_hash_entry *const *sb = b;
709 int i;
710 bfd_signed_vma d;
711
712 i = spu_elf_section_data ((*sa)->target_section->output_section)->ovl_index;
713 i -= spu_elf_section_data ((*sb)->target_section->output_section)->ovl_index;
714 if (i != 0)
715 return i;
716
717 d = ((*sa)->target_section->output_section->vma
718 + (*sa)->target_section->output_offset
719 + (*sa)->target_off
720 - (*sb)->target_section->output_section->vma
721 - (*sb)->target_section->output_offset
722 - (*sb)->target_off);
723 if (d != 0)
724 return d < 0 ? -1 : 1;
725
726 /* Two functions at the same address. Aliases perhaps. */
727 i = strcmp ((*sb)->root.string, (*sa)->root.string);
728 BFD_ASSERT (i != 0);
729 return i;
730 }
731
732 /* Allocate space for overlay call and return stubs. */
733
734 bfd_boolean
735 spu_elf_size_stubs (bfd *output_bfd,
736 struct bfd_link_info *info,
737 int non_overlay_stubs,
738 asection **stub,
739 asection **ovtab,
740 asection **toe)
741 {
742 struct spu_link_hash_table *htab = spu_hash_table (info);
743 bfd *ibfd;
744 struct stubarr stubs;
745 unsigned i, group;
746 flagword flags;
747
748 htab->non_overlay_stubs = non_overlay_stubs;
749 stubs.count = 0;
750 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
751 {
752 extern const bfd_target bfd_elf32_spu_vec;
753 Elf_Internal_Shdr *symtab_hdr;
754 asection *section;
755 Elf_Internal_Sym *local_syms = NULL;
756
757 if (ibfd->xvec != &bfd_elf32_spu_vec)
758 continue;
759
760 /* We'll need the symbol table in a second. */
761 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
762 if (symtab_hdr->sh_info == 0)
763 continue;
764
765 /* Walk over each section attached to the input bfd. */
766 for (section = ibfd->sections; section != NULL; section = section->next)
767 {
768 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
769
770 /* If there aren't any relocs, then there's nothing more to do. */
771 if ((section->flags & SEC_RELOC) == 0
772 || (section->flags & SEC_ALLOC) == 0
773 || (section->flags & SEC_LOAD) == 0
774 || section->reloc_count == 0)
775 continue;
776
777 /* If this section is a link-once section that will be
778 discarded, then don't create any stubs. */
779 if (section->output_section == NULL
780 || section->output_section->owner != output_bfd)
781 continue;
782
783 /* Get the relocs. */
784 internal_relocs
785 = _bfd_elf_link_read_relocs (ibfd, section, NULL, NULL,
786 info->keep_memory);
787 if (internal_relocs == NULL)
788 goto error_ret_free_local;
789
790 /* Now examine each relocation. */
791 irela = internal_relocs;
792 irelaend = irela + section->reloc_count;
793 for (; irela < irelaend; irela++)
794 {
795 enum elf_spu_reloc_type r_type;
796 unsigned int r_indx;
797 asection *sym_sec;
798 Elf_Internal_Sym *sym;
799 struct elf_link_hash_entry *h;
800 char *stub_name;
801 struct spu_stub_hash_entry *sh;
802 unsigned int sym_type;
803 enum _insn_type { non_branch, branch, call } insn_type;
804 bfd_boolean is_setjmp;
805
806 r_type = ELF32_R_TYPE (irela->r_info);
807 r_indx = ELF32_R_SYM (irela->r_info);
808
809 if (r_type >= R_SPU_max)
810 {
811 bfd_set_error (bfd_error_bad_value);
812 goto error_ret_free_internal;
813 }
814
815 /* Determine the reloc target section. */
816 if (!get_sym_h (&h, &sym, &sym_sec, &local_syms, r_indx, ibfd))
817 goto error_ret_free_internal;
818
819 if (sym_sec == NULL
820 || sym_sec->output_section == NULL
821 || sym_sec->output_section->owner != output_bfd)
822 continue;
823
824 /* Ensure no stubs for user supplied overlay manager syms. */
825 if (h != NULL
826 && (strcmp (h->root.root.string, "__ovly_load") == 0
827 || strcmp (h->root.root.string, "__ovly_return") == 0))
828 continue;
829
830 insn_type = non_branch;
831 if (r_type == R_SPU_REL16
832 || r_type == R_SPU_ADDR16)
833 {
834 unsigned char insn[4];
835
836 if (!bfd_get_section_contents (ibfd, section, insn,
837 irela->r_offset, 4))
838 goto error_ret_free_internal;
839
840 if (is_branch (insn))
841 {
842 insn_type = branch;
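		      /* brsl and brasl write a link register, so
			 treat them as calls.  */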
843 if ((insn[0] & 0xfd) == 0x31)
844 insn_type = call;
845 }
846 }
847
848 /* We are only interested in function symbols. */
849 if (h != NULL)
850 sym_type = h->type;
851 else
852 sym_type = ELF_ST_TYPE (sym->st_info);
853 if (sym_type != STT_FUNC)
854 {
855 /* It's common for people to write assembly and forget
856 to give function symbols the right type. Handle
857 calls to such symbols, but warn so that (hopefully)
858 people will fix their code. We need the symbol
859 type to be correct to distinguish function pointer
860 initialisation from other pointer initialisation. */
861 if (insn_type == call)
862 {
863 const char *sym_name;
864
865 if (h != NULL)
866 sym_name = h->root.root.string;
867 else
868 sym_name = bfd_elf_sym_name (sym_sec->owner,
869 symtab_hdr,
870 sym,
871 sym_sec);
872
873 (*_bfd_error_handler) (_("warning: call to non-function"
874 " symbol %s defined in %B"),
875 sym_sec->owner, sym_name);
876 }
877 else
878 continue;
879 }
880
881 /* setjmp always goes via an overlay stub, because
882 then the return and hence the longjmp goes via
883 __ovly_return. That magically makes setjmp/longjmp
884 between overlays work. */
885 is_setjmp = (h != NULL
886 && strncmp (h->root.root.string, "setjmp", 6) == 0
887 && (h->root.root.string[6] == '\0'
888 || h->root.root.string[6] == '@'));
889
890 /* Usually, non-overlay sections don't need stubs. */
891 if (!spu_elf_section_data (sym_sec->output_section)->ovl_index
892 && !non_overlay_stubs
893 && !is_setjmp)
894 continue;
895
896 /* We need a reference from some other section before
897 we consider that a symbol might need an overlay stub. */
898 if (spu_elf_section_data (sym_sec->output_section)->ovl_index
899 == spu_elf_section_data (section->output_section)->ovl_index
900 && !is_setjmp)
901 {
902 /* Or we need this to *not* be a branch. ie. We are
903 possibly taking the address of a function and
904 passing it out somehow. */
905 if (insn_type != non_branch)
906 continue;
907 }
908
909 stub_name = spu_stub_name (section, sym_sec, h, irela);
910 if (stub_name == NULL)
911 goto error_ret_free_internal;
912
913 sh = (struct spu_stub_hash_entry *)
914 bfd_hash_lookup (&htab->stub_hash_table, stub_name,
915 TRUE, FALSE);
916 if (sh == NULL)
917 {
918 free (stub_name);
919 error_ret_free_internal:
920 if (elf_section_data (section)->relocs != internal_relocs)
921 free (internal_relocs);
922 error_ret_free_local:
923 if (local_syms != NULL
924 && (symtab_hdr->contents
925 != (unsigned char *) local_syms))
926 free (local_syms);
927 return FALSE;
928 }
929
930 /* If this entry isn't new, we already have a stub. */
931 if (sh->target_section != NULL)
932 {
933 free (stub_name);
934 continue;
935 }
936
937 sh->target_section = sym_sec;
938 if (h != NULL)
939 sh->target_off = h->root.u.def.value;
940 else
941 sh->target_off = sym->st_value;
942 sh->target_off += irela->r_addend;
943
944 stubs.count += 1;
945 }
946
947 /* We're done with the internal relocs, free them. */
948 if (elf_section_data (section)->relocs != internal_relocs)
949 free (internal_relocs);
950 }
951
952 if (local_syms != NULL
953 && symtab_hdr->contents != (unsigned char *) local_syms)
954 {
955 if (!info->keep_memory)
956 free (local_syms);
957 else
958 symtab_hdr->contents = (unsigned char *) local_syms;
959 }
960 }
961
962 *stub = NULL;
963 if (stubs.count == 0)
964 return TRUE;
965
966 ibfd = info->input_bfds;
967 flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
968 | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
969 htab->stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
970 *stub = htab->stub;
971 if (htab->stub == NULL
972 || !bfd_set_section_alignment (ibfd, htab->stub, 2))
973 return FALSE;
974
975 flags = (SEC_ALLOC | SEC_LOAD
976 | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
977 htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
978 *ovtab = htab->ovtab;
979 if (htab->ovtab == NULL
 980      || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
981 return FALSE;
982
983 *toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
984 if (*toe == NULL
985 || !bfd_set_section_alignment (ibfd, *toe, 4))
986 return FALSE;
987 (*toe)->size = 16;
988
989 /* Retrieve all the stubs and sort. */
990 stubs.sh = bfd_malloc (stubs.count * sizeof (*stubs.sh));
991 if (stubs.sh == NULL)
992 return FALSE;
993 i = stubs.count;
994 bfd_hash_traverse (&htab->stub_hash_table, populate_stubs, &stubs);
995 BFD_ASSERT (stubs.count == 0);
996
997 stubs.count = i;
998 qsort (stubs.sh, stubs.count, sizeof (*stubs.sh), sort_stubs);
999
1000 /* Now that the stubs are sorted, place them in the stub section.
1001 Stubs are grouped per overlay
1002 . ila $79,func1
1003 . br 1f
1004 . ila $79,func2
1005 . br 1f
1006 .
1007 .
1008 . ila $79,funcn
1009 . nop
1010 . 1:
1011 . ila $78,ovl_index
1012 . br __ovly_load */
1013
1014 group = 0;
1015 for (i = 0; i < stubs.count; i++)
1016 {
1017 if (spu_elf_section_data (stubs.sh[group]->target_section
1018 ->output_section)->ovl_index
1019 != spu_elf_section_data (stubs.sh[i]->target_section
1020 ->output_section)->ovl_index)
1021 {
1022 htab->stub->size += SIZEOF_STUB2;
1023 for (; group != i; group++)
1024 stubs.sh[group]->delta
1025 = stubs.sh[i - 1]->off - stubs.sh[group]->off;
1026 }
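       /* Stubs with identical target addresses share a single stub1;
	  otherwise allocate a new one.  */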
1027 if (group == i
1028 || ((stubs.sh[i - 1]->target_section->output_section->vma
1029 + stubs.sh[i - 1]->target_section->output_offset
1030 + stubs.sh[i - 1]->target_off)
1031 != (stubs.sh[i]->target_section->output_section->vma
1032 + stubs.sh[i]->target_section->output_offset
1033 + stubs.sh[i]->target_off)))
1034 {
1035 stubs.sh[i]->off = htab->stub->size;
1036 htab->stub->size += SIZEOF_STUB1;
1037 }
1038 else
1039 stubs.sh[i]->off = stubs.sh[i - 1]->off;
1040 }
1041 if (group != i)
1042 htab->stub->size += SIZEOF_STUB2;
1043 for (; group != i; group++)
1044 stubs.sh[group]->delta = stubs.sh[i - 1]->off - stubs.sh[group]->off;
1045
1046 /* htab->ovtab consists of two arrays.
1047 . struct {
1048 . u32 vma;
1049 . u32 size;
1050 . u32 file_off;
1051 . u32 buf;
1052 . } _ovly_table[];
1053 .
1054 . struct {
1055 . u32 mapped;
1056 . } _ovly_buf_table[]; */
1057
1058 htab->ovtab->alignment_power = 4;
1059 htab->ovtab->size = htab->num_overlays * 16 + htab->num_buf * 4;
1060
1061 return TRUE;
1062 }
1063
1064 /* Functions to handle embedded spu_ovl.o object. */
1065
1066 static void *
1067 ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
1068 {
1069 return stream;
1070 }
1071
1072 static file_ptr
1073 ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
1074 void *stream,
1075 void *buf,
1076 file_ptr nbytes,
1077 file_ptr offset)
1078 {
1079 struct _ovl_stream *os;
1080 size_t count;
1081 size_t max;
1082
1083 os = (struct _ovl_stream *) stream;
1084 max = (const char *) os->end - (const char *) os->start;
1085
1086 if ((ufile_ptr) offset >= max)
1087 return 0;
1088
1089 count = nbytes;
1090 if (count > max - offset)
1091 count = max - offset;
1092
1093 memcpy (buf, (const char *) os->start + offset, count);
1094 return count;
1095 }
1096
1097 bfd_boolean
1098 spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
1099 {
1100 *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
1101 "elf32-spu",
1102 ovl_mgr_open,
1103 (void *) stream,
1104 ovl_mgr_pread,
1105 NULL,
1106 NULL);
1107 return *ovl_bfd != NULL;
1108 }
1109
1110 /* Fill in the ila and br for a stub. On the last stub for a group,
1111 write the stub that sets the overlay number too. */
1112
1113 static bfd_boolean
1114 write_one_stub (struct bfd_hash_entry *bh, void *inf)
1115 {
1116 struct spu_stub_hash_entry *ent = (struct spu_stub_hash_entry *) bh;
1117 struct spu_link_hash_table *htab = inf;
1118 asection *sec = htab->stub;
1119 asection *s = ent->target_section;
1120 unsigned int ovl;
1121 bfd_vma val;
1122
1123 val = ent->target_off + s->output_offset + s->output_section->vma;
1124 bfd_put_32 (sec->owner, ILA_79 + ((val << 7) & 0x01ffff80),
1125 sec->contents + ent->off);
1126 val = ent->delta + 4;
1127 bfd_put_32 (sec->owner, BR + ((val << 5) & 0x007fff80),
1128 sec->contents + ent->off + 4);
1129
1130 /* If this is the last stub of this group, write stub2. */
1131 if (ent->delta == 0)
1132 {
1133 bfd_put_32 (sec->owner, NOP,
1134 sec->contents + ent->off + 4);
1135
1136 ovl = spu_elf_section_data (s->output_section)->ovl_index;
1137 bfd_put_32 (sec->owner, ILA_78 + ((ovl << 7) & 0x01ffff80),
1138 sec->contents + ent->off + 8);
1139
1140 val = (htab->ovly_load->root.u.def.section->output_section->vma
1141 + htab->ovly_load->root.u.def.section->output_offset
1142 + htab->ovly_load->root.u.def.value
1143 - (sec->output_section->vma
1144 + sec->output_offset
1145 + ent->off + 12));
1146
1147 if (val + 0x20000 >= 0x40000)
1148 htab->stub_overflow = TRUE;
1149
1150 bfd_put_32 (sec->owner, BR + ((val << 5) & 0x007fff80),
1151 sec->contents + ent->off + 12);
1152 }
1153
1154 if (htab->emit_stub_syms)
1155 {
1156 struct elf_link_hash_entry *h;
1157 size_t len1, len2;
1158 char *name;
1159
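       /* Build the stub symbol name by inserting "ovl_call." after the
	  "sectionid." prefix (8 hex digits and a dot) of the stub hash
	  table name.  */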
1160 len1 = sizeof ("ovl_call.") - 1;
1161 len2 = strlen (ent->root.string);
1162 name = bfd_malloc (len1 + len2 + 1);
1163 if (name == NULL)
1164 return FALSE;
1165 memcpy (name, ent->root.string, 9);
1166 memcpy (name + 9, "ovl_call.", len1);
1167 memcpy (name + 9 + len1, ent->root.string + 9, len2 - 9 + 1);
1168 h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
1169 if (h == NULL)
1170 return FALSE;
1171 if (h->root.type == bfd_link_hash_new)
1172 {
1173 h->root.type = bfd_link_hash_defined;
1174 h->root.u.def.section = sec;
1175 h->root.u.def.value = ent->off;
1176 h->size = (ent->delta == 0
1177 ? SIZEOF_STUB1 + SIZEOF_STUB2 : SIZEOF_STUB1);
1178 h->type = STT_FUNC;
1179 h->ref_regular = 1;
1180 h->def_regular = 1;
1181 h->ref_regular_nonweak = 1;
1182 h->forced_local = 1;
1183 h->non_elf = 0;
1184 }
1185 }
1186
1187 return TRUE;
1188 }
1189
1190 /* Define an STT_OBJECT symbol. */
1191
1192 static struct elf_link_hash_entry *
1193 define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
1194 {
1195 struct elf_link_hash_entry *h;
1196
1197 h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
1198 if (h == NULL)
1199 return NULL;
1200
1201 if (h->root.type != bfd_link_hash_defined
1202 || !h->def_regular)
1203 {
1204 h->root.type = bfd_link_hash_defined;
1205 h->root.u.def.section = htab->ovtab;
1206 h->type = STT_OBJECT;
1207 h->ref_regular = 1;
1208 h->def_regular = 1;
1209 h->ref_regular_nonweak = 1;
1210 h->non_elf = 0;
1211 }
1212 else
1213 {
1214 (*_bfd_error_handler) (_("%B is not allowed to define %s"),
1215 h->root.u.def.section->owner,
1216 h->root.root.string);
1217 bfd_set_error (bfd_error_bad_value);
1218 return NULL;
1219 }
1220
1221 return h;
1222 }
1223
1224 /* Fill in all stubs and the overlay tables. */
1225
1226 bfd_boolean
1227 spu_elf_build_stubs (struct bfd_link_info *info, int emit_syms, asection *toe)
1228 {
1229 struct spu_link_hash_table *htab = spu_hash_table (info);
1230 struct elf_link_hash_entry *h;
1231 bfd_byte *p;
1232 asection *s;
1233 bfd *obfd;
1234 unsigned int i;
1235
1236 htab->emit_stub_syms = emit_syms;
1237 htab->stub->contents = bfd_zalloc (htab->stub->owner, htab->stub->size);
1238 if (htab->stub->contents == NULL)
1239 return FALSE;
1240
1241 h = elf_link_hash_lookup (&htab->elf, "__ovly_load", FALSE, FALSE, FALSE);
1242 htab->ovly_load = h;
1243 BFD_ASSERT (h != NULL
1244 && (h->root.type == bfd_link_hash_defined
1245 || h->root.type == bfd_link_hash_defweak)
1246 && h->def_regular);
1247
1248 s = h->root.u.def.section->output_section;
1249 if (spu_elf_section_data (s)->ovl_index)
1250 {
1251 (*_bfd_error_handler) (_("%s in overlay section"),
 1252                             h->root.root.string);
1253 bfd_set_error (bfd_error_bad_value);
1254 return FALSE;
1255 }
1256
1257 /* Write out all the stubs. */
1258 bfd_hash_traverse (&htab->stub_hash_table, write_one_stub, htab);
1259
1260 if (htab->stub_overflow)
1261 {
1262 (*_bfd_error_handler) (_("overlay stub relocation overflow"));
1263 bfd_set_error (bfd_error_bad_value);
1264 return FALSE;
1265 }
1266
1267 htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
1268 if (htab->ovtab->contents == NULL)
1269 return FALSE;
1270
1271 /* Write out _ovly_table. */
1272 p = htab->ovtab->contents;
1273 obfd = htab->ovtab->output_section->owner;
1274 for (s = obfd->sections; s != NULL; s = s->next)
1275 {
1276 unsigned int ovl_index = spu_elf_section_data (s)->ovl_index;
1277
1278 if (ovl_index != 0)
1279 {
1280 unsigned int lo, hi, mid;
1281 unsigned long off = (ovl_index - 1) * 16;
1282 bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
1283 bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16, p + off + 4);
1284 /* file_off written later in spu_elf_modify_program_headers. */
1285
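	  /* Binary search ovl_region for the buffer whose address range
	     contains this overlay, and store the 1-based buffer number
	     in the "buf" field.  */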
1286 lo = 0;
1287 hi = htab->num_buf;
1288 while (lo < hi)
1289 {
1290 mid = (lo + hi) >> 1;
1291 if (htab->ovl_region[2 * mid + 1]->vma
1292 + htab->ovl_region[2 * mid + 1]->size <= s->vma)
1293 lo = mid + 1;
1294 else if (htab->ovl_region[2 * mid]->vma > s->vma)
1295 hi = mid;
1296 else
1297 {
1298 bfd_put_32 (htab->ovtab->owner, mid + 1, p + off + 12);
1299 break;
1300 }
1301 }
1302 BFD_ASSERT (lo < hi);
1303 }
1304 }
1305
1306 /* Write out _ovly_buf_table. */
1307 p = htab->ovtab->contents + htab->num_overlays * 16;
1308 for (i = 0; i < htab->num_buf; i++)
1309 {
1310 bfd_put_32 (htab->ovtab->owner, 0, p);
1311 p += 4;
1312 }
1313
1314 h = define_ovtab_symbol (htab, "_ovly_table");
1315 if (h == NULL)
1316 return FALSE;
1317 h->root.u.def.value = 0;
1318 h->size = htab->num_overlays * 16;
1319
1320 h = define_ovtab_symbol (htab, "_ovly_table_end");
1321 if (h == NULL)
1322 return FALSE;
1323 h->root.u.def.value = htab->num_overlays * 16;
1324 h->size = 0;
1325
1326 h = define_ovtab_symbol (htab, "_ovly_buf_table");
1327 if (h == NULL)
1328 return FALSE;
1329 h->root.u.def.value = htab->num_overlays * 16;
1330 h->size = htab->num_buf * 4;
1331
1332 h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
1333 if (h == NULL)
1334 return FALSE;
1335 h->root.u.def.value = htab->num_overlays * 16 + htab->num_buf * 4;
1336 h->size = 0;
1337
1338 h = define_ovtab_symbol (htab, "_EAR_");
1339 if (h == NULL)
1340 return FALSE;
1341 h->root.u.def.section = toe;
1342 h->root.u.def.value = 0;
1343 h->size = 16;
1344
1345 return TRUE;
1346 }
1347
1348 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
1349
1350 static bfd_boolean
1351 spu_elf_relocate_section (bfd *output_bfd,
1352 struct bfd_link_info *info,
1353 bfd *input_bfd,
1354 asection *input_section,
1355 bfd_byte *contents,
1356 Elf_Internal_Rela *relocs,
1357 Elf_Internal_Sym *local_syms,
1358 asection **local_sections)
1359 {
1360 Elf_Internal_Shdr *symtab_hdr;
1361 struct elf_link_hash_entry **sym_hashes;
1362 Elf_Internal_Rela *rel, *relend;
1363 struct spu_link_hash_table *htab;
1364 bfd_boolean ret = TRUE;
1365
1366 htab = spu_hash_table (info);
1367 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
1368 sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));
1369
1370 rel = relocs;
1371 relend = relocs + input_section->reloc_count;
1372 for (; rel < relend; rel++)
1373 {
1374 int r_type;
1375 reloc_howto_type *howto;
1376 unsigned long r_symndx;
1377 Elf_Internal_Sym *sym;
1378 asection *sec;
1379 struct elf_link_hash_entry *h;
1380 const char *sym_name;
1381 bfd_vma relocation;
1382 bfd_vma addend;
1383 bfd_reloc_status_type r;
1384 bfd_boolean unresolved_reloc;
1385 bfd_boolean warned;
1386
1387 r_symndx = ELF32_R_SYM (rel->r_info);
1388 r_type = ELF32_R_TYPE (rel->r_info);
1389 howto = elf_howto_table + r_type;
1390 unresolved_reloc = FALSE;
1391 warned = FALSE;
1392
1393 h = NULL;
1394 sym = NULL;
1395 sec = NULL;
1396 if (r_symndx < symtab_hdr->sh_info)
1397 {
1398 sym = local_syms + r_symndx;
1399 sec = local_sections[r_symndx];
1400 sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
1401 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
1402 }
1403 else
1404 {
1405 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
1406 r_symndx, symtab_hdr, sym_hashes,
1407 h, sec, relocation,
1408 unresolved_reloc, warned);
1409 sym_name = h->root.root.string;
1410 }
1411
1412 if (sec != NULL && elf_discarded_section (sec))
1413 {
1414 /* For relocs against symbols from removed linkonce sections,
1415 or sections discarded by a linker script, we just want the
1416 section contents zeroed. Avoid any special processing. */
1417 _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
1418 rel->r_info = 0;
1419 rel->r_addend = 0;
1420 continue;
1421 }
1422
1423 if (info->relocatable)
1424 continue;
1425
1426 if (unresolved_reloc)
1427 {
1428 (*_bfd_error_handler)
1429 (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
1430 input_bfd,
1431 bfd_get_section_name (input_bfd, input_section),
1432 (long) rel->r_offset,
1433 howto->name,
1434 sym_name);
1435 ret = FALSE;
1436 }
1437
1438 /* If this symbol is in an overlay area, we may need to relocate
1439 to the overlay stub. */
1440 addend = rel->r_addend;
1441 if (sec != NULL
1442 && sec->output_section != NULL
1443 && sec->output_section->owner == output_bfd
1444 && (spu_elf_section_data (sec->output_section)->ovl_index != 0
1445 || htab->non_overlay_stubs)
1446 && !(sec == input_section
1447 && is_branch (contents + rel->r_offset)))
1448 {
1449 char *stub_name;
1450 struct spu_stub_hash_entry *sh;
1451
1452 stub_name = spu_stub_name (input_section, sec, h, rel);
1453 if (stub_name == NULL)
1454 return FALSE;
1455
1456 sh = (struct spu_stub_hash_entry *)
1457 bfd_hash_lookup (&htab->stub_hash_table, stub_name, FALSE, FALSE);
1458 if (sh != NULL)
1459 {
1460 relocation = (htab->stub->output_section->vma
1461 + htab->stub->output_offset
1462 + sh->off);
1463 addend = 0;
1464 }
1465 free (stub_name);
1466 }
1467
1468 r = _bfd_final_link_relocate (howto,
1469 input_bfd,
1470 input_section,
1471 contents,
1472 rel->r_offset, relocation, addend);
1473
1474 if (r != bfd_reloc_ok)
1475 {
1476 const char *msg = (const char *) 0;
1477
1478 switch (r)
1479 {
1480 case bfd_reloc_overflow:
1481 if (!((*info->callbacks->reloc_overflow)
1482 (info, (h ? &h->root : NULL), sym_name, howto->name,
1483 (bfd_vma) 0, input_bfd, input_section, rel->r_offset)))
1484 return FALSE;
1485 break;
1486
1487 case bfd_reloc_undefined:
1488 if (!((*info->callbacks->undefined_symbol)
1489 (info, sym_name, input_bfd, input_section,
1490 rel->r_offset, TRUE)))
1491 return FALSE;
1492 break;
1493
1494 case bfd_reloc_outofrange:
1495 msg = _("internal error: out of range error");
1496 goto common_error;
1497
1498 case bfd_reloc_notsupported:
1499 msg = _("internal error: unsupported relocation error");
1500 goto common_error;
1501
1502 case bfd_reloc_dangerous:
1503 msg = _("internal error: dangerous error");
1504 goto common_error;
1505
1506 default:
1507 msg = _("internal error: unknown error");
1508 /* fall through */
1509
1510 common_error:
1511 if (!((*info->callbacks->warning)
1512 (info, msg, sym_name, input_bfd, input_section,
1513 rel->r_offset)))
1514 return FALSE;
1515 break;
1516 }
1517 }
1518 }
1519
1520 return ret;
1521 }
1522
1523 static int spu_plugin = 0;
1524
1525 void
1526 spu_elf_plugin (int val)
1527 {
1528 spu_plugin = val;
1529 }
1530
1531 /* Set ELF header e_type for plugins. */
1532
1533 static void
1534 spu_elf_post_process_headers (bfd *abfd,
1535 struct bfd_link_info *info ATTRIBUTE_UNUSED)
1536 {
1537 if (spu_plugin)
1538 {
1539 Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);
1540
1541 i_ehdrp->e_type = ET_DYN;
1542 }
1543 }
1544
1545 /* We may add an extra PT_LOAD segment for .toe. We also need extra
1546 segments for overlays. */
1547
1548 static int
1549 spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
1550 {
1551 struct spu_link_hash_table *htab = spu_hash_table (info);
1552 int extra = htab->num_overlays;
1553 asection *sec;
1554
1555 if (extra)
1556 ++extra;
1557
1558 sec = bfd_get_section_by_name (abfd, ".toe");
1559 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
1560 ++extra;
1561
1562 return extra;
1563 }
1564
1565 /* Remove .toe section from other PT_LOAD segments and put it in
1566 a segment of its own. Put overlays in separate segments too. */
1567
1568 static bfd_boolean
1569 spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
1570 {
1571 asection *toe, *s;
1572 struct elf_segment_map *m;
1573 unsigned int i;
1574
1575 if (info == NULL)
1576 return TRUE;
1577
1578 toe = bfd_get_section_by_name (abfd, ".toe");
1579 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
1580 if (m->p_type == PT_LOAD && m->count > 1)
1581 for (i = 0; i < m->count; i++)
1582 if ((s = m->sections[i]) == toe
1583 || spu_elf_section_data (s)->ovl_index != 0)
1584 {
1585 struct elf_segment_map *m2;
1586 bfd_vma amt;
1587
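	  /* Split the sections following S into a new PT_LOAD map, and
	     if S is not the first section, also give S a map of its
	     own, leaving the preceding sections where they are.  */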
1588 if (i + 1 < m->count)
1589 {
1590 amt = sizeof (struct elf_segment_map);
1591 amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
1592 m2 = bfd_zalloc (abfd, amt);
1593 if (m2 == NULL)
1594 return FALSE;
1595 m2->count = m->count - (i + 1);
1596 memcpy (m2->sections, m->sections + i + 1,
1597 m2->count * sizeof (m->sections[0]));
1598 m2->p_type = PT_LOAD;
1599 m2->next = m->next;
1600 m->next = m2;
1601 }
1602 m->count = 1;
1603 if (i != 0)
1604 {
1605 m->count = i;
1606 amt = sizeof (struct elf_segment_map);
1607 m2 = bfd_zalloc (abfd, amt);
1608 if (m2 == NULL)
1609 return FALSE;
1610 m2->p_type = PT_LOAD;
1611 m2->count = 1;
1612 m2->sections[0] = s;
1613 m2->next = m->next;
1614 m->next = m2;
1615 }
1616 break;
1617 }
1618
1619 return TRUE;
1620 }
1621
1622 /* Check that all loadable section VMAs lie in the range
1623 LO .. HI inclusive. */
1624
1625 asection *
1626 spu_elf_check_vma (bfd *abfd, bfd_vma lo, bfd_vma hi)
1627 {
1628 struct elf_segment_map *m;
1629 unsigned int i;
1630
1631 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
1632 if (m->p_type == PT_LOAD)
1633 for (i = 0; i < m->count; i++)
1634 if (m->sections[i]->size != 0
1635 && (m->sections[i]->vma < lo
1636 || m->sections[i]->vma > hi
1637 || m->sections[i]->vma + m->sections[i]->size - 1 > hi))
1638 return m->sections[i];
1639
1640 return NULL;
1641 }
1642
1643 /* Tweak phdrs before writing them out. */
1644
1645 static int
1646 spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
1647 {
1648 const struct elf_backend_data *bed;
1649 struct elf_obj_tdata *tdata;
1650 Elf_Internal_Phdr *phdr, *last;
1651 struct spu_link_hash_table *htab;
1652 unsigned int count;
1653 unsigned int i;
1654
1655 if (info == NULL)
1656 return TRUE;
1657
1658 bed = get_elf_backend_data (abfd);
1659 tdata = elf_tdata (abfd);
1660 phdr = tdata->phdr;
1661 count = tdata->program_header_size / bed->s->sizeof_phdr;
1662 htab = spu_hash_table (info);
1663 if (htab->num_overlays != 0)
1664 {
1665 struct elf_segment_map *m;
1666 unsigned int o;
1667
1668 for (i = 0, m = elf_tdata (abfd)->segment_map; m; ++i, m = m->next)
1669 if (m->count != 0
1670 && (o = spu_elf_section_data (m->sections[0])->ovl_index) != 0)
1671 {
1672 /* Mark this as an overlay header. */
1673 phdr[i].p_flags |= PF_OVERLAY;
1674
1675 if (htab->ovtab != NULL && htab->ovtab->size != 0)
1676 {
1677 bfd_byte *p = htab->ovtab->contents;
1678 unsigned int off = (o - 1) * 16 + 8;
1679
1680 /* Write file_off into _ovly_table. */
1681 bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
1682 }
1683 }
1684 }
1685
1686 /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
1687 of 16. This should always be possible when using the standard
1688 linker scripts, but don't create overlapping segments if
1689 someone is playing games with linker scripts. */
1690 last = NULL;
1691 for (i = count; i-- != 0; )
1692 if (phdr[i].p_type == PT_LOAD)
1693 {
1694 unsigned adjust;
1695
1696 adjust = -phdr[i].p_filesz & 15;
1697 if (adjust != 0
1698 && last != NULL
1699 && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
1700 break;
1701
1702 adjust = -phdr[i].p_memsz & 15;
1703 if (adjust != 0
1704 && last != NULL
1705 && phdr[i].p_filesz != 0
1706 && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
1707 && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
1708 break;
1709
1710 if (phdr[i].p_filesz != 0)
1711 last = &phdr[i];
1712 }
1713
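   /* The loop above only ran to completion (leaving i at -1) if the
      rounding creates no overlap, so it is safe to apply it now.  */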
1714 if (i == (unsigned int) -1)
1715 for (i = count; i-- != 0; )
1716 if (phdr[i].p_type == PT_LOAD)
1717 {
1718 unsigned adjust;
1719
1720 adjust = -phdr[i].p_filesz & 15;
1721 phdr[i].p_filesz += adjust;
1722
1723 adjust = -phdr[i].p_memsz & 15;
1724 phdr[i].p_memsz += adjust;
1725 }
1726
1727 return TRUE;
1728 }
1729
1730 /* Arrange for our linker created section to be output. */
1731
1732 static bfd_boolean
1733 spu_elf_section_processing (bfd *abfd ATTRIBUTE_UNUSED,
1734 Elf_Internal_Shdr *i_shdrp)
1735 {
1736 asection *sec;
1737
1738 sec = i_shdrp->bfd_section;
1739 if (sec != NULL
1740 && (sec->flags & SEC_LINKER_CREATED) != 0
1741 && sec->name != NULL
1742 && strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
1743 i_shdrp->contents = sec->contents;
1744
1745 return TRUE;
1746 }
1747
1748 #define TARGET_BIG_SYM bfd_elf32_spu_vec
1749 #define TARGET_BIG_NAME "elf32-spu"
1750 #define ELF_ARCH bfd_arch_spu
1751 #define ELF_MACHINE_CODE EM_SPU
 1752 /* This matches the alignment needed for DMA.  */
1753 #define ELF_MAXPAGESIZE 0x80
1754 #define elf_backend_rela_normal 1
1755 #define elf_backend_can_gc_sections 1
1756
1757 #define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
1758 #define elf_info_to_howto spu_elf_info_to_howto
1759 #define elf_backend_gc_mark_hook spu_elf_gc_mark_hook
1760 #define elf_backend_relocate_section spu_elf_relocate_section
1761 #define elf_backend_symbol_processing spu_elf_backend_symbol_processing
1762 #define bfd_elf32_new_section_hook spu_elf_new_section_hook
1763 #define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create
1764 #define bfd_elf32_bfd_link_hash_table_free spu_elf_link_hash_table_free
1765
1766 #define elf_backend_additional_program_headers spu_elf_additional_program_headers
1767 #define elf_backend_modify_segment_map spu_elf_modify_segment_map
1768 #define elf_backend_modify_program_headers spu_elf_modify_program_headers
1769 #define elf_backend_post_process_headers spu_elf_post_process_headers
1770 #define elf_backend_section_processing spu_elf_section_processing
1771 #define elf_backend_special_sections spu_elf_special_sections
1772
1773 #include "elf32-target.h"