b6e5fbc88862b68d3f298304c1330b6d763aaa9f
[deliverable/binutils-gdb.git] / bfd / elf32-spu.c
1 /* SPU specific support for 32-bit ELF
2
3 Copyright 2006, 2007 Free Software Foundation, Inc.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
20
21 #include "sysdep.h"
22 #include "bfd.h"
23 #include "bfdlink.h"
24 #include "libbfd.h"
25 #include "elf-bfd.h"
26 #include "elf/spu.h"
27 #include "elf32-spu.h"
28
29 /* We use RELA style relocs. Don't define USE_REL. */
30
31 static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
32 void *, asection *,
33 bfd *, char **);
34
35 /* Values of type 'enum elf_spu_reloc_type' are used to index this
36 array, so it must be declared in the order of that type. */
37
static reloc_howto_type elf_howto_table[] = {
  /* HOWTO arguments are: type, rightshift, size, bitsize, pc_relative,
     bitpos, complain_on_overflow, special_function, name,
     partial_inplace, src_mask, dst_mask, pcrel_offset.
     REL9/REL9I use spu_elf_rel9 because their 9-bit value is split
     across non-contiguous instruction fields (see dst_mask).  */
  HOWTO (R_SPU_NONE, 0, 0, 0, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_NONE",
	 FALSE, 0, 0x00000000, FALSE),
  HOWTO (R_SPU_ADDR10, 4, 2, 10, FALSE, 14, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR10",
	 FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16, 2, 2, 16, FALSE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16_HI",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_LO, 0, 2, 16, FALSE, 7, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR16_LO",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR18, 0, 2, 18, FALSE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR18",
	 FALSE, 0, 0x01ffff80, FALSE),
  HOWTO (R_SPU_ADDR32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR32",
	 FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_REL16, 2, 2, 16, TRUE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_REL16",
	 FALSE, 0, 0x007fff80, TRUE),
  HOWTO (R_SPU_ADDR7, 0, 2, 7, FALSE, 14, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR7",
	 FALSE, 0, 0x001fc000, FALSE),
  HOWTO (R_SPU_REL9, 2, 2, 9, TRUE, 0, complain_overflow_signed,
	 spu_elf_rel9, "SPU_REL9",
	 FALSE, 0, 0x0180007f, TRUE),
  HOWTO (R_SPU_REL9I, 2, 2, 9, TRUE, 0, complain_overflow_signed,
	 spu_elf_rel9, "SPU_REL9I",
	 FALSE, 0, 0x0000c07f, TRUE),
  HOWTO (R_SPU_ADDR10I, 0, 2, 10, FALSE, 14, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR10I",
	 FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16I, 0, 2, 16, FALSE, 7, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR16I",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_REL32, 0, 2, 32, TRUE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_REL32",
	 FALSE, 0, 0xffffffff, TRUE),
};
82
/* Sections the backend recognizes by name.  ".toe" (table of effective
   addresses) is a 16-byte-aligned, allocated, no-bits section.  */

static struct bfd_elf_special_section const spu_elf_special_sections[] = {
  { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
  { NULL, 0, 0, 0, 0 }
};
87
88 static enum elf_spu_reloc_type
89 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
90 {
91 switch (code)
92 {
93 default:
94 return R_SPU_NONE;
95 case BFD_RELOC_SPU_IMM10W:
96 return R_SPU_ADDR10;
97 case BFD_RELOC_SPU_IMM16W:
98 return R_SPU_ADDR16;
99 case BFD_RELOC_SPU_LO16:
100 return R_SPU_ADDR16_LO;
101 case BFD_RELOC_SPU_HI16:
102 return R_SPU_ADDR16_HI;
103 case BFD_RELOC_SPU_IMM18:
104 return R_SPU_ADDR18;
105 case BFD_RELOC_SPU_PCREL16:
106 return R_SPU_REL16;
107 case BFD_RELOC_SPU_IMM7:
108 return R_SPU_ADDR7;
109 case BFD_RELOC_SPU_IMM8:
110 return R_SPU_NONE;
111 case BFD_RELOC_SPU_PCREL9a:
112 return R_SPU_REL9;
113 case BFD_RELOC_SPU_PCREL9b:
114 return R_SPU_REL9I;
115 case BFD_RELOC_SPU_IMM10:
116 return R_SPU_ADDR10I;
117 case BFD_RELOC_SPU_IMM16:
118 return R_SPU_ADDR16I;
119 case BFD_RELOC_32:
120 return R_SPU_ADDR32;
121 case BFD_RELOC_32_PCREL:
122 return R_SPU_REL32;
123 }
124 }
125
126 static void
127 spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
128 arelent *cache_ptr,
129 Elf_Internal_Rela *dst)
130 {
131 enum elf_spu_reloc_type r_type;
132
133 r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
134 BFD_ASSERT (r_type < R_SPU_max);
135 cache_ptr->howto = &elf_howto_table[(int) r_type];
136 }
137
138 static reloc_howto_type *
139 spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
140 bfd_reloc_code_real_type code)
141 {
142 enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);
143
144 if (r_type == R_SPU_NONE)
145 return NULL;
146
147 return elf_howto_table + r_type;
148 }
149
150 static reloc_howto_type *
151 spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
152 const char *r_name)
153 {
154 unsigned int i;
155
156 for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
157 if (elf_howto_table[i].name != NULL
158 && strcasecmp (elf_howto_table[i].name, r_name) == 0)
159 return &elf_howto_table[i];
160
161 return NULL;
162 }
163
/* Apply R_SPU_REL9 and R_SPU_REL9I relocs.  The 9-bit pc-relative
   value is split across non-contiguous instruction fields, which the
   generic reloc machinery cannot handle, hence this special function.  */

static bfd_reloc_status_type
spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
	      void *data, asection *input_section,
	      bfd *output_bfd, char **error_message)
{
  bfd_size_type octets;
  bfd_vma val;
  long insn;

  /* If this is a relocatable link (output_bfd test tells us), just
     call the generic function.  Any adjustment will be done at final
     link time.  */
  if (output_bfd != NULL)
    return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
				  input_section, output_bfd, error_message);

  if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
    return bfd_reloc_outofrange;
  octets = reloc_entry->address * bfd_octets_per_byte (abfd);

  /* Get symbol value.  */
  val = 0;
  if (!bfd_is_com_section (symbol->section))
    val = symbol->value;
  if (symbol->section->output_section)
    val += symbol->section->output_section->vma;

  val += reloc_entry->addend;

  /* Make it pc-relative.  NOTE(review): the base subtracted here is
     the start of the input section's output placement, not the reloc
     address itself — presumably callers fold the insn address into the
     addend; confirm before changing.  */
  val -= input_section->output_section->vma + input_section->output_offset;

  /* The field counts 4-byte words and must fit in 9 signed bits.  */
  val >>= 2;
  if (val + 256 >= 512)
    return bfd_reloc_overflow;

  insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);

  /* Move two high bits of value to REL9I and REL9 position.
     The mask will take care of selecting the right field.  */
  val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
  insn &= ~reloc_entry->howto->dst_mask;
  insn |= val & reloc_entry->howto->dst_mask;
  bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
  return bfd_reloc_ok;
}
212
213 static bfd_boolean
214 spu_elf_new_section_hook (bfd *abfd, asection *sec)
215 {
216 if (!sec->used_by_bfd)
217 {
218 struct _spu_elf_section_data *sdata;
219
220 sdata = bfd_zalloc (abfd, sizeof (*sdata));
221 if (sdata == NULL)
222 return FALSE;
223 sec->used_by_bfd = sdata;
224 }
225
226 return _bfd_elf_new_section_hook (abfd, sec);
227 }
228
229 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
230 strip --strip-unneeded will not remove them. */
231
232 static void
233 spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
234 {
235 if (sym->name != NULL
236 && sym->section != bfd_abs_section_ptr
237 && strncmp (sym->name, "_EAR_", 5) == 0)
238 sym->flags |= BSF_KEEP;
239 }
240
/* SPU ELF linker hash table.
   NB: spu_elf_link_hash_table_create zeroes everything from the STUB
   field onward in one memset, so new fields must be added after it.  */

struct spu_link_hash_table
{
  struct elf_link_hash_table elf;

  /* The stub hash table.  */
  struct bfd_hash_table stub_hash_table;

  /* Shortcuts to overlay sections.  */
  asection *stub;
  asection *ovtab;

  /* Presumably the hash entry for the "__ovly_load" overlay manager
     routine — confirm against where this field is set.  */
  struct elf_link_hash_entry *ovly_load;

  /* An array of two output sections per overlay region, chosen such that
     the first section vma is the overlay buffer vma (ie. the section has
     the lowest vma in the group that occupy the region), and the second
     section vma+size specifies the end of the region.  We keep pointers
     to sections like this because section vmas may change when laying
     them out.  */
  asection **ovl_region;

  /* Number of overlay buffers.  */
  unsigned int num_buf;

  /* Total number of overlays.  */
  unsigned int num_overlays;

  /* Set if we should emit symbols for stubs.  */
  unsigned int emit_stub_syms:1;

  /* Set if we want stubs on calls out of overlay regions to
     non-overlay regions.  */
  unsigned int non_overlay_stubs : 1;

  /* Set on error.  */
  unsigned int stub_overflow : 1;

  /* Set if stack size analysis should be done.  */
  unsigned int stack_analysis : 1;

  /* Set if __stack_* syms will be emitted.  */
  unsigned int emit_stack_syms : 1;
};
286
/* Fetch the SPU ELF linker hash table from a bfd_link_info.  */
#define spu_hash_table(p) \
  ((struct spu_link_hash_table *) ((p)->hash))
289
/* One entry per needed overlay call stub, keyed by the name built in
   spu_stub_name.  */

struct spu_stub_hash_entry
{
  struct bfd_hash_entry root;

  /* Destination of this stub.  NULL until the entry is filled in, so
     a NULL target_section also marks a freshly created entry.  */
  asection *target_section;
  bfd_vma target_off;

  /* Offset of entry in stub section.  */
  bfd_vma off;

  /* Offset from this stub to stub that loads the overlay index.  */
  bfd_vma delta;
};
304
305 /* Create an entry in a spu stub hash table. */
306
307 static struct bfd_hash_entry *
308 stub_hash_newfunc (struct bfd_hash_entry *entry,
309 struct bfd_hash_table *table,
310 const char *string)
311 {
312 /* Allocate the structure if it has not already been allocated by a
313 subclass. */
314 if (entry == NULL)
315 {
316 entry = bfd_hash_allocate (table, sizeof (struct spu_stub_hash_entry));
317 if (entry == NULL)
318 return entry;
319 }
320
321 /* Call the allocation method of the superclass. */
322 entry = bfd_hash_newfunc (entry, table, string);
323 if (entry != NULL)
324 {
325 struct spu_stub_hash_entry *sh = (struct spu_stub_hash_entry *) entry;
326
327 sh->target_section = NULL;
328 sh->target_off = 0;
329 sh->off = 0;
330 sh->delta = 0;
331 }
332
333 return entry;
334 }
335
336 /* Create a spu ELF linker hash table. */
337
338 static struct bfd_link_hash_table *
339 spu_elf_link_hash_table_create (bfd *abfd)
340 {
341 struct spu_link_hash_table *htab;
342
343 htab = bfd_malloc (sizeof (*htab));
344 if (htab == NULL)
345 return NULL;
346
347 if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
348 _bfd_elf_link_hash_newfunc,
349 sizeof (struct elf_link_hash_entry)))
350 {
351 free (htab);
352 return NULL;
353 }
354
355 /* Init the stub hash table too. */
356 if (!bfd_hash_table_init (&htab->stub_hash_table, stub_hash_newfunc,
357 sizeof (struct spu_stub_hash_entry)))
358 return NULL;
359
360 memset (&htab->stub, 0,
361 sizeof (*htab) - offsetof (struct spu_link_hash_table, stub));
362
363 return &htab->elf.root;
364 }
365
366 /* Free the derived linker hash table. */
367
368 static void
369 spu_elf_link_hash_table_free (struct bfd_link_hash_table *hash)
370 {
371 struct spu_link_hash_table *ret = (struct spu_link_hash_table *) hash;
372
373 bfd_hash_table_free (&ret->stub_hash_table);
374 _bfd_generic_link_hash_table_free (hash);
375 }
376
/* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
   to (hash, NULL) for global symbols, and (NULL, sym) for locals.  Set
   *SYMSECP to the symbol's section.  *LOCSYMSP caches local syms.  */

static bfd_boolean
get_sym_h (struct elf_link_hash_entry **hp,
	   Elf_Internal_Sym **symp,
	   asection **symsecp,
	   Elf_Internal_Sym **locsymsp,
	   unsigned long r_symndx,
	   bfd *ibfd)
{
  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;

  if (r_symndx >= symtab_hdr->sh_info)
    {
      /* Global symbol: indexes at or past sh_info refer to the bfd's
	 hash entries.  */
      struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
      struct elf_link_hash_entry *h;

      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
      /* Chase indirect and warning links to the real symbol.  */
      while (h->root.type == bfd_link_hash_indirect
	     || h->root.type == bfd_link_hash_warning)
	h = (struct elf_link_hash_entry *) h->root.u.i.link;

      if (hp != NULL)
	*hp = h;

      if (symp != NULL)
	*symp = NULL;

      if (symsecp != NULL)
	{
	  asection *symsec = NULL;
	  if (h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	    symsec = h->root.u.def.section;
	  *symsecp = symsec;
	}
    }
  else
    {
      /* Local symbol: read the symbol table on first use and cache it
	 via *LOCSYMSP.  */
      Elf_Internal_Sym *sym;
      Elf_Internal_Sym *locsyms = *locsymsp;

      if (locsyms == NULL)
	{
	  locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
	  if (locsyms == NULL)
	    {
	      size_t symcount = symtab_hdr->sh_info;

	      /* If we are reading symbols into the contents, then
		 read the global syms too.  This is done to cache
		 syms for later stack analysis.  */
	      if ((unsigned char **) locsymsp == &symtab_hdr->contents)
		symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
	      locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
					      NULL, NULL, NULL);
	    }
	  if (locsyms == NULL)
	    return FALSE;
	  *locsymsp = locsyms;
	}
      sym = locsyms + r_symndx;

      if (hp != NULL)
	*hp = NULL;

      if (symp != NULL)
	*symp = sym;

      if (symsecp != NULL)
	{
	  asection *symsec = NULL;
	  /* Only ordinary defined section indexes map to a section;
	     SHN_UNDEF and the reserved range yield NULL.  */
	  if ((sym->st_shndx != SHN_UNDEF
	       && sym->st_shndx < SHN_LORESERVE)
	      || sym->st_shndx > SHN_HIRESERVE)
	    symsec = bfd_section_from_elf_index (ibfd, sym->st_shndx);
	  *symsecp = symsec;
	}
    }

  return TRUE;
}
461
/* Build a name for an entry in the stub hash table.  We can't use a
   local symbol name because ld -r might generate duplicate local symbols.
   Global syms are named "<symbol>+<addend:%x>"; locals are named
   "<section id:%x>:<symndx:%x>+<addend:%x>".  Returns NULL on
   allocation failure; caller frees the returned string.  */

static char *
spu_stub_name (const asection *sym_sec,
	       const struct elf_link_hash_entry *h,
	       const Elf_Internal_Rela *rel)
{
  char *stub_name;
  bfd_size_type len;

  if (h)
    {
      /* Room for the symbol, '+', up to 8 hex digits, and the NUL.  */
      len = strlen (h->root.root.string) + 1 + 8 + 1;
      stub_name = bfd_malloc (len);
      if (stub_name == NULL)
	return stub_name;

      sprintf (stub_name, "%s+%x",
	       h->root.root.string,
	       (int) rel->r_addend & 0xffffffff);
      /* Point LEN just past a one-digit addend, for the "+0" check
	 below (only a zero addend prints as a single '0').  */
      len -= 8;
    }
  else
    {
      /* Three hex fields of up to 8 digits plus separators and NUL.  */
      len = 8 + 1 + 8 + 1 + 8 + 1;
      stub_name = bfd_malloc (len);
      if (stub_name == NULL)
	return stub_name;

      sprintf (stub_name, "%x:%x+%x",
	       sym_sec->id & 0xffffffff,
	       (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
	       (int) rel->r_addend & 0xffffffff);
      len = strlen (stub_name);
    }

  /* Trim a trailing "+0" so a zero addend names the stub the same as
     no addend.  */
  if (stub_name[len - 2] == '+'
      && stub_name[len - 1] == '0'
      && stub_name[len] == 0)
    stub_name[len - 2] = 0;

  return stub_name;
}
506
/* Create the note section if not already present.  This is done early so
   that the linker maps the sections to the right place in the output.
   Also stashes STACK_ANALYSIS and EMIT_STACK_SYMS in the hash table.  */

bfd_boolean
spu_elf_create_sections (bfd *output_bfd,
			 struct bfd_link_info *info,
			 int stack_analysis,
			 int emit_stack_syms)
{
  bfd *ibfd;
  struct spu_link_hash_table *htab = spu_hash_table (info);

  /* Stash some options away where we can get at them later.  */
  htab->stack_analysis = stack_analysis;
  htab->emit_stack_syms = emit_stack_syms;

  /* If any input already supplies the note section, do nothing.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->next)
    if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
      break;

  if (ibfd == NULL)
    {
      /* Make SPU_PTNOTE_SPUNAME section.  */
      asection *s;
      size_t name_len;
      size_t size;
      bfd_byte *data;
      flagword flags;

      /* Attach the section to the first input bfd.  */
      ibfd = info->input_bfds;
      flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
      if (s == NULL
	  || !bfd_set_section_alignment (ibfd, s, 4))
	return FALSE;

      /* Note layout: 12-byte header (namesz, descsz, type), then the
	 name and desc, each padded to a 4-byte boundary.  */
      name_len = strlen (bfd_get_filename (output_bfd)) + 1;
      size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
      size += (name_len + 3) & -4;

      if (!bfd_set_section_size (ibfd, s, size))
	return FALSE;

      data = bfd_zalloc (ibfd, size);
      if (data == NULL)
	return FALSE;

      /* namesz, descsz, type = 1, then name and desc contents.  */
      bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
      bfd_put_32 (ibfd, name_len, data + 4);
      bfd_put_32 (ibfd, 1, data + 8);
      memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
      memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
	      bfd_get_filename (output_bfd), name_len);
      s->contents = data;
    }

  return TRUE;
}
565
566 /* qsort predicate to sort sections by vma. */
567
568 static int
569 sort_sections (const void *a, const void *b)
570 {
571 const asection *const *s1 = a;
572 const asection *const *s2 = b;
573 bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;
574
575 if (delta != 0)
576 return delta < 0 ? -1 : 1;
577
578 return (*s1)->index - (*s2)->index;
579 }
580
581 /* Identify overlays in the output bfd, and number them. */
582
583 bfd_boolean
584 spu_elf_find_overlays (bfd *output_bfd, struct bfd_link_info *info)
585 {
586 struct spu_link_hash_table *htab = spu_hash_table (info);
587 asection **alloc_sec;
588 unsigned int i, n, ovl_index, num_buf;
589 asection *s;
590 bfd_vma ovl_end;
591
592 if (output_bfd->section_count < 2)
593 return FALSE;
594
595 alloc_sec = bfd_malloc (output_bfd->section_count * sizeof (*alloc_sec));
596 if (alloc_sec == NULL)
597 return FALSE;
598
599 /* Pick out all the alloced sections. */
600 for (n = 0, s = output_bfd->sections; s != NULL; s = s->next)
601 if ((s->flags & SEC_ALLOC) != 0
602 && (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
603 && s->size != 0)
604 alloc_sec[n++] = s;
605
606 if (n == 0)
607 {
608 free (alloc_sec);
609 return FALSE;
610 }
611
612 /* Sort them by vma. */
613 qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);
614
615 /* Look for overlapping vmas. Any with overlap must be overlays.
616 Count them. Also count the number of overlay regions and for
617 each region save a section from that region with the lowest vma
618 and another section with the highest end vma. */
619 ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
620 for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
621 {
622 s = alloc_sec[i];
623 if (s->vma < ovl_end)
624 {
625 asection *s0 = alloc_sec[i - 1];
626
627 if (spu_elf_section_data (s0)->ovl_index == 0)
628 {
629 spu_elf_section_data (s0)->ovl_index = ++ovl_index;
630 alloc_sec[num_buf * 2] = s0;
631 alloc_sec[num_buf * 2 + 1] = s0;
632 num_buf++;
633 }
634 spu_elf_section_data (s)->ovl_index = ++ovl_index;
635 if (ovl_end < s->vma + s->size)
636 {
637 ovl_end = s->vma + s->size;
638 alloc_sec[num_buf * 2 - 1] = s;
639 }
640 }
641 else
642 ovl_end = s->vma + s->size;
643 }
644
645 htab->num_overlays = ovl_index;
646 htab->num_buf = num_buf;
647 if (ovl_index == 0)
648 {
649 free (alloc_sec);
650 return FALSE;
651 }
652
653 alloc_sec = bfd_realloc (alloc_sec, num_buf * 2 * sizeof (*alloc_sec));
654 if (alloc_sec == NULL)
655 return FALSE;
656
657 htab->ovl_region = alloc_sec;
658 return TRUE;
659 }
660
/* Overlay stub layout (see the diagram in spu_elf_size_stubs): each
   stub is an ila/br pair, and each overlay's group of stubs shares a
   trailing ila/br-__ovly_load pair.  */

/* One of these per stub.  */
#define SIZEOF_STUB1 8
#define ILA_79 0x4200004f /* ila $79,function_address */
#define BR 0x32000000 /* br stub2 */

/* One of these per overlay.  */
#define SIZEOF_STUB2 8
#define ILA_78 0x4200004e /* ila $78,overlay_number */
 /* br __ovly_load */
#define NOP 0x40200000
671
672 /* Return true for all relative and absolute branch instructions.
673 bra 00110000 0..
674 brasl 00110001 0..
675 br 00110010 0..
676 brsl 00110011 0..
677 brz 00100000 0..
678 brnz 00100001 0..
679 brhz 00100010 0..
680 brhnz 00100011 0.. */
681
682 static bfd_boolean
683 is_branch (const unsigned char *insn)
684 {
685 return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
686 }
687
688 /* Return true for branch hint instructions.
689 hbra 0001000..
690 hbrr 0001001.. */
691
692 static bfd_boolean
693 is_hint (const unsigned char *insn)
694 {
695 return (insn[0] & 0xfc) == 0x10;
696 }
697
698 /* Return TRUE if this reloc symbol should possibly go via an overlay stub. */
699
700 static bfd_boolean
701 needs_ovl_stub (const char *sym_name,
702 asection *sym_sec,
703 asection *input_section,
704 struct spu_link_hash_table *htab,
705 bfd_boolean is_branch)
706 {
707 if (htab->num_overlays == 0)
708 return FALSE;
709
710 if (sym_sec == NULL
711 || sym_sec->output_section == NULL
712 || spu_elf_section_data (sym_sec->output_section) == NULL)
713 return FALSE;
714
715 /* setjmp always goes via an overlay stub, because then the return
716 and hence the longjmp goes via __ovly_return. That magically
717 makes setjmp/longjmp between overlays work. */
718 if (strncmp (sym_name, "setjmp", 6) == 0
719 && (sym_name[6] == '\0' || sym_name[6] == '@'))
720 return TRUE;
721
722 /* Usually, symbols in non-overlay sections don't need stubs. */
723 if (spu_elf_section_data (sym_sec->output_section)->ovl_index == 0
724 && !htab->non_overlay_stubs)
725 return FALSE;
726
727 /* A reference from some other section to a symbol in an overlay
728 section needs a stub. */
729 if (spu_elf_section_data (sym_sec->output_section)->ovl_index
730 != spu_elf_section_data (input_section->output_section)->ovl_index)
731 return TRUE;
732
733 /* If this insn isn't a branch then we are possibly taking the
734 address of a function and passing it out somehow. */
735 return !is_branch;
736 }
737
/* State shared by the stub-collection traversal routines.  */
struct stubarr {
  /* The stub hash table being filled in.  */
  struct bfd_hash_table *stub_hash_table;
  /* Flat array of pointers to the hash entries, built for sorting.  */
  struct spu_stub_hash_entry **sh;
  /* Number of stubs; populate_stubs counts it back down to zero.  */
  unsigned int count;
  /* Set to 1 on allocation failure in allocate_spuear_stubs.  */
  int err;
};
744
745 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
746 symbols. */
747
748 static bfd_boolean
749 allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
750 {
751 /* Symbols starting with _SPUEAR_ need a stub because they may be
752 invoked by the PPU. */
753 if ((h->root.type == bfd_link_hash_defined
754 || h->root.type == bfd_link_hash_defweak)
755 && h->def_regular
756 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
757 {
758 struct stubarr *stubs = inf;
759 static Elf_Internal_Rela zero_rel;
760 char *stub_name = spu_stub_name (h->root.u.def.section, h, &zero_rel);
761 struct spu_stub_hash_entry *sh;
762
763 if (stub_name == NULL)
764 {
765 stubs->err = 1;
766 return FALSE;
767 }
768
769 sh = (struct spu_stub_hash_entry *)
770 bfd_hash_lookup (stubs->stub_hash_table, stub_name, TRUE, FALSE);
771 if (sh == NULL)
772 {
773 free (stub_name);
774 return FALSE;
775 }
776
777 /* If this entry isn't new, we already have a stub. */
778 if (sh->target_section != NULL)
779 {
780 free (stub_name);
781 return TRUE;
782 }
783
784 sh->target_section = h->root.u.def.section;
785 sh->target_off = h->root.u.def.value;
786 stubs->count += 1;
787 }
788
789 return TRUE;
790 }
791
792 /* Called via bfd_hash_traverse to set up pointers to all symbols
793 in the stub hash table. */
794
795 static bfd_boolean
796 populate_stubs (struct bfd_hash_entry *bh, void *inf)
797 {
798 struct stubarr *stubs = inf;
799
800 stubs->sh[--stubs->count] = (struct spu_stub_hash_entry *) bh;
801 return TRUE;
802 }
803
804 /* qsort predicate to sort stubs by overlay number. */
805
806 static int
807 sort_stubs (const void *a, const void *b)
808 {
809 const struct spu_stub_hash_entry *const *sa = a;
810 const struct spu_stub_hash_entry *const *sb = b;
811 int i;
812 bfd_signed_vma d;
813
814 i = spu_elf_section_data ((*sa)->target_section->output_section)->ovl_index;
815 i -= spu_elf_section_data ((*sb)->target_section->output_section)->ovl_index;
816 if (i != 0)
817 return i;
818
819 d = ((*sa)->target_section->output_section->vma
820 + (*sa)->target_section->output_offset
821 + (*sa)->target_off
822 - (*sb)->target_section->output_section->vma
823 - (*sb)->target_section->output_offset
824 - (*sb)->target_off);
825 if (d != 0)
826 return d < 0 ? -1 : 1;
827
828 /* Two functions at the same address. Aliases perhaps. */
829 i = strcmp ((*sb)->root.string, (*sa)->root.string);
830 BFD_ASSERT (i != 0);
831 return i;
832 }
833
834 /* Allocate space for overlay call and return stubs. */
835
836 bfd_boolean
837 spu_elf_size_stubs (bfd *output_bfd,
838 struct bfd_link_info *info,
839 int non_overlay_stubs,
840 int stack_analysis,
841 asection **stub,
842 asection **ovtab,
843 asection **toe)
844 {
845 struct spu_link_hash_table *htab = spu_hash_table (info);
846 bfd *ibfd;
847 struct stubarr stubs;
848 unsigned i, group;
849 flagword flags;
850
851 htab->non_overlay_stubs = non_overlay_stubs;
852 stubs.stub_hash_table = &htab->stub_hash_table;
853 stubs.count = 0;
854 stubs.err = 0;
855 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
856 {
857 extern const bfd_target bfd_elf32_spu_vec;
858 Elf_Internal_Shdr *symtab_hdr;
859 asection *section;
860 Elf_Internal_Sym *local_syms = NULL;
861 void *psyms;
862
863 if (ibfd->xvec != &bfd_elf32_spu_vec)
864 continue;
865
866 /* We'll need the symbol table in a second. */
867 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
868 if (symtab_hdr->sh_info == 0)
869 continue;
870
871 /* Arrange to read and keep global syms for later stack analysis. */
872 psyms = &local_syms;
873 if (stack_analysis)
874 psyms = &symtab_hdr->contents;
875
876 /* Walk over each section attached to the input bfd. */
877 for (section = ibfd->sections; section != NULL; section = section->next)
878 {
879 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
880
881 /* If there aren't any relocs, then there's nothing more to do. */
882 if ((section->flags & SEC_RELOC) == 0
883 || (section->flags & SEC_ALLOC) == 0
884 || (section->flags & SEC_LOAD) == 0
885 || section->reloc_count == 0)
886 continue;
887
888 /* If this section is a link-once section that will be
889 discarded, then don't create any stubs. */
890 if (section->output_section == NULL
891 || section->output_section->owner != output_bfd)
892 continue;
893
894 /* Get the relocs. */
895 internal_relocs
896 = _bfd_elf_link_read_relocs (ibfd, section, NULL, NULL,
897 info->keep_memory);
898 if (internal_relocs == NULL)
899 goto error_ret_free_local;
900
901 /* Now examine each relocation. */
902 irela = internal_relocs;
903 irelaend = irela + section->reloc_count;
904 for (; irela < irelaend; irela++)
905 {
906 enum elf_spu_reloc_type r_type;
907 unsigned int r_indx;
908 asection *sym_sec;
909 Elf_Internal_Sym *sym;
910 struct elf_link_hash_entry *h;
911 const char *sym_name;
912 char *stub_name;
913 struct spu_stub_hash_entry *sh;
914 unsigned int sym_type;
915 enum _insn_type { non_branch, branch, call } insn_type;
916
917 r_type = ELF32_R_TYPE (irela->r_info);
918 r_indx = ELF32_R_SYM (irela->r_info);
919
920 if (r_type >= R_SPU_max)
921 {
922 bfd_set_error (bfd_error_bad_value);
923 goto error_ret_free_internal;
924 }
925
926 /* Determine the reloc target section. */
927 if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, ibfd))
928 goto error_ret_free_internal;
929
930 if (sym_sec == NULL
931 || sym_sec->output_section == NULL
932 || sym_sec->output_section->owner != output_bfd)
933 continue;
934
935 /* Ensure no stubs for user supplied overlay manager syms. */
936 if (h != NULL
937 && (strcmp (h->root.root.string, "__ovly_load") == 0
938 || strcmp (h->root.root.string, "__ovly_return") == 0))
939 continue;
940
941 insn_type = non_branch;
942 if (r_type == R_SPU_REL16
943 || r_type == R_SPU_ADDR16)
944 {
945 unsigned char insn[4];
946
947 if (!bfd_get_section_contents (ibfd, section, insn,
948 irela->r_offset, 4))
949 goto error_ret_free_internal;
950
951 if (is_branch (insn) || is_hint (insn))
952 {
953 insn_type = branch;
954 if ((insn[0] & 0xfd) == 0x31)
955 insn_type = call;
956 }
957 }
958
959 /* We are only interested in function symbols. */
960 if (h != NULL)
961 {
962 sym_type = h->type;
963 sym_name = h->root.root.string;
964 }
965 else
966 {
967 sym_type = ELF_ST_TYPE (sym->st_info);
968 sym_name = bfd_elf_sym_name (sym_sec->owner,
969 symtab_hdr,
970 sym,
971 sym_sec);
972 }
973 if (sym_type != STT_FUNC)
974 {
975 /* It's common for people to write assembly and forget
976 to give function symbols the right type. Handle
977 calls to such symbols, but warn so that (hopefully)
978 people will fix their code. We need the symbol
979 type to be correct to distinguish function pointer
980 initialisation from other pointer initialisation. */
981 if (insn_type == call)
982 (*_bfd_error_handler) (_("warning: call to non-function"
983 " symbol %s defined in %B"),
984 sym_sec->owner, sym_name);
985 else
986 continue;
987 }
988
989 if (!needs_ovl_stub (sym_name, sym_sec, section, htab,
990 insn_type != non_branch))
991 continue;
992
993 stub_name = spu_stub_name (sym_sec, h, irela);
994 if (stub_name == NULL)
995 goto error_ret_free_internal;
996
997 sh = (struct spu_stub_hash_entry *)
998 bfd_hash_lookup (&htab->stub_hash_table, stub_name,
999 TRUE, FALSE);
1000 if (sh == NULL)
1001 {
1002 free (stub_name);
1003 error_ret_free_internal:
1004 if (elf_section_data (section)->relocs != internal_relocs)
1005 free (internal_relocs);
1006 error_ret_free_local:
1007 if (local_syms != NULL
1008 && (symtab_hdr->contents
1009 != (unsigned char *) local_syms))
1010 free (local_syms);
1011 return FALSE;
1012 }
1013
1014 /* If this entry isn't new, we already have a stub. */
1015 if (sh->target_section != NULL)
1016 {
1017 free (stub_name);
1018 continue;
1019 }
1020
1021 sh->target_section = sym_sec;
1022 if (h != NULL)
1023 sh->target_off = h->root.u.def.value;
1024 else
1025 sh->target_off = sym->st_value;
1026 sh->target_off += irela->r_addend;
1027
1028 stubs.count += 1;
1029 }
1030
1031 /* We're done with the internal relocs, free them. */
1032 if (elf_section_data (section)->relocs != internal_relocs)
1033 free (internal_relocs);
1034 }
1035
1036 if (local_syms != NULL
1037 && symtab_hdr->contents != (unsigned char *) local_syms)
1038 {
1039 if (!info->keep_memory)
1040 free (local_syms);
1041 else
1042 symtab_hdr->contents = (unsigned char *) local_syms;
1043 }
1044 }
1045
1046 elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, &stubs);
1047 if (stubs.err)
1048 return FALSE;
1049
1050 *stub = NULL;
1051 if (stubs.count == 0)
1052 return TRUE;
1053
1054 ibfd = info->input_bfds;
1055 flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
1056 | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
1057 htab->stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
1058 *stub = htab->stub;
1059 if (htab->stub == NULL
1060 || !bfd_set_section_alignment (ibfd, htab->stub, 2))
1061 return FALSE;
1062
1063 flags = (SEC_ALLOC | SEC_LOAD
1064 | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
1065 htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
1066 *ovtab = htab->ovtab;
1067 if (htab->ovtab == NULL
1068 || !bfd_set_section_alignment (ibfd, htab->stub, 4))
1069 return FALSE;
1070
1071 *toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
1072 if (*toe == NULL
1073 || !bfd_set_section_alignment (ibfd, *toe, 4))
1074 return FALSE;
1075 (*toe)->size = 16;
1076
1077 /* Retrieve all the stubs and sort. */
1078 stubs.sh = bfd_malloc (stubs.count * sizeof (*stubs.sh));
1079 if (stubs.sh == NULL)
1080 return FALSE;
1081 i = stubs.count;
1082 bfd_hash_traverse (&htab->stub_hash_table, populate_stubs, &stubs);
1083 BFD_ASSERT (stubs.count == 0);
1084
1085 stubs.count = i;
1086 qsort (stubs.sh, stubs.count, sizeof (*stubs.sh), sort_stubs);
1087
1088 /* Now that the stubs are sorted, place them in the stub section.
1089 Stubs are grouped per overlay
1090 . ila $79,func1
1091 . br 1f
1092 . ila $79,func2
1093 . br 1f
1094 .
1095 .
1096 . ila $79,funcn
1097 . nop
1098 . 1:
1099 . ila $78,ovl_index
1100 . br __ovly_load */
1101
1102 group = 0;
1103 for (i = 0; i < stubs.count; i++)
1104 {
1105 if (spu_elf_section_data (stubs.sh[group]->target_section
1106 ->output_section)->ovl_index
1107 != spu_elf_section_data (stubs.sh[i]->target_section
1108 ->output_section)->ovl_index)
1109 {
1110 htab->stub->size += SIZEOF_STUB2;
1111 for (; group != i; group++)
1112 stubs.sh[group]->delta
1113 = stubs.sh[i - 1]->off - stubs.sh[group]->off;
1114 }
1115 if (group == i
1116 || ((stubs.sh[i - 1]->target_section->output_section->vma
1117 + stubs.sh[i - 1]->target_section->output_offset
1118 + stubs.sh[i - 1]->target_off)
1119 != (stubs.sh[i]->target_section->output_section->vma
1120 + stubs.sh[i]->target_section->output_offset
1121 + stubs.sh[i]->target_off)))
1122 {
1123 stubs.sh[i]->off = htab->stub->size;
1124 htab->stub->size += SIZEOF_STUB1;
1125 }
1126 else
1127 stubs.sh[i]->off = stubs.sh[i - 1]->off;
1128 }
1129 if (group != i)
1130 htab->stub->size += SIZEOF_STUB2;
1131 for (; group != i; group++)
1132 stubs.sh[group]->delta = stubs.sh[i - 1]->off - stubs.sh[group]->off;
1133
1134 /* htab->ovtab consists of two arrays.
1135 . struct {
1136 . u32 vma;
1137 . u32 size;
1138 . u32 file_off;
1139 . u32 buf;
1140 . } _ovly_table[];
1141 .
1142 . struct {
1143 . u32 mapped;
1144 . } _ovly_buf_table[]; */
1145
1146 htab->ovtab->alignment_power = 4;
1147 htab->ovtab->size = htab->num_overlays * 16 + htab->num_buf * 4;
1148
1149 return TRUE;
1150 }
1151
1152 /* Functions to handle embedded spu_ovl.o object. */
1153
1154 static void *
1155 ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
1156 {
1157 return stream;
1158 }
1159
1160 static file_ptr
1161 ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
1162 void *stream,
1163 void *buf,
1164 file_ptr nbytes,
1165 file_ptr offset)
1166 {
1167 struct _ovl_stream *os;
1168 size_t count;
1169 size_t max;
1170
1171 os = (struct _ovl_stream *) stream;
1172 max = (const char *) os->end - (const char *) os->start;
1173
1174 if ((ufile_ptr) offset >= max)
1175 return 0;
1176
1177 count = nbytes;
1178 if (count > max - offset)
1179 count = max - offset;
1180
1181 memcpy (buf, (const char *) os->start + offset, count);
1182 return count;
1183 }
1184
/* Open the built-in overlay manager object (a byte image described by
   STREAM) as a bfd, using the iovec callbacks above so that no real
   file is required.  On success *OVL_BFD holds the new bfd and TRUE
   is returned.  */

bfd_boolean
spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
{
  *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
			      "elf32-spu",
			      ovl_mgr_open,
			      (void *) stream,
			      ovl_mgr_pread,
			      NULL,   /* no close callback needed */
			      NULL);  /* no stat callback needed */
  return *ovl_bfd != NULL;
}
1197
1198 /* Fill in the ila and br for a stub. On the last stub for a group,
1199 write the stub that sets the overlay number too. */
1200
static bfd_boolean
write_one_stub (struct bfd_hash_entry *bh, void *inf)
{
  struct spu_stub_hash_entry *ent = (struct spu_stub_hash_entry *) bh;
  struct spu_link_hash_table *htab = inf;
  asection *sec = htab->stub;
  asection *s = ent->target_section;
  unsigned int ovl;
  bfd_vma val;

  /* ila $79,target: load the final destination address into $79.  */
  val = ent->target_off + s->output_offset + s->output_section->vma;
  bfd_put_32 (sec->owner, ILA_79 + ((val << 7) & 0x01ffff80),
	      sec->contents + ent->off);
  /* br 1f: branch to the shared stub2 trailer of this group.  DELTA
     is the byte distance to the group's last stub, so the target is
     that stub's offset + 8, i.e. the "1:" label in the layout comment
     in spu_elf_size_stubs.  */
  val = ent->delta + 4;
  bfd_put_32 (sec->owner, BR + ((val << 5) & 0x007fff80),
	      sec->contents + ent->off + 4);

  /* If this is the last stub of this group, write stub2.  */
  if (ent->delta == 0)
    {
      /* Replace the br just written with a nop; the last stub falls
	 straight through into stub2.  */
      bfd_put_32 (sec->owner, NOP,
		  sec->contents + ent->off + 4);

      /* ila $78,ovl_index: tell __ovly_load which overlay to map.  */
      ovl = spu_elf_section_data (s->output_section)->ovl_index;
      bfd_put_32 (sec->owner, ILA_78 + ((ovl << 7) & 0x01ffff80),
		  sec->contents + ent->off + 8);

      /* PC-relative displacement from the final br to __ovly_load.  */
      val = (htab->ovly_load->root.u.def.section->output_section->vma
	     + htab->ovly_load->root.u.def.section->output_offset
	     + htab->ovly_load->root.u.def.value
	     - (sec->output_section->vma
		+ sec->output_offset
		+ ent->off + 12));

      /* br only reaches +-0x20000 bytes; record overflow so the
	 caller can report it after all stubs are written.  */
      if (val + 0x20000 >= 0x40000)
	htab->stub_overflow = TRUE;

      bfd_put_32 (sec->owner, BR + ((val << 5) & 0x007fff80),
		  sec->contents + ent->off + 12);
    }

  if (htab->emit_stub_syms)
    {
      struct elf_link_hash_entry *h;
      size_t len1, len2;
      char *name;

      /* Fabricate a "00000000.ovl_call.<target>" symbol on the stub
	 so it is visible in link maps and debuggers.  */
      len1 = sizeof ("00000000.ovl_call.") - 1;
      len2 = strlen (ent->root.string);
      name = bfd_malloc (len1 + len2 + 1);
      if (name == NULL)
	return FALSE;
      memcpy (name, "00000000.ovl_call.", len1);
      memcpy (name + len1, ent->root.string, len2 + 1);
      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
      free (name);
      if (h == NULL)
	return FALSE;
      if (h->root.type == bfd_link_hash_new)
	{
	  h->root.type = bfd_link_hash_defined;
	  h->root.u.def.section = sec;
	  h->root.u.def.value = ent->off;
	  /* The last stub of a group owns the stub2 trailer too.  */
	  h->size = (ent->delta == 0
		     ? SIZEOF_STUB1 + SIZEOF_STUB2 : SIZEOF_STUB1);
	  h->type = STT_FUNC;
	  h->ref_regular = 1;
	  h->def_regular = 1;
	  h->ref_regular_nonweak = 1;
	  h->forced_local = 1;
	  h->non_elf = 0;
	}
    }

  return TRUE;
}
1277
1278 /* Define an STT_OBJECT symbol. */
1279
1280 static struct elf_link_hash_entry *
1281 define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
1282 {
1283 struct elf_link_hash_entry *h;
1284
1285 h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
1286 if (h == NULL)
1287 return NULL;
1288
1289 if (h->root.type != bfd_link_hash_defined
1290 || !h->def_regular)
1291 {
1292 h->root.type = bfd_link_hash_defined;
1293 h->root.u.def.section = htab->ovtab;
1294 h->type = STT_OBJECT;
1295 h->ref_regular = 1;
1296 h->def_regular = 1;
1297 h->ref_regular_nonweak = 1;
1298 h->non_elf = 0;
1299 }
1300 else
1301 {
1302 (*_bfd_error_handler) (_("%B is not allowed to define %s"),
1303 h->root.u.def.section->owner,
1304 h->root.root.string);
1305 bfd_set_error (bfd_error_bad_value);
1306 return NULL;
1307 }
1308
1309 return h;
1310 }
1311
1312 /* Fill in all stubs and the overlay tables. */
1313
1314 bfd_boolean
1315 spu_elf_build_stubs (struct bfd_link_info *info, int emit_syms, asection *toe)
1316 {
1317 struct spu_link_hash_table *htab = spu_hash_table (info);
1318 struct elf_link_hash_entry *h;
1319 bfd_byte *p;
1320 asection *s;
1321 bfd *obfd;
1322 unsigned int i;
1323
1324 htab->emit_stub_syms = emit_syms;
1325 htab->stub->contents = bfd_zalloc (htab->stub->owner, htab->stub->size);
1326 if (htab->stub->contents == NULL)
1327 return FALSE;
1328
1329 h = elf_link_hash_lookup (&htab->elf, "__ovly_load", FALSE, FALSE, FALSE);
1330 htab->ovly_load = h;
1331 BFD_ASSERT (h != NULL
1332 && (h->root.type == bfd_link_hash_defined
1333 || h->root.type == bfd_link_hash_defweak)
1334 && h->def_regular);
1335
1336 s = h->root.u.def.section->output_section;
1337 if (spu_elf_section_data (s)->ovl_index)
1338 {
1339 (*_bfd_error_handler) (_("%s in overlay section"),
1340 h->root.u.def.section->owner);
1341 bfd_set_error (bfd_error_bad_value);
1342 return FALSE;
1343 }
1344
1345 /* Write out all the stubs. */
1346 bfd_hash_traverse (&htab->stub_hash_table, write_one_stub, htab);
1347
1348 if (htab->stub_overflow)
1349 {
1350 (*_bfd_error_handler) (_("overlay stub relocation overflow"));
1351 bfd_set_error (bfd_error_bad_value);
1352 return FALSE;
1353 }
1354
1355 htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
1356 if (htab->ovtab->contents == NULL)
1357 return FALSE;
1358
1359 /* Write out _ovly_table. */
1360 p = htab->ovtab->contents;
1361 obfd = htab->ovtab->output_section->owner;
1362 for (s = obfd->sections; s != NULL; s = s->next)
1363 {
1364 unsigned int ovl_index = spu_elf_section_data (s)->ovl_index;
1365
1366 if (ovl_index != 0)
1367 {
1368 unsigned int lo, hi, mid;
1369 unsigned long off = (ovl_index - 1) * 16;
1370 bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
1371 bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16, p + off + 4);
1372 /* file_off written later in spu_elf_modify_program_headers. */
1373
1374 lo = 0;
1375 hi = htab->num_buf;
1376 while (lo < hi)
1377 {
1378 mid = (lo + hi) >> 1;
1379 if (htab->ovl_region[2 * mid + 1]->vma
1380 + htab->ovl_region[2 * mid + 1]->size <= s->vma)
1381 lo = mid + 1;
1382 else if (htab->ovl_region[2 * mid]->vma > s->vma)
1383 hi = mid;
1384 else
1385 {
1386 bfd_put_32 (htab->ovtab->owner, mid + 1, p + off + 12);
1387 break;
1388 }
1389 }
1390 BFD_ASSERT (lo < hi);
1391 }
1392 }
1393
1394 /* Write out _ovly_buf_table. */
1395 p = htab->ovtab->contents + htab->num_overlays * 16;
1396 for (i = 0; i < htab->num_buf; i++)
1397 {
1398 bfd_put_32 (htab->ovtab->owner, 0, p);
1399 p += 4;
1400 }
1401
1402 h = define_ovtab_symbol (htab, "_ovly_table");
1403 if (h == NULL)
1404 return FALSE;
1405 h->root.u.def.value = 0;
1406 h->size = htab->num_overlays * 16;
1407
1408 h = define_ovtab_symbol (htab, "_ovly_table_end");
1409 if (h == NULL)
1410 return FALSE;
1411 h->root.u.def.value = htab->num_overlays * 16;
1412 h->size = 0;
1413
1414 h = define_ovtab_symbol (htab, "_ovly_buf_table");
1415 if (h == NULL)
1416 return FALSE;
1417 h->root.u.def.value = htab->num_overlays * 16;
1418 h->size = htab->num_buf * 4;
1419
1420 h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
1421 if (h == NULL)
1422 return FALSE;
1423 h->root.u.def.value = htab->num_overlays * 16 + htab->num_buf * 4;
1424 h->size = 0;
1425
1426 h = define_ovtab_symbol (htab, "_EAR_");
1427 if (h == NULL)
1428 return FALSE;
1429 h->root.u.def.section = toe;
1430 h->root.u.def.value = 0;
1431 h->size = 16;
1432
1433 return TRUE;
1434 }
1435
1436 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
1437 Search for stack adjusting insns, and return the sp delta. */
1438
static int
find_function_stack_adjust (asection *sec, bfd_vma offset)
{
  int unrecog;
  /* Tracked (partial) contents of the 128 SPU registers; only the
     low 32 bits of the preferred slot are modelled.  */
  int reg[128];

  memset (reg, 0, sizeof (reg));
  /* Scan forward one instruction at a time, giving up after 32
     instructions we cannot interpret.  */
  for (unrecog = 0; offset + 4 <= sec->size && unrecog < 32; offset += 4)
    {
      unsigned char buf[4];
      int rt, ra;
      int imm;

      /* Assume no relocs on stack adjusing insns.  */
      if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
	break;

      /* Stores are common in prologues and never adjust sp.  */
      if (buf[0] == 0x24 /* stqd */)
	continue;

      /* Common field extraction for the encodings handled below.  */
      rt = buf[3] & 0x7f;
      ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);
      /* Partly decoded immediate field.  */
      imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);

      if (buf[0] == 0x1c /* ai */)
	{
	  /* Sign-extend the 10-bit immediate.  */
	  imm >>= 7;
	  imm = (imm ^ 0x200) - 0x200;
	  reg[rt] = reg[ra] + imm;

	  if (rt == 1 /* sp */)
	    {
	      /* A positive adjustment would be an epilogue; a
		 negative one is the frame allocation we want.  */
	      if (imm > 0)
		break;
	      return reg[rt];
	    }
	}
      else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
	{
	  int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);

	  /* Register-register add; used for large frames where the
	     adjustment was built up in a scratch register first.  */
	  reg[rt] = reg[ra] + reg[rb];
	  if (rt == 1)
	    return reg[rt];
	}
      else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
	{
	  /* Immediate loads, used to construct large constants.  */
	  if (buf[0] >= 0x42 /* ila */)
	    imm |= (buf[0] & 1) << 17;
	  else
	    {
	      imm &= 0xffff;

	      if (buf[0] == 0x40 /* il */)
		{
		  if ((buf[1] & 0x80) == 0)
		    goto unknown_insn;
		  imm = (imm ^ 0x8000) - 0x8000;
		}
	      else if ((buf[1] & 0x80) == 0 /* ilhu */)
		imm <<= 16;
	    }
	  reg[rt] = imm;
	  continue;
	}
      else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
	{
	  /* OR halfword lower: completes an ilhu/iohl pair.  */
	  reg[rt] |= imm & 0xffff;
	  continue;
	}
      else if (buf[0] == 0x04 /* ori */)
	{
	  imm >>= 7;
	  imm = (imm ^ 0x200) - 0x200;
	  reg[rt] = reg[ra] | imm;
	  continue;
	}
      else if ((buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
	       || (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */))
	{
	  /* Used in pic reg load.  Say rt is trashed.  */
	  reg[rt] = 0;
	  continue;
	}
      else if (is_branch (buf))
	/* If we hit a branch then we must be out of the prologue.  */
	break;
    unknown_insn:
      ++unrecog;
    }

  /* No stack adjustment found.  */
  return 0;
}
1533
1534 /* qsort predicate to sort symbols by section and value. */
1535
/* Context for the sort_syms comparator: base of the symbol array, and
   the parallel array mapping each symbol to its section.  Both are set
   up by the caller before invoking qsort.  */
static Elf_Internal_Sym *sort_syms_syms;
static asection **sort_syms_psecs;
1538
1539 static int
1540 sort_syms (const void *a, const void *b)
1541 {
1542 Elf_Internal_Sym *const *s1 = a;
1543 Elf_Internal_Sym *const *s2 = b;
1544 asection *sec1,*sec2;
1545 bfd_signed_vma delta;
1546
1547 sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
1548 sec2 = sort_syms_psecs[*s2 - sort_syms_syms];
1549
1550 if (sec1 != sec2)
1551 return sec1->index - sec2->index;
1552
1553 delta = (*s1)->st_value - (*s2)->st_value;
1554 if (delta != 0)
1555 return delta < 0 ? -1 : 1;
1556
1557 delta = (*s2)->st_size - (*s1)->st_size;
1558 if (delta != 0)
1559 return delta < 0 ? -1 : 1;
1560
1561 return *s1 < *s2 ? -1 : 1;
1562 }
1563
/* One edge in the call graph: a reference from some caller to FUN.  */
struct call_info
{
  /* The function referenced.  */
  struct function_info *fun;
  /* Next entry on the caller's call_list.  */
  struct call_info *next;
  /* Non-zero when the reference was a plain branch rather than a
     call instruction (set from !is_call in mark_functions_via_relocs);
     tail calls use less stack than normal calls.  */
  int is_tail;
};
1570
/* Information about one contiguous address range belonging to a
   function, collected for stack-usage analysis.  */
struct function_info
{
  /* List of functions called.  Also branches to hot/cold part of
     function.  */
  struct call_info *call_list;
  /* For hot/cold part of function, point to owner.  */
  struct function_info *start;
  /* Symbol at start of function.  GLOBAL below says which member
     is valid: h when set, sym otherwise.  */
  union {
    Elf_Internal_Sym *sym;
    struct elf_link_hash_entry *h;
  } u;
  /* Function section.  */
  asection *sec;
  /* Address range of (this part of) function.  */
  bfd_vma lo, hi;
  /* Stack usage.  Negated result of find_function_stack_adjust,
     so a positive number of bytes.  */
  int stack;
  /* Set if global symbol.  */
  unsigned int global : 1;
  /* Set if known to be start of function (as distinct from a hunk
     in hot/cold section).  */
  unsigned int is_func : 1;
  /* Flags used during call tree traversal.  */
  unsigned int visit1 : 1;
  unsigned int non_root : 1;
  unsigned int visit2 : 1;
  unsigned int marking : 1;
  unsigned int visit3 : 1;
};
1601
/* Per-section table of function_info entries, kept sorted by address.
   Hung off the section's spu_elf_section_data.  */
struct spu_elf_stack_info
{
  /* Number of fun[] entries in use.  */
  int num_fun;
  /* Allocated capacity of fun[].  */
  int max_fun;
  /* Variable size array describing functions, one per contiguous
     address range belonging to a function.  */
  struct function_info fun[1];
};
1610
1611 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
1612 entries for section SEC. */
1613
1614 static struct spu_elf_stack_info *
1615 alloc_stack_info (asection *sec, int max_fun)
1616 {
1617 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1618 bfd_size_type amt;
1619
1620 amt = sizeof (struct spu_elf_stack_info);
1621 amt += (max_fun - 1) * sizeof (struct function_info);
1622 sec_data->stack_info = bfd_zmalloc (amt);
1623 if (sec_data->stack_info != NULL)
1624 sec_data->stack_info->max_fun = max_fun;
1625 return sec_data->stack_info;
1626 }
1627
1628 /* Add a new struct function_info describing a (part of a) function
1629 starting at SYM_H. Keep the array sorted by address. */
1630
static struct function_info *
maybe_insert_function (asection *sec,
		       void *sym_h,
		       bfd_boolean global,
		       bfd_boolean is_func)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->stack_info;
  int i;
  bfd_vma off, size;

  /* Lazily create the per-section table with a small initial
     capacity.  */
  if (sinfo == NULL)
    {
      sinfo = alloc_stack_info (sec, 20);
      if (sinfo == NULL)
	return NULL;
    }

  /* SYM_H is an Elf_Internal_Sym * for local syms, an
     elf_link_hash_entry * for globals; GLOBAL says which.  */
  if (!global)
    {
      Elf_Internal_Sym *sym = sym_h;
      off = sym->st_value;
      size = sym->st_size;
    }
  else
    {
      struct elf_link_hash_entry *h = sym_h;
      off = h->root.u.def.value;
      size = h->size;
    }

  /* Find the last entry starting at or before OFF; the array is kept
     sorted by lo.  */
  for (i = sinfo->num_fun; --i >= 0; )
    if (sinfo->fun[i].lo <= off)
      break;

  if (i >= 0)
    {
      /* Don't add another entry for an alias, but do update some
	 info.  */
      if (sinfo->fun[i].lo == off)
	{
	  /* Prefer globals over local syms.  */
	  if (global && !sinfo->fun[i].global)
	    {
	      sinfo->fun[i].global = TRUE;
	      sinfo->fun[i].u.h = sym_h;
	    }
	  if (is_func)
	    sinfo->fun[i].is_func = TRUE;
	  return &sinfo->fun[i];
	}
      /* Ignore a zero-size symbol inside an existing function.  */
      else if (sinfo->fun[i].hi > off && size == 0)
	return &sinfo->fun[i];
    }

  /* Insert at position i + 1: shift the tail up, or grow the array
     if it is full.  */
  if (++i < sinfo->num_fun)
    memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
	     (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
  else if (i >= sinfo->max_fun)
    {
      /* Grow capacity by 50% plus a little; zero the new tail.  */
      bfd_size_type amt = sizeof (struct spu_elf_stack_info);
      bfd_size_type old = amt;

      old += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
      amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo = bfd_realloc (sinfo, amt);
      if (sinfo == NULL)
	return NULL;
      memset ((char *) sinfo + old, 0, amt - old);
      sec_data->stack_info = sinfo;
    }
  sinfo->fun[i].is_func = is_func;
  sinfo->fun[i].global = global;
  sinfo->fun[i].sec = sec;
  if (global)
    sinfo->fun[i].u.h = sym_h;
  else
    sinfo->fun[i].u.sym = sym_h;
  sinfo->fun[i].lo = off;
  sinfo->fun[i].hi = off + size;
  /* Record stack usage as a positive byte count.  */
  sinfo->fun[i].stack = -find_function_stack_adjust (sec, off);
  sinfo->num_fun += 1;
  return &sinfo->fun[i];
}
1717
1718 /* Return the name of FUN. */
1719
static const char *
func_name (struct function_info *fun)
{
  asection *sec;
  bfd *ibfd;
  Elf_Internal_Shdr *symtab_hdr;

  /* For a hot/cold fragment, name it after the owning function.  */
  while (fun->start != NULL)
    fun = fun->start;

  if (fun->global)
    return fun->u.h->root.root.string;

  sec = fun->sec;
  if (fun->u.sym->st_name == 0)
    {
      /* Nameless local sym: synthesize "section+offset".
	 NOTE(review): the allocated NAME is never freed; results are
	 only used in diagnostics, so the leak appears deliberate.  */
      size_t len = strlen (sec->name);
      char *name = bfd_malloc (len + 10);
      if (name == NULL)
	return "(null)";
      sprintf (name, "%s+%lx", sec->name,
	       (unsigned long) fun->u.sym->st_value & 0xffffffff);
      return name;
    }
  ibfd = sec->owner;
  symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
  return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
}
1748
1749 /* Read the instruction at OFF in SEC. Return true iff the instruction
1750 is a nop, lnop, or stop 0 (all zero insn). */
1751
1752 static bfd_boolean
1753 is_nop (asection *sec, bfd_vma off)
1754 {
1755 unsigned char insn[4];
1756
1757 if (off + 4 > sec->size
1758 || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
1759 return FALSE;
1760 if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
1761 return TRUE;
1762 if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
1763 return TRUE;
1764 return FALSE;
1765 }
1766
1767 /* Extend the range of FUN to cover nop padding up to LIMIT.
1768 Return TRUE iff some instruction other than a NOP was found. */
1769
1770 static bfd_boolean
1771 insns_at_end (struct function_info *fun, bfd_vma limit)
1772 {
1773 bfd_vma off = (fun->hi + 3) & -4;
1774
1775 while (off < limit && is_nop (fun->sec, off))
1776 off += 4;
1777 if (off < limit)
1778 {
1779 fun->hi = off;
1780 return TRUE;
1781 }
1782 fun->hi = limit;
1783 return FALSE;
1784 }
1785
1786 /* Check and fix overlapping function ranges. Return TRUE iff there
1787 are gaps in the current info we have about functions in SEC. */
1788
static bfd_boolean
check_function_ranges (asection *sec, struct bfd_link_info *info)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->stack_info;
  int i;
  bfd_boolean gaps = FALSE;

  /* No info at all for this section is handled by the caller.  */
  if (sinfo == NULL)
    return FALSE;

  /* Walk adjacent pairs; the array is sorted by lo.  */
  for (i = 1; i < sinfo->num_fun; i++)
    if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
      {
	/* Fix overlapping symbols.  */
	const char *f1 = func_name (&sinfo->fun[i - 1]);
	const char *f2 = func_name (&sinfo->fun[i]);

	info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
	sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
      }
    else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
      /* Real instructions between two known functions: a gap.  */
      gaps = TRUE;

  if (sinfo->num_fun == 0)
    gaps = TRUE;
  else
    {
      /* Check the head and tail of the section too.  */
      if (sinfo->fun[0].lo != 0)
	gaps = TRUE;
      if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
	{
	  const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);

	  info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
	  sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
	}
      else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
	gaps = TRUE;
    }
  return gaps;
}
1831
1832 /* Search current function info for a function that contains address
1833 OFFSET in section SEC. */
1834
1835 static struct function_info *
1836 find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
1837 {
1838 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1839 struct spu_elf_stack_info *sinfo = sec_data->stack_info;
1840 int lo, hi, mid;
1841
1842 lo = 0;
1843 hi = sinfo->num_fun;
1844 while (lo < hi)
1845 {
1846 mid = (lo + hi) / 2;
1847 if (offset < sinfo->fun[mid].lo)
1848 hi = mid;
1849 else if (offset >= sinfo->fun[mid].hi)
1850 lo = mid + 1;
1851 else
1852 return &sinfo->fun[mid];
1853 }
1854 info->callbacks->einfo (_("%A:0x%v not found in function table\n"),
1855 sec, offset);
1856 return NULL;
1857 }
1858
1859 /* Add CALLEE to CALLER call list if not already present. */
1860
1861 static bfd_boolean
1862 insert_callee (struct function_info *caller, struct call_info *callee)
1863 {
1864 struct call_info *p;
1865 for (p = caller->call_list; p != NULL; p = p->next)
1866 if (p->fun == callee->fun)
1867 {
1868 /* Tail calls use less stack than normal calls. Retain entry
1869 for normal call over one for tail call. */
1870 if (p->is_tail > callee->is_tail)
1871 p->is_tail = callee->is_tail;
1872 return FALSE;
1873 }
1874 callee->next = caller->call_list;
1875 caller->call_list = callee;
1876 return TRUE;
1877 }
1878
1879 /* Rummage through the relocs for SEC, looking for function calls.
1880 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
1881 mark destination symbols on calls as being functions. Also
1882 look at branches, which may be tail calls or go to hot/cold
1883 section part of same function. */
1884
static bfd_boolean
mark_functions_via_relocs (asection *sec,
			   struct bfd_link_info *info,
			   int call_tree)
{
  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
  Elf_Internal_Sym *syms;
  void *psyms;
  /* Static so the non-code-section warning is issued only on the
     first (discovery) pass, not repeated on the call-tree pass.  */
  static bfd_boolean warned;

  internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
					       info->keep_memory);
  if (internal_relocs == NULL)
    return FALSE;

  symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
  psyms = &symtab_hdr->contents;
  syms = *(Elf_Internal_Sym **) psyms;
  irela = internal_relocs;
  irelaend = irela + sec->reloc_count;
  for (; irela < irelaend; irela++)
    {
      enum elf_spu_reloc_type r_type;
      unsigned int r_indx;
      asection *sym_sec;
      Elf_Internal_Sym *sym;
      struct elf_link_hash_entry *h;
      bfd_vma val;
      unsigned char insn[4];
      bfd_boolean is_call;
      struct function_info *caller;
      struct call_info *callee;

      /* Only branch-style relocs can be calls.  */
      r_type = ELF32_R_TYPE (irela->r_info);
      if (r_type != R_SPU_REL16
	  && r_type != R_SPU_ADDR16)
	continue;

      r_indx = ELF32_R_SYM (irela->r_info);
      if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
	return FALSE;

      /* Ignore targets that are discarded or in another output.  */
      if (sym_sec == NULL
	  || sym_sec->output_section == NULL
	  || sym_sec->output_section->owner != sec->output_section->owner)
	continue;

      /* Only actual branch/call instructions matter.  */
      if (!bfd_get_section_contents (sec->owner, sec, insn,
				     irela->r_offset, 4))
	return FALSE;
      if (!is_branch (insn))
	continue;

      if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
	  != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
	{
	  if (!call_tree)
	    warned = TRUE;
	  if (!call_tree || !warned)
	    info->callbacks->einfo (_("%B(%A+0x%v): call to non-code section"
				      " %B(%A), stack analysis incomplete\n"),
				    sec->owner, sec, irela->r_offset,
				    sym_sec->owner, sym_sec);
	  continue;
	}

      /* brsl/bisl (calls) vs plain branches; matches is_branch.  */
      is_call = (insn[0] & 0xfd) == 0x31;

      if (h)
	val = h->root.u.def.value;
      else
	val = sym->st_value;
      val += irela->r_addend;

      if (!call_tree)
	{
	  /* Discovery pass: record the branch destination as a
	     (possible) function start.  */
	  struct function_info *fun;

	  if (irela->r_addend != 0)
	    {
	      /* Branch lands inside the target symbol; fabricate a
		 local sym at the exact destination.  */
	      Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
	      if (fake == NULL)
		return FALSE;
	      fake->st_value = val;
	      fake->st_shndx
		= _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
	      sym = fake;
	    }
	  if (sym)
	    fun = maybe_insert_function (sym_sec, sym, FALSE, is_call);
	  else
	    fun = maybe_insert_function (sym_sec, h, TRUE, is_call);
	  if (fun == NULL)
	    return FALSE;
	  /* Free the fake sym if maybe_insert_function did not
	     keep it.  */
	  if (irela->r_addend != 0
	      && fun->u.sym != sym)
	    free (sym);
	  continue;
	}

      /* Call-tree pass: add an edge from the containing function to
	 the destination function.  */
      caller = find_function (sec, irela->r_offset, info);
      if (caller == NULL)
	return FALSE;
      callee = bfd_malloc (sizeof *callee);
      if (callee == NULL)
	return FALSE;

      callee->fun = find_function (sym_sec, val, info);
      if (callee->fun == NULL)
	return FALSE;
      callee->is_tail = !is_call;
      if (!insert_callee (caller, callee))
	free (callee);
      else if (!is_call
	       && !callee->fun->is_func
	       && callee->fun->stack == 0)
	{
	  /* This is either a tail call or a branch from one part of
	     the function to another, ie. hot/cold section.  If the
	     destination has been called by some other function then
	     it is a separate function.  We also assume that functions
	     are not split across input files.  */
	  if (callee->fun->start != NULL
	      || sec->owner != sym_sec->owner)
	    {
	      callee->fun->start = NULL;
	      callee->fun->is_func = TRUE;
	    }
	  else
	    callee->fun->start = caller;
	}
    }

  return TRUE;
}
2021
2022 /* Handle something like .init or .fini, which has a piece of a function.
2023 These sections are pasted together to form a single function. */
2024
static bfd_boolean
pasted_function (asection *sec, struct bfd_link_info *info)
{
  struct bfd_link_order *l;
  struct _spu_elf_section_data *sec_data;
  struct spu_elf_stack_info *sinfo;
  Elf_Internal_Sym *fake;
  struct function_info *fun, *fun_start;

  /* Fabricate a local symbol covering the whole section so it gets a
     function_info entry.  */
  fake = bfd_zmalloc (sizeof (*fake));
  if (fake == NULL)
    return FALSE;
  fake->st_value = 0;
  fake->st_size = sec->size;
  fake->st_shndx
    = _bfd_elf_section_from_bfd_section (sec->owner, sec);
  fun = maybe_insert_function (sec, fake, FALSE, FALSE);
  if (!fun)
    return FALSE;

  /* Find a function immediately preceding this section.  */
  fun_start = NULL;
  for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
    {
      if (l->u.indirect.section == sec)
	{
	  if (fun_start != NULL)
	    {
	      /* Chain this piece onto the real function start, not
		 onto another fragment.  */
	      if (fun_start->start)
		fun_start = fun_start->start;
	      fun->start = fun_start;
	    }
	  return TRUE;
	}
      /* Remember the last function seen in an earlier input
	 section of the same output section.  */
      if (l->type == bfd_indirect_link_order
	  && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
	  && (sinfo = sec_data->stack_info) != NULL
	  && sinfo->num_fun != 0)
	fun_start = &sinfo->fun[sinfo->num_fun - 1];
    }

  /* SEC should always appear in its output section's link orders.  */
  info->callbacks->einfo (_("%A link_order not found\n"), sec);
  return FALSE;
}
2069
2070 /* We're only interested in code sections. */
2071
2072 static bfd_boolean
2073 interesting_section (asection *s, bfd *obfd, struct spu_link_hash_table *htab)
2074 {
2075 return (s != htab->stub
2076 && s->output_section != NULL
2077 && s->output_section->owner == obfd
2078 && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2079 == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2080 && s->size != 0);
2081 }
2082
2083 /* Map address ranges in code sections to functions. */
2084
2085 static bfd_boolean
2086 discover_functions (bfd *output_bfd, struct bfd_link_info *info)
2087 {
2088 struct spu_link_hash_table *htab = spu_hash_table (info);
2089 bfd *ibfd;
2090 int bfd_idx;
2091 Elf_Internal_Sym ***psym_arr;
2092 asection ***sec_arr;
2093 bfd_boolean gaps = FALSE;
2094
2095 bfd_idx = 0;
2096 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2097 bfd_idx++;
2098
2099 psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
2100 if (psym_arr == NULL)
2101 return FALSE;
2102 sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
2103 if (sec_arr == NULL)
2104 return FALSE;
2105
2106
2107 for (ibfd = info->input_bfds, bfd_idx = 0;
2108 ibfd != NULL;
2109 ibfd = ibfd->link_next, bfd_idx++)
2110 {
2111 extern const bfd_target bfd_elf32_spu_vec;
2112 Elf_Internal_Shdr *symtab_hdr;
2113 asection *sec;
2114 size_t symcount;
2115 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2116 asection **psecs, **p;
2117
2118 if (ibfd->xvec != &bfd_elf32_spu_vec)
2119 continue;
2120
2121 /* Read all the symbols. */
2122 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2123 symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
2124 if (symcount == 0)
2125 continue;
2126
2127 syms = (Elf_Internal_Sym *) symtab_hdr->contents;
2128 if (syms == NULL)
2129 {
2130 syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
2131 NULL, NULL, NULL);
2132 symtab_hdr->contents = (void *) syms;
2133 if (syms == NULL)
2134 return FALSE;
2135 }
2136
2137 /* Select defined function symbols that are going to be output. */
2138 psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
2139 if (psyms == NULL)
2140 return FALSE;
2141 psym_arr[bfd_idx] = psyms;
2142 psecs = bfd_malloc (symcount * sizeof (*psecs));
2143 if (psecs == NULL)
2144 return FALSE;
2145 sec_arr[bfd_idx] = psecs;
2146 for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
2147 if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
2148 || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
2149 {
2150 asection *s;
2151
2152 *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
2153 if (s != NULL && interesting_section (s, output_bfd, htab))
2154 *psy++ = sy;
2155 }
2156 symcount = psy - psyms;
2157 *psy = NULL;
2158
2159 /* Sort them by section and offset within section. */
2160 sort_syms_syms = syms;
2161 sort_syms_psecs = psecs;
2162 qsort (psyms, symcount, sizeof (*psyms), sort_syms);
2163
2164 /* Now inspect the function symbols. */
2165 for (psy = psyms; psy < psyms + symcount; )
2166 {
2167 asection *s = psecs[*psy - syms];
2168 Elf_Internal_Sym **psy2;
2169
2170 for (psy2 = psy; ++psy2 < psyms + symcount; )
2171 if (psecs[*psy2 - syms] != s)
2172 break;
2173
2174 if (!alloc_stack_info (s, psy2 - psy))
2175 return FALSE;
2176 psy = psy2;
2177 }
2178
2179 /* First install info about properly typed and sized functions.
2180 In an ideal world this will cover all code sections, except
2181 when partitioning functions into hot and cold sections,
2182 and the horrible pasted together .init and .fini functions. */
2183 for (psy = psyms; psy < psyms + symcount; ++psy)
2184 {
2185 sy = *psy;
2186 if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
2187 {
2188 asection *s = psecs[sy - syms];
2189 if (!maybe_insert_function (s, sy, FALSE, TRUE))
2190 return FALSE;
2191 }
2192 }
2193
2194 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2195 if (interesting_section (sec, output_bfd, htab))
2196 gaps |= check_function_ranges (sec, info);
2197 }
2198
2199 if (gaps)
2200 {
2201 /* See if we can discover more function symbols by looking at
2202 relocations. */
2203 for (ibfd = info->input_bfds, bfd_idx = 0;
2204 ibfd != NULL;
2205 ibfd = ibfd->link_next, bfd_idx++)
2206 {
2207 asection *sec;
2208
2209 if (psym_arr[bfd_idx] == NULL)
2210 continue;
2211
2212 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2213 if (interesting_section (sec, output_bfd, htab)
2214 && sec->reloc_count != 0)
2215 {
2216 if (!mark_functions_via_relocs (sec, info, FALSE))
2217 return FALSE;
2218 }
2219 }
2220
2221 for (ibfd = info->input_bfds, bfd_idx = 0;
2222 ibfd != NULL;
2223 ibfd = ibfd->link_next, bfd_idx++)
2224 {
2225 Elf_Internal_Shdr *symtab_hdr;
2226 asection *sec;
2227 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2228 asection **psecs;
2229
2230 if ((psyms = psym_arr[bfd_idx]) == NULL)
2231 continue;
2232
2233 psecs = sec_arr[bfd_idx];
2234
2235 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2236 syms = (Elf_Internal_Sym *) symtab_hdr->contents;
2237
2238 gaps = FALSE;
2239 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2240 if (interesting_section (sec, output_bfd, htab))
2241 gaps |= check_function_ranges (sec, info);
2242 if (!gaps)
2243 continue;
2244
2245 /* Finally, install all globals. */
2246 for (psy = psyms; (sy = *psy) != NULL; ++psy)
2247 {
2248 asection *s;
2249
2250 s = psecs[sy - syms];
2251
2252 /* Global syms might be improperly typed functions. */
2253 if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
2254 && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
2255 {
2256 if (!maybe_insert_function (s, sy, FALSE, FALSE))
2257 return FALSE;
2258 }
2259 }
2260
2261 /* Some of the symbols we've installed as marking the
2262 beginning of functions may have a size of zero. Extend
2263 the range of such functions to the beginning of the
2264 next symbol of interest. */
2265 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2266 if (interesting_section (sec, output_bfd, htab))
2267 {
2268 struct _spu_elf_section_data *sec_data;
2269 struct spu_elf_stack_info *sinfo;
2270
2271 sec_data = spu_elf_section_data (sec);
2272 sinfo = sec_data->stack_info;
2273 if (sinfo != NULL)
2274 {
2275 int fun_idx;
2276 bfd_vma hi = sec->size;
2277
2278 for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
2279 {
2280 sinfo->fun[fun_idx].hi = hi;
2281 hi = sinfo->fun[fun_idx].lo;
2282 }
2283 }
2284 /* No symbols in this section. Must be .init or .fini
2285 or something similar. */
2286 else if (!pasted_function (sec, info))
2287 return FALSE;
2288 }
2289 }
2290 }
2291
2292 for (ibfd = info->input_bfds, bfd_idx = 0;
2293 ibfd != NULL;
2294 ibfd = ibfd->link_next, bfd_idx++)
2295 {
2296 if (psym_arr[bfd_idx] == NULL)
2297 continue;
2298
2299 free (psym_arr[bfd_idx]);
2300 free (sec_arr[bfd_idx]);
2301 }
2302
2303 free (psym_arr);
2304 free (sec_arr);
2305
2306 return TRUE;
2307 }
2308
2309 /* Mark nodes in the call graph that are called by some other node. */
2310
2311 static void
2312 mark_non_root (struct function_info *fun)
2313 {
2314 struct call_info *call;
2315
2316 fun->visit1 = TRUE;
2317 for (call = fun->call_list; call; call = call->next)
2318 {
2319 call->fun->non_root = TRUE;
2320 if (!call->fun->visit1)
2321 mark_non_root (call->fun);
2322 }
2323 }
2324
2325 /* Remove cycles from the call graph. */
2326
2327 static void
2328 call_graph_traverse (struct function_info *fun, struct bfd_link_info *info)
2329 {
2330 struct call_info **callp, *call;
2331
2332 fun->visit2 = TRUE;
2333 fun->marking = TRUE;
2334
2335 callp = &fun->call_list;
2336 while ((call = *callp) != NULL)
2337 {
2338 if (!call->fun->visit2)
2339 call_graph_traverse (call->fun, info);
2340 else if (call->fun->marking)
2341 {
2342 const char *f1 = func_name (fun);
2343 const char *f2 = func_name (call->fun);
2344
2345 info->callbacks->info (_("Stack analysis will ignore the call "
2346 "from %s to %s\n"),
2347 f1, f2);
2348 *callp = call->next;
2349 continue;
2350 }
2351 callp = &call->next;
2352 }
2353 fun->marking = FALSE;
2354 }
2355
2356 /* Populate call_list for each function. */
2357
static bfd_boolean
build_call_tree (bfd *output_bfd, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;

  /* Pass 1: scan relocations in each interesting section of every SPU
     input file, recording caller->callee edges on the per-function
     call lists.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;

      /* Only SPU ELF objects carry the stack_info we build on.  */
      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  if (!interesting_section (sec, output_bfd, htab)
	      || sec->reloc_count == 0)
	    continue;

	  if (!mark_functions_via_relocs (sec, info, TRUE))
	    return FALSE;
	}

      /* Transfer call info from hot/cold section part of function
	 to main entry.  */
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  struct _spu_elf_section_data *sec_data;
	  struct spu_elf_stack_info *sinfo;

	  if ((sec_data = spu_elf_section_data (sec)) != NULL
	      && (sinfo = sec_data->stack_info) != NULL)
	    {
	      int i;
	      for (i = 0; i < sinfo->num_fun; ++i)
		{
		  /* fun[i].start non-NULL marks this entry as a
		     hot/cold part of a function whose main entry is
		     fun[i].start.  */
		  if (sinfo->fun[i].start != NULL)
		    {
		      struct call_info *call = sinfo->fun[i].call_list;

		      while (call != NULL)
			{
			  struct call_info *call_next = call->next;
			  /* CALL is freed only when insert_callee
			     rejects it, so presumably insert_callee
			     takes ownership on success — TODO confirm
			     against its definition.  */
			  if (!insert_callee (sinfo->fun[i].start, call))
			    free (call);
			  call = call_next;
			}
		      sinfo->fun[i].call_list = NULL;
		      /* A hot/cold part is never a call-graph root.  */
		      sinfo->fun[i].non_root = TRUE;
		    }
		}
	    }
	}
    }

  /* Find the call graph root(s).  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  struct _spu_elf_section_data *sec_data;
	  struct spu_elf_stack_info *sinfo;

	  if ((sec_data = spu_elf_section_data (sec)) != NULL
	      && (sinfo = sec_data->stack_info) != NULL)
	    {
	      int i;
	      /* mark_non_root sets non_root on every function with a
		 caller; whatever stays clear is a root.  */
	      for (i = 0; i < sinfo->num_fun; ++i)
		if (!sinfo->fun[i].visit1)
		  mark_non_root (&sinfo->fun[i]);
	    }
	}
    }

  /* Remove cycles from the call graph.  We start from the root node(s)
     so that we break cycles in a reasonable place.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  struct _spu_elf_section_data *sec_data;
	  struct spu_elf_stack_info *sinfo;

	  if ((sec_data = spu_elf_section_data (sec)) != NULL
	      && (sinfo = sec_data->stack_info) != NULL)
	    {
	      int i;
	      for (i = 0; i < sinfo->num_fun; ++i)
		if (!sinfo->fun[i].non_root)
		  call_graph_traverse (&sinfo->fun[i], info);
	    }
	}
    }

  return TRUE;
}
2467
2468 /* Descend the call graph for FUN, accumulating total stack required. */
2469
static bfd_vma
sum_stack (struct function_info *fun,
	   struct bfd_link_info *info,
	   int emit_stack_syms)
{
  struct call_info *call;
  struct function_info *max = NULL;
  bfd_vma max_stack = fun->stack;
  bfd_vma stack;
  const char *f1;

  /* Cycles have been removed, so the recursion terminates; visit3
     makes repeat visits O(1) — after the first visit fun->stack
     already holds the cumulative figure.  */
  if (fun->visit3)
    return max_stack;

  for (call = fun->call_list; call; call = call->next)
    {
      stack = sum_stack (call->fun, info, emit_stack_syms);
      /* Include caller stack for normal calls, don't do so for
	 tail calls.  fun->stack here is local stack usage for
	 this function.  */
      if (!call->is_tail)
	stack += fun->stack;
      if (max_stack < stack)
	{
	  /* Remember the callee on the deepest path for annotation.  */
	  max_stack = stack;
	  max = call->fun;
	}
    }

  /* Report local and cumulative stack usage for this function.  */
  f1 = func_name (fun);
  info->callbacks->minfo (_("%s: 0x%v 0x%v\n"), f1, fun->stack, max_stack);

  if (fun->call_list)
    {
      info->callbacks->minfo (_("  calls:\n"));
      for (call = fun->call_list; call; call = call->next)
	{
	  const char *f2 = func_name (call->fun);
	  /* '*' marks the call on the max-stack path, 't' a tail call.  */
	  const char *ann1 = call->fun == max ? "*" : " ";
	  const char *ann2 = call->is_tail ? "t" : " ";

	  info->callbacks->minfo (_("   %s%s %s\n"), ann1, ann2, f2);
	}
    }

  /* From here on, fun->stack holds cumulative stack.  */
  fun->stack = max_stack;
  fun->visit3 = TRUE;

  if (emit_stack_syms)
    {
      /* Define an absolute __stack_<func> (or __stack_<secid>_<func>
	 for local syms) symbol carrying the cumulative stack size.  */
      struct spu_link_hash_table *htab = spu_hash_table (info);
      char *name = bfd_malloc (18 + strlen (f1));
      struct elf_link_hash_entry *h;

      if (name != NULL)
	{
	  if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
	    sprintf (name, "__stack_%s", f1);
	  else
	    sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);

	  h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
	  free (name);
	  /* Only define the symbol if the user hasn't already.  */
	  if (h != NULL
	      && (h->root.type == bfd_link_hash_new
		  || h->root.type == bfd_link_hash_undefined
		  || h->root.type == bfd_link_hash_undefweak))
	    {
	      h->root.type = bfd_link_hash_defined;
	      h->root.u.def.section = bfd_abs_section_ptr;
	      h->root.u.def.value = max_stack;
	      h->size = 0;
	      h->type = 0;
	      h->ref_regular = 1;
	      h->def_regular = 1;
	      h->ref_regular_nonweak = 1;
	      h->forced_local = 1;
	      h->non_elf = 0;
	    }
	}
    }

  return max_stack;
}
2555
2556 /* Provide an estimate of total stack required. */
2557
static bfd_boolean
spu_elf_stack_analysis (bfd *output_bfd,
			struct bfd_link_info *info,
			int emit_stack_syms)
{
  bfd *ibfd;
  bfd_vma max_stack = 0;

  /* Build the function list and the call graph, then sum stack usage
     down from each call-graph root.  */
  if (!discover_functions (output_bfd, info))
    return FALSE;

  if (!build_call_tree (output_bfd, info))
    return FALSE;

  info->callbacks->info (_("Stack size for call graph root nodes.\n"));
  info->callbacks->minfo (_("\nStack size for functions.  "
			    "Annotations: '*' max stack, 't' tail call\n"));
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;

      /* Only SPU ELF inputs carry stack_info.  */
      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  struct _spu_elf_section_data *sec_data;
	  struct spu_elf_stack_info *sinfo;

	  if ((sec_data = spu_elf_section_data (sec)) != NULL
	      && (sinfo = sec_data->stack_info) != NULL)
	    {
	      int i;
	      for (i = 0; i < sinfo->num_fun; ++i)
		{
		  /* Sum stack only from roots; sum_stack recurses
		     into callees itself.  */
		  if (!sinfo->fun[i].non_root)
		    {
		      bfd_vma stack;
		      const char *f1;

		      stack = sum_stack (&sinfo->fun[i], info,
					 emit_stack_syms);
		      f1 = func_name (&sinfo->fun[i]);
		      info->callbacks->info (_("  %s: 0x%v\n"),
					     f1, stack);
		      if (max_stack < stack)
			max_stack = stack;
		    }
		}
	    }
	}
    }

  info->callbacks->info (_("Maximum stack required is 0x%v\n"), max_stack);
  return TRUE;
}
2615
2616 /* Perform a final link. */
2617
2618 static bfd_boolean
2619 spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
2620 {
2621 struct spu_link_hash_table *htab = spu_hash_table (info);
2622
2623 if (htab->stack_analysis
2624 && !spu_elf_stack_analysis (output_bfd, info, htab->emit_stack_syms))
2625 info->callbacks->einfo ("%X%P: stack analysis error: %E\n");
2626
2627 return bfd_elf_final_link (output_bfd, info);
2628 }
2629
2630 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
2631
static bfd_boolean
spu_elf_relocate_section (bfd *output_bfd,
			  struct bfd_link_info *info,
			  bfd *input_bfd,
			  asection *input_section,
			  bfd_byte *contents,
			  Elf_Internal_Rela *relocs,
			  Elf_Internal_Sym *local_syms,
			  asection **local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel, *relend;
  struct spu_link_hash_table *htab;
  bfd_boolean ret = TRUE;

  htab = spu_hash_table (info);
  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
  sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int r_type;
      reloc_howto_type *howto;
      unsigned long r_symndx;
      Elf_Internal_Sym *sym;
      asection *sec;
      struct elf_link_hash_entry *h;
      const char *sym_name;
      bfd_vma relocation;
      bfd_vma addend;
      bfd_reloc_status_type r;
      bfd_boolean unresolved_reloc;
      bfd_boolean warned;
      bfd_boolean branch;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      howto = elf_howto_table + r_type;
      unresolved_reloc = FALSE;
      warned = FALSE;

      /* Resolve the target symbol: local syms via the section table,
	 globals via the linker hash chain.  */
      h = NULL;
      sym = NULL;
      sec = NULL;
      if (r_symndx < symtab_hdr->sh_info)
	{
	  sym = local_syms + r_symndx;
	  sec = local_sections[r_symndx];
	  sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
	  relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
	}
      else
	{
	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
				   r_symndx, symtab_hdr, sym_hashes,
				   h, sec, relocation,
				   unresolved_reloc, warned);
	  sym_name = h->root.root.string;
	}

      if (sec != NULL && elf_discarded_section (sec))
	{
	  /* For relocs against symbols from removed linkonce sections,
	     or sections discarded by a linker script, we just want the
	     section contents zeroed.  Avoid any special processing.  */
	  _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
	  rel->r_info = 0;
	  rel->r_addend = 0;
	  continue;
	}

      /* For ld -r, leave the relocs for the final link.  */
      if (info->relocatable)
	continue;

      if (unresolved_reloc)
	{
	  (*_bfd_error_handler)
	    (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
	     input_bfd,
	     bfd_get_section_name (input_bfd, input_section),
	     (long) rel->r_offset,
	     howto->name,
	     sym_name);
	  /* Keep going so all errors are reported, but fail overall.  */
	  ret = FALSE;
	}

      /* If this symbol is in an overlay area, we may need to relocate
	 to the overlay stub.  */
      addend = rel->r_addend;
      branch = (is_branch (contents + rel->r_offset)
		|| is_hint (contents + rel->r_offset));
      if (needs_ovl_stub (sym_name, sec, input_section, htab, branch))
	{
	  char *stub_name;
	  struct spu_stub_hash_entry *sh;

	  stub_name = spu_stub_name (sec, h, rel);
	  if (stub_name == NULL)
	    return FALSE;

	  sh = (struct spu_stub_hash_entry *)
	    bfd_hash_lookup (&htab->stub_hash_table, stub_name, FALSE, FALSE);
	  if (sh != NULL)
	    {
	      /* Redirect the reloc to the stub; the stub's address
		 already accounts for the original target, so drop the
		 addend.  */
	      relocation = (htab->stub->output_section->vma
			    + htab->stub->output_offset
			    + sh->off);
	      addend = 0;
	    }
	  free (stub_name);
	}

      r = _bfd_final_link_relocate (howto,
				    input_bfd,
				    input_section,
				    contents,
				    rel->r_offset, relocation, addend);

      if (r != bfd_reloc_ok)
	{
	  /* Map the failure to the appropriate linker callback.  */
	  const char *msg = (const char *) 0;

	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      if (!((*info->callbacks->reloc_overflow)
		    (info, (h ? &h->root : NULL), sym_name, howto->name,
		     (bfd_vma) 0, input_bfd, input_section, rel->r_offset)))
		return FALSE;
	      break;

	    case bfd_reloc_undefined:
	      if (!((*info->callbacks->undefined_symbol)
		    (info, sym_name, input_bfd, input_section,
		     rel->r_offset, TRUE)))
		return FALSE;
	      break;

	    case bfd_reloc_outofrange:
	      msg = _("internal error: out of range error");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      msg = _("internal error: unsupported relocation error");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      msg = _("internal error: dangerous error");
	      goto common_error;

	    default:
	      msg = _("internal error: unknown error");
	      /* fall through */

	    common_error:
	      if (!((*info->callbacks->warning)
		    (info, msg, sym_name, input_bfd, input_section,
		     rel->r_offset)))
		return FALSE;
	      break;
	    }
	}
    }

  return ret;
}
2801
2802 /* Adjust _SPUEAR_ syms to point at their overlay stubs. */
2803
2804 static bfd_boolean
2805 spu_elf_output_symbol_hook (struct bfd_link_info *info,
2806 const char *sym_name ATTRIBUTE_UNUSED,
2807 Elf_Internal_Sym *sym,
2808 asection *sym_sec ATTRIBUTE_UNUSED,
2809 struct elf_link_hash_entry *h)
2810 {
2811 struct spu_link_hash_table *htab = spu_hash_table (info);
2812
2813 if (!info->relocatable
2814 && htab->num_overlays != 0
2815 && h != NULL
2816 && (h->root.type == bfd_link_hash_defined
2817 || h->root.type == bfd_link_hash_defweak)
2818 && h->def_regular
2819 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
2820 {
2821 static Elf_Internal_Rela zero_rel;
2822 char *stub_name = spu_stub_name (h->root.u.def.section, h, &zero_rel);
2823 struct spu_stub_hash_entry *sh;
2824
2825 if (stub_name == NULL)
2826 return FALSE;
2827 sh = (struct spu_stub_hash_entry *)
2828 bfd_hash_lookup (&htab->stub_hash_table, stub_name, FALSE, FALSE);
2829 free (stub_name);
2830 if (sh == NULL)
2831 return TRUE;
2832 sym->st_shndx
2833 = _bfd_elf_section_from_bfd_section (htab->stub->output_section->owner,
2834 htab->stub->output_section);
2835 sym->st_value = (htab->stub->output_section->vma
2836 + htab->stub->output_offset
2837 + sh->off);
2838 }
2839
2840 return TRUE;
2841 }
2842
/* Non-zero when the output should be marked as an SPU plugin;
   consulted by spu_elf_post_process_headers.  */
static int spu_plugin = 0;

/* Record VAL as the plugin flag.  Called by the linker frontend.  */

void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}
2850
2851 /* Set ELF header e_type for plugins. */
2852
2853 static void
2854 spu_elf_post_process_headers (bfd *abfd,
2855 struct bfd_link_info *info ATTRIBUTE_UNUSED)
2856 {
2857 if (spu_plugin)
2858 {
2859 Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);
2860
2861 i_ehdrp->e_type = ET_DYN;
2862 }
2863 }
2864
2865 /* We may add an extra PT_LOAD segment for .toe. We also need extra
2866 segments for overlays. */
2867
2868 static int
2869 spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
2870 {
2871 struct spu_link_hash_table *htab = spu_hash_table (info);
2872 int extra = htab->num_overlays;
2873 asection *sec;
2874
2875 if (extra)
2876 ++extra;
2877
2878 sec = bfd_get_section_by_name (abfd, ".toe");
2879 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
2880 ++extra;
2881
2882 return extra;
2883 }
2884
2885 /* Remove .toe section from other PT_LOAD segments and put it in
2886 a segment of its own. Put overlays in separate segments too. */
2887
static bfd_boolean
spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
{
  asection *toe, *s;
  struct elf_segment_map *m;
  unsigned int i;

  /* info is NULL for objcopy/strip style runs; don't touch the map.  */
  if (info == NULL)
    return TRUE;

  toe = bfd_get_section_by_name (abfd, ".toe");
  for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
    if (m->p_type == PT_LOAD && m->count > 1)
      for (i = 0; i < m->count; i++)
	if ((s = m->sections[i]) == toe
	    || spu_elf_section_data (s)->ovl_index != 0)
	  {
	    /* Found .toe or an overlay section at index I of a
	       multi-section PT_LOAD.  Split the segment into up to
	       three: sections before S, S alone, sections after S.  */
	    struct elf_segment_map *m2;
	    bfd_vma amt;

	    if (i + 1 < m->count)
	      {
		/* Move the sections after S into a new segment M2
		   inserted after M.  The segment_map allocation has
		   one sections[] slot built in, hence (i + 2).  */
		amt = sizeof (struct elf_segment_map);
		amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->count = m->count - (i + 1);
		memcpy (m2->sections, m->sections + i + 1,
			m2->count * sizeof (m->sections[0]));
		m2->p_type = PT_LOAD;
		m2->next = m->next;
		m->next = m2;
	      }
	    m->count = 1;
	    if (i != 0)
	      {
		/* M keeps the sections before S; insert a fresh
		   single-section segment for S itself.  */
		m->count = i;
		amt = sizeof (struct elf_segment_map);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->p_type = PT_LOAD;
		m2->count = 1;
		m2->sections[0] = s;
		m2->next = m->next;
		m->next = m2;
	      }
	    /* The newly created tail segments are revisited by the
	       outer loop via m->next, so only the first hit in this
	       segment needs handling here.  */
	    break;
	  }

  return TRUE;
}
2941
2942 /* Check that all loadable section VMAs lie in the range
2943 LO .. HI inclusive. */
2944
2945 asection *
2946 spu_elf_check_vma (bfd *abfd, bfd_vma lo, bfd_vma hi)
2947 {
2948 struct elf_segment_map *m;
2949 unsigned int i;
2950
2951 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
2952 if (m->p_type == PT_LOAD)
2953 for (i = 0; i < m->count; i++)
2954 if (m->sections[i]->size != 0
2955 && (m->sections[i]->vma < lo
2956 || m->sections[i]->vma > hi
2957 || m->sections[i]->vma + m->sections[i]->size - 1 > hi))
2958 return m->sections[i];
2959
2960 return NULL;
2961 }
2962
2963 /* Tweak phdrs before writing them out. */
2964
static int
spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
{
  const struct elf_backend_data *bed;
  struct elf_obj_tdata *tdata;
  Elf_Internal_Phdr *phdr, *last;
  struct spu_link_hash_table *htab;
  unsigned int count;
  unsigned int i;

  /* info is NULL for objcopy/strip style runs; nothing to tweak.  */
  if (info == NULL)
    return TRUE;

  bed = get_elf_backend_data (abfd);
  tdata = elf_tdata (abfd);
  phdr = tdata->phdr;
  count = tdata->program_header_size / bed->s->sizeof_phdr;
  htab = spu_hash_table (info);
  if (htab->num_overlays != 0)
    {
      struct elf_segment_map *m;
      unsigned int o;

      /* Walk segment map and phdrs in parallel (index I), flagging
	 overlay segments and recording their file offsets.  */
      for (i = 0, m = elf_tdata (abfd)->segment_map; m; ++i, m = m->next)
	if (m->count != 0
	    && (o = spu_elf_section_data (m->sections[0])->ovl_index) != 0)
	  {
	    /* Mark this as an overlay header.  */
	    phdr[i].p_flags |= PF_OVERLAY;

	    if (htab->ovtab != NULL && htab->ovtab->size != 0)
	      {
		bfd_byte *p = htab->ovtab->contents;
		/* Each _ovly_table entry is 16 bytes; file_off lives
		   at byte 8 of entry O-1.  */
		unsigned int off = (o - 1) * 16 + 8;

		/* Write file_off into _ovly_table.  */
		bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
	      }
	  }
    }

  /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
     of 16.  This should always be possible when using the standard
     linker scripts, but don't create overlapping segments if
     someone is playing games with linker scripts.  */
  last = NULL;
  /* First pass (highest address downwards): verify that rounding each
     PT_LOAD up would not run into the following segment; break out
     early if it would.  */
  for (i = count; i-- != 0; )
    if (phdr[i].p_type == PT_LOAD)
      {
	unsigned adjust;

	adjust = -phdr[i].p_filesz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
	  break;

	adjust = -phdr[i].p_memsz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_filesz != 0
	    && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
	    && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
	  break;

	if (phdr[i].p_filesz != 0)
	  last = &phdr[i];
      }

  /* Second pass: only if the first pass completed (I wrapped to -1),
     actually apply the rounding.  */
  if (i == (unsigned int) -1)
    for (i = count; i-- != 0; )
      if (phdr[i].p_type == PT_LOAD)
	{
	  unsigned adjust;

	  adjust = -phdr[i].p_filesz & 15;
	  phdr[i].p_filesz += adjust;

	  adjust = -phdr[i].p_memsz & 15;
	  phdr[i].p_memsz += adjust;
	}

  return TRUE;
}
3049
/* Target vector identification.  */
#define TARGET_BIG_SYM bfd_elf32_spu_vec
#define TARGET_BIG_NAME "elf32-spu"
#define ELF_ARCH bfd_arch_spu
#define ELF_MACHINE_CODE EM_SPU
/* This matches the alignment need for DMA. */
#define ELF_MAXPAGESIZE 0x80
#define elf_backend_rela_normal 1
#define elf_backend_can_gc_sections 1

/* Backend hooks implemented in this file (or elf32-spu.h).  */
#define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
#define elf_info_to_howto spu_elf_info_to_howto
#define elf_backend_relocate_section spu_elf_relocate_section
#define elf_backend_symbol_processing spu_elf_backend_symbol_processing
#define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
#define bfd_elf32_new_section_hook spu_elf_new_section_hook
#define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_free spu_elf_link_hash_table_free

/* Program-header and segment-map massaging for overlays and .toe.  */
#define elf_backend_additional_program_headers spu_elf_additional_program_headers
#define elf_backend_modify_segment_map spu_elf_modify_segment_map
#define elf_backend_modify_program_headers spu_elf_modify_program_headers
#define elf_backend_post_process_headers spu_elf_post_process_headers
#define elf_backend_special_sections spu_elf_special_sections
#define bfd_elf32_bfd_final_link spu_elf_final_link

/* Instantiate the target vector from the macros above.  */
#include "elf32-target.h"
This page took 0.122135 seconds and 3 git commands to generate.