* elf32-spu.c (is_indirect_branch): New function.
[deliverable/binutils-gdb.git] / bfd / elf32-spu.c
1 /* SPU specific support for 32-bit ELF
2
3 Copyright 2006, 2007 Free Software Foundation, Inc.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
20
21 #include "sysdep.h"
22 #include "bfd.h"
23 #include "bfdlink.h"
24 #include "libbfd.h"
25 #include "elf-bfd.h"
26 #include "elf/spu.h"
27 #include "elf32-spu.h"
28
29 /* We use RELA style relocs. Don't define USE_REL. */
30
31 static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
32 void *, asection *,
33 bfd *, char **);
34
/* Values of type 'enum elf_spu_reloc_type' are used to index this
   array, so it must be declared in the order of that type.

   HOWTO argument order: type, rightshift, size, bitsize, pc_relative,
   bitpos, complain_on_overflow, special_function, name,
   partial_inplace, src_mask, dst_mask, pcrel_offset.  The dst_mask
   selects the instruction field the relocated value is inserted
   into.  */

static reloc_howto_type elf_howto_table[] = {
  HOWTO (R_SPU_NONE, 0, 0, 0, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_NONE",
	 FALSE, 0, 0x00000000, FALSE),
  HOWTO (R_SPU_ADDR10, 4, 2, 10, FALSE, 14, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR10",
	 FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16, 2, 2, 16, FALSE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16_HI",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_LO, 0, 2, 16, FALSE, 7, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR16_LO",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR18, 0, 2, 18, FALSE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR18",
	 FALSE, 0, 0x01ffff80, FALSE),
  HOWTO (R_SPU_ADDR32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR32",
	 FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_REL16, 2, 2, 16, TRUE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_REL16",
	 FALSE, 0, 0x007fff80, TRUE),
  HOWTO (R_SPU_ADDR7, 0, 2, 7, FALSE, 14, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR7",
	 FALSE, 0, 0x001fc000, FALSE),
  /* The two REL9 forms need a special function because the 9 value
     bits are split across the instruction word; see spu_elf_rel9.  */
  HOWTO (R_SPU_REL9, 2, 2, 9, TRUE, 0, complain_overflow_signed,
	 spu_elf_rel9, "SPU_REL9",
	 FALSE, 0, 0x0180007f, TRUE),
  HOWTO (R_SPU_REL9I, 2, 2, 9, TRUE, 0, complain_overflow_signed,
	 spu_elf_rel9, "SPU_REL9I",
	 FALSE, 0, 0x0000c07f, TRUE),
  HOWTO (R_SPU_ADDR10I, 0, 2, 10, FALSE, 14, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR10I",
	 FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16I, 0, 2, 16, FALSE, 7, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR16I",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_REL32, 0, 2, 32, TRUE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_REL32",
	 FALSE, 0, 0xffffffff, TRUE),
  HOWTO (R_SPU_ADDR16X, 0, 2, 16, FALSE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16X",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_PPU32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_PPU32",
	 FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_PPU64, 0, 4, 64, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_PPU64",
	 FALSE, 0, -1, FALSE),
};
91
/* Sections the SPU backend treats specially.  ".toe" is created as a
   nobits, allocated section (the 4 is the length of the ".toe" name
   prefix).  */

static struct bfd_elf_special_section const spu_elf_special_sections[] = {
  { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
  { NULL, 0, 0, 0, 0 }
};
96
97 static enum elf_spu_reloc_type
98 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
99 {
100 switch (code)
101 {
102 default:
103 return R_SPU_NONE;
104 case BFD_RELOC_SPU_IMM10W:
105 return R_SPU_ADDR10;
106 case BFD_RELOC_SPU_IMM16W:
107 return R_SPU_ADDR16;
108 case BFD_RELOC_SPU_LO16:
109 return R_SPU_ADDR16_LO;
110 case BFD_RELOC_SPU_HI16:
111 return R_SPU_ADDR16_HI;
112 case BFD_RELOC_SPU_IMM18:
113 return R_SPU_ADDR18;
114 case BFD_RELOC_SPU_PCREL16:
115 return R_SPU_REL16;
116 case BFD_RELOC_SPU_IMM7:
117 return R_SPU_ADDR7;
118 case BFD_RELOC_SPU_IMM8:
119 return R_SPU_NONE;
120 case BFD_RELOC_SPU_PCREL9a:
121 return R_SPU_REL9;
122 case BFD_RELOC_SPU_PCREL9b:
123 return R_SPU_REL9I;
124 case BFD_RELOC_SPU_IMM10:
125 return R_SPU_ADDR10I;
126 case BFD_RELOC_SPU_IMM16:
127 return R_SPU_ADDR16I;
128 case BFD_RELOC_32:
129 return R_SPU_ADDR32;
130 case BFD_RELOC_32_PCREL:
131 return R_SPU_REL32;
132 case BFD_RELOC_SPU_PPU32:
133 return R_SPU_PPU32;
134 case BFD_RELOC_SPU_PPU64:
135 return R_SPU_PPU64;
136 }
137 }
138
/* Fill in the howto pointer for CACHE_PTR from the reloc number in
   DST.  */

static void
spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
		       arelent *cache_ptr,
		       Elf_Internal_Rela *dst)
{
  enum elf_spu_reloc_type r_type;

  r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
  /* NOTE(review): an out-of-range reloc number is only caught by this
     assert; a non-assert build would index past the table.  */
  BFD_ASSERT (r_type < R_SPU_max);
  cache_ptr->howto = &elf_howto_table[(int) r_type];
}
150
151 static reloc_howto_type *
152 spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
153 bfd_reloc_code_real_type code)
154 {
155 enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);
156
157 if (r_type == R_SPU_NONE)
158 return NULL;
159
160 return elf_howto_table + r_type;
161 }
162
163 static reloc_howto_type *
164 spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
165 const char *r_name)
166 {
167 unsigned int i;
168
169 for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
170 if (elf_howto_table[i].name != NULL
171 && strcasecmp (elf_howto_table[i].name, r_name) == 0)
172 return &elf_howto_table[i];
173
174 return NULL;
175 }
176
/* Apply R_SPU_REL9 and R_SPU_REL9I relocs.  Both hold a 9-bit signed,
   word-scaled, pc-relative value, but place the bits in different
   instruction fields; the howto dst_mask picks the right field.  */

static bfd_reloc_status_type
spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
	      void *data, asection *input_section,
	      bfd *output_bfd, char **error_message)
{
  bfd_size_type octets;
  bfd_vma val;
  long insn;

  /* If this is a relocatable link (output_bfd test tells us), just
     call the generic function.  Any adjustment will be done at final
     link time.  */
  if (output_bfd != NULL)
    return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
				  input_section, output_bfd, error_message);

  if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
    return bfd_reloc_outofrange;
  octets = reloc_entry->address * bfd_octets_per_byte (abfd);

  /* Get symbol value.  Common symbols have their size, not an offset,
     in "value", so skip that part for them.  */
  val = 0;
  if (!bfd_is_com_section (symbol->section))
    val = symbol->value;
  if (symbol->section->output_section)
    val += symbol->section->output_section->vma;

  val += reloc_entry->addend;

  /* Make it pc-relative.  NOTE(review): this subtracts only the
     section base, not reloc_entry->address as well — confirm that the
     reloc address is accounted for elsewhere for this target.  */
  val -= input_section->output_section->vma + input_section->output_offset;

  /* Scale to words, then range-check: the unsigned comparison below
     accepts exactly the signed range -256..255.  */
  val >>= 2;
  if (val + 256 >= 512)
    return bfd_reloc_overflow;

  insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);

  /* Move two high bits of value to REL9I and REL9 position.
     The mask will take care of selecting the right field.  */
  val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
  insn &= ~reloc_entry->howto->dst_mask;
  insn |= val & reloc_entry->howto->dst_mask;
  bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
  return bfd_reloc_ok;
}
225
226 static bfd_boolean
227 spu_elf_new_section_hook (bfd *abfd, asection *sec)
228 {
229 if (!sec->used_by_bfd)
230 {
231 struct _spu_elf_section_data *sdata;
232
233 sdata = bfd_zalloc (abfd, sizeof (*sdata));
234 if (sdata == NULL)
235 return FALSE;
236 sec->used_by_bfd = sdata;
237 }
238
239 return _bfd_elf_new_section_hook (abfd, sec);
240 }
241
242 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
243 strip --strip-unneeded will not remove them. */
244
245 static void
246 spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
247 {
248 if (sym->name != NULL
249 && sym->section != bfd_abs_section_ptr
250 && strncmp (sym->name, "_EAR_", 5) == 0)
251 sym->flags |= BSF_KEEP;
252 }
253
/* SPU ELF linker hash table.  */

struct spu_link_hash_table
{
  /* Generic ELF linker hash table; must be first so the struct can be
     cast to and from elf_link_hash_table.  */
  struct elf_link_hash_table elf;

  /* The stub hash table.  */
  struct bfd_hash_table stub_hash_table;

  /* NOTE: everything from STUB to the end of the struct is zeroed
     wholesale in spu_elf_link_hash_table_create via offsetof; add new
     members after STUB so they are cleared too.  */

  /* Shortcuts to overlay sections.  */
  asection *stub;
  asection *ovtab;

  /* The __ovly_load entry, if the overlay manager defines one.  */
  struct elf_link_hash_entry *ovly_load;

  /* An array of two output sections per overlay region, chosen such that
     the first section vma is the overlay buffer vma (ie. the section has
     the lowest vma in the group that occupy the region), and the second
     section vma+size specifies the end of the region.  We keep pointers
     to sections like this because section vmas may change when laying
     them out.  */
  asection **ovl_region;

  /* Number of overlay buffers.  */
  unsigned int num_buf;

  /* Total number of overlays.  */
  unsigned int num_overlays;

  /* Set if we should emit symbols for stubs.  */
  unsigned int emit_stub_syms:1;

  /* Set if we want stubs on calls out of overlay regions to
     non-overlay regions.  */
  unsigned int non_overlay_stubs : 1;

  /* Set on error.  */
  unsigned int stub_overflow : 1;

  /* Set if stack size analysis should be done.  */
  unsigned int stack_analysis : 1;

  /* Set if __stack_* syms will be emitted.  */
  unsigned int emit_stack_syms : 1;
};

/* Get the SPU-specific linker hash table from a link_info.  Valid
   because spu_elf_link_hash_table_create allocates the derived
   struct.  */
#define spu_hash_table(p) \
  ((struct spu_link_hash_table *) ((p)->hash))
302
/* One entry per overlay call stub, keyed by the name built in
   spu_stub_name.  */

struct spu_stub_hash_entry
{
  /* Base class; must be first.  */
  struct bfd_hash_entry root;

  /* Destination of this stub.  A NULL target_section also marks a
     freshly-created, not-yet-initialised entry.  */
  asection *target_section;
  bfd_vma target_off;

  /* Offset of entry in stub section.  */
  bfd_vma off;

  /* Offset from this stub to stub that loads the overlay index.  */
  bfd_vma delta;
};
317
318 /* Create an entry in a spu stub hash table. */
319
320 static struct bfd_hash_entry *
321 stub_hash_newfunc (struct bfd_hash_entry *entry,
322 struct bfd_hash_table *table,
323 const char *string)
324 {
325 /* Allocate the structure if it has not already been allocated by a
326 subclass. */
327 if (entry == NULL)
328 {
329 entry = bfd_hash_allocate (table, sizeof (struct spu_stub_hash_entry));
330 if (entry == NULL)
331 return entry;
332 }
333
334 /* Call the allocation method of the superclass. */
335 entry = bfd_hash_newfunc (entry, table, string);
336 if (entry != NULL)
337 {
338 struct spu_stub_hash_entry *sh = (struct spu_stub_hash_entry *) entry;
339
340 sh->target_section = NULL;
341 sh->target_off = 0;
342 sh->off = 0;
343 sh->delta = 0;
344 }
345
346 return entry;
347 }
348
349 /* Create a spu ELF linker hash table. */
350
351 static struct bfd_link_hash_table *
352 spu_elf_link_hash_table_create (bfd *abfd)
353 {
354 struct spu_link_hash_table *htab;
355
356 htab = bfd_malloc (sizeof (*htab));
357 if (htab == NULL)
358 return NULL;
359
360 if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
361 _bfd_elf_link_hash_newfunc,
362 sizeof (struct elf_link_hash_entry)))
363 {
364 free (htab);
365 return NULL;
366 }
367
368 /* Init the stub hash table too. */
369 if (!bfd_hash_table_init (&htab->stub_hash_table, stub_hash_newfunc,
370 sizeof (struct spu_stub_hash_entry)))
371 return NULL;
372
373 memset (&htab->stub, 0,
374 sizeof (*htab) - offsetof (struct spu_link_hash_table, stub));
375
376 return &htab->elf.root;
377 }
378
379 /* Free the derived linker hash table. */
380
381 static void
382 spu_elf_link_hash_table_free (struct bfd_link_hash_table *hash)
383 {
384 struct spu_link_hash_table *ret = (struct spu_link_hash_table *) hash;
385
386 bfd_hash_table_free (&ret->stub_hash_table);
387 _bfd_generic_link_hash_table_free (hash);
388 }
389
/* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
   to (hash, NULL) for global symbols, and (NULL, sym) for locals.  Set
   *SYMSECP to the symbol's section.  *LOCSYMSP caches local syms.
   Any of HP, SYMP, SYMSECP may be NULL if the caller doesn't need that
   result.  Returns FALSE only when reading the symbol table fails.  */

static bfd_boolean
get_sym_h (struct elf_link_hash_entry **hp,
	   Elf_Internal_Sym **symp,
	   asection **symsecp,
	   Elf_Internal_Sym **locsymsp,
	   unsigned long r_symndx,
	   bfd *ibfd)
{
  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;

  /* Indices at or above sh_info are global symbols.  */
  if (r_symndx >= symtab_hdr->sh_info)
    {
      struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
      struct elf_link_hash_entry *h;

      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
      /* Follow indirect and warning links to the real symbol.  */
      while (h->root.type == bfd_link_hash_indirect
	     || h->root.type == bfd_link_hash_warning)
	h = (struct elf_link_hash_entry *) h->root.u.i.link;

      if (hp != NULL)
	*hp = h;

      if (symp != NULL)
	*symp = NULL;

      if (symsecp != NULL)
	{
	  asection *symsec = NULL;
	  /* Only defined symbols have a section; undefined/common
	     leave *SYMSECP as NULL.  */
	  if (h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	    symsec = h->root.u.def.section;
	  *symsecp = symsec;
	}
    }
  else
    {
      Elf_Internal_Sym *sym;
      Elf_Internal_Sym *locsyms = *locsymsp;

      if (locsyms == NULL)
	{
	  /* Prefer symbols already cached on the section header.  */
	  locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
	  if (locsyms == NULL)
	    {
	      size_t symcount = symtab_hdr->sh_info;

	      /* If we are reading symbols into the contents, then
		 read the global syms too.  This is done to cache
		 syms for later stack analysis.  */
	      if ((unsigned char **) locsymsp == &symtab_hdr->contents)
		symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
	      locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
					      NULL, NULL, NULL);
	    }
	  if (locsyms == NULL)
	    return FALSE;
	  /* Cache for subsequent calls.  */
	  *locsymsp = locsyms;
	}
      sym = locsyms + r_symndx;

      if (hp != NULL)
	*hp = NULL;

      if (symp != NULL)
	*symp = sym;

      if (symsecp != NULL)
	{
	  asection *symsec = NULL;
	  /* Ordinary section indices, and extended indices above the
	     reserved range, map to a section; SHN_UNDEF and the
	     reserved range (SHN_ABS, SHN_COMMON, ...) do not.  */
	  if ((sym->st_shndx != SHN_UNDEF
	       && sym->st_shndx < SHN_LORESERVE)
	      || sym->st_shndx > SHN_HIRESERVE)
	    symsec = bfd_section_from_elf_index (ibfd, sym->st_shndx);
	  *symsecp = symsec;
	}
    }

  return TRUE;
}
474
/* Build a name for an entry in the stub hash table.  We can't use a
   local symbol name because ld -r might generate duplicate local
   symbols.  Global symbols get "name+addend"; locals get
   "secid:symndx+addend".  Returns a malloc'd string, or NULL on
   allocation failure (caller frees, or ownership passes to the stub
   hash table).  */

static char *
spu_stub_name (const asection *sym_sec,
	       const struct elf_link_hash_entry *h,
	       const Elf_Internal_Rela *rel)
{
  char *stub_name;
  bfd_size_type len;

  if (h)
    {
      /* name + '+' + up to 8 hex digits + NUL.  */
      len = strlen (h->root.root.string) + 1 + 8 + 1;
      stub_name = bfd_malloc (len);
      if (stub_name == NULL)
	return stub_name;

      sprintf (stub_name, "%s+%x",
	       h->root.root.string,
	       (int) rel->r_addend & 0xffffffff);
      /* After this, LEN equals the formatted length when the addend
	 printed as a single digit — used by the "+0" check below.  */
      len -= 8;
    }
  else
    {
      /* secid ':' symndx '+' addend, each up to 8 hex digits.  */
      len = 8 + 1 + 8 + 1 + 8 + 1;
      stub_name = bfd_malloc (len);
      if (stub_name == NULL)
	return stub_name;

      sprintf (stub_name, "%x:%x+%x",
	       sym_sec->id & 0xffffffff,
	       (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
	       (int) rel->r_addend & 0xffffffff);
      len = strlen (stub_name);
    }

  /* Strip a trailing "+0" so a zero-addend stub name is just the
     symbol name.  The [len] == 0 test guards against a longer addend
     that merely starts with '0'.  */
  if (stub_name[len - 2] == '+'
      && stub_name[len - 1] == '0'
      && stub_name[len] == 0)
    stub_name[len - 2] = 0;

  return stub_name;
}
519
/* Create the note section if not already present.  This is done early so
   that the linker maps the sections to the right place in the output.
   The note records the output filename under the SPU plugin name so a
   PPU-side debugger/loader can identify the embedded SPU image.  */

bfd_boolean
spu_elf_create_sections (bfd *output_bfd,
			 struct bfd_link_info *info,
			 int stack_analysis,
			 int emit_stack_syms)
{
  bfd *ibfd;
  struct spu_link_hash_table *htab = spu_hash_table (info);

  /* Stash some options away where we can get at them later.  */
  htab->stack_analysis = stack_analysis;
  htab->emit_stack_syms = emit_stack_syms;

  /* If any input already supplies the note section, use it as-is.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
      break;

  if (ibfd == NULL)
    {
      /* Make SPU_PTNOTE_SPUNAME section.  */
      asection *s;
      size_t name_len;
      size_t size;
      bfd_byte *data;
      flagword flags;

      /* Attach the synthesised section to the first input bfd.  */
      ibfd = info->input_bfds;
      flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
      if (s == NULL
	  || !bfd_set_section_alignment (ibfd, s, 4))
	return FALSE;

      /* ELF note layout: 12-byte header (namesz, descsz, type),
	 then the name and the descriptor, each padded to 4 bytes.  */
      name_len = strlen (bfd_get_filename (output_bfd)) + 1;
      size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
      size += (name_len + 3) & -4;

      if (!bfd_set_section_size (ibfd, s, size))
	return FALSE;

      data = bfd_zalloc (ibfd, size);
      if (data == NULL)
	return FALSE;

      bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);	/* namesz */
      bfd_put_32 (ibfd, name_len, data + 4);			/* descsz */
      bfd_put_32 (ibfd, 1, data + 8);				/* type */
      memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
      memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
	      bfd_get_filename (output_bfd), name_len);
      s->contents = data;
    }

  return TRUE;
}
578
579 /* qsort predicate to sort sections by vma. */
580
581 static int
582 sort_sections (const void *a, const void *b)
583 {
584 const asection *const *s1 = a;
585 const asection *const *s2 = b;
586 bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;
587
588 if (delta != 0)
589 return delta < 0 ? -1 : 1;
590
591 return (*s1)->index - (*s2)->index;
592 }
593
594 /* Identify overlays in the output bfd, and number them. */
595
596 bfd_boolean
597 spu_elf_find_overlays (bfd *output_bfd, struct bfd_link_info *info)
598 {
599 struct spu_link_hash_table *htab = spu_hash_table (info);
600 asection **alloc_sec;
601 unsigned int i, n, ovl_index, num_buf;
602 asection *s;
603 bfd_vma ovl_end;
604
605 if (output_bfd->section_count < 2)
606 return FALSE;
607
608 alloc_sec = bfd_malloc (output_bfd->section_count * sizeof (*alloc_sec));
609 if (alloc_sec == NULL)
610 return FALSE;
611
612 /* Pick out all the alloced sections. */
613 for (n = 0, s = output_bfd->sections; s != NULL; s = s->next)
614 if ((s->flags & SEC_ALLOC) != 0
615 && (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
616 && s->size != 0)
617 alloc_sec[n++] = s;
618
619 if (n == 0)
620 {
621 free (alloc_sec);
622 return FALSE;
623 }
624
625 /* Sort them by vma. */
626 qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);
627
628 /* Look for overlapping vmas. Any with overlap must be overlays.
629 Count them. Also count the number of overlay regions and for
630 each region save a section from that region with the lowest vma
631 and another section with the highest end vma. */
632 ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
633 for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
634 {
635 s = alloc_sec[i];
636 if (s->vma < ovl_end)
637 {
638 asection *s0 = alloc_sec[i - 1];
639
640 if (spu_elf_section_data (s0)->ovl_index == 0)
641 {
642 spu_elf_section_data (s0)->ovl_index = ++ovl_index;
643 alloc_sec[num_buf * 2] = s0;
644 alloc_sec[num_buf * 2 + 1] = s0;
645 num_buf++;
646 }
647 spu_elf_section_data (s)->ovl_index = ++ovl_index;
648 if (ovl_end < s->vma + s->size)
649 {
650 ovl_end = s->vma + s->size;
651 alloc_sec[num_buf * 2 - 1] = s;
652 }
653 }
654 else
655 ovl_end = s->vma + s->size;
656 }
657
658 htab->num_overlays = ovl_index;
659 htab->num_buf = num_buf;
660 if (ovl_index == 0)
661 {
662 free (alloc_sec);
663 return FALSE;
664 }
665
666 alloc_sec = bfd_realloc (alloc_sec, num_buf * 2 * sizeof (*alloc_sec));
667 if (alloc_sec == NULL)
668 return FALSE;
669
670 htab->ovl_region = alloc_sec;
671 return TRUE;
672 }
673
/* One of these per stub: load the target function address into $79 and
   branch to the shared per-overlay trailer.  */
#define SIZEOF_STUB1 8
#define ILA_79 0x4200004f /* ila $79,function_address */
#define BR 0x32000000 /* br stub2 */

/* One of these per overlay: load the overlay number into $78, then
   branch to the overlay manager.  */
#define SIZEOF_STUB2 8
#define ILA_78 0x4200004e /* ila $78,overlay_number */
/* br __ovly_load */
#define NOP 0x40200000
684
685 /* Return true for all relative and absolute branch instructions.
686 bra 00110000 0..
687 brasl 00110001 0..
688 br 00110010 0..
689 brsl 00110011 0..
690 brz 00100000 0..
691 brnz 00100001 0..
692 brhz 00100010 0..
693 brhnz 00100011 0.. */
694
695 static bfd_boolean
696 is_branch (const unsigned char *insn)
697 {
698 return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
699 }
700
701 /* Return true for all indirect branch instructions.
702 bi 00110101 000
703 bisl 00110101 001
704 iret 00110101 010
705 bisled 00110101 011
706 biz 00100101 000
707 binz 00100101 001
708 bihz 00100101 010
709 bihnz 00100101 011 */
710
711 static bfd_boolean
712 is_indirect_branch (const unsigned char *insn)
713 {
714 return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
715 }
716
717 /* Return true for branch hint instructions.
718 hbra 0001000..
719 hbrr 0001001.. */
720
721 static bfd_boolean
722 is_hint (const unsigned char *insn)
723 {
724 return (insn[0] & 0xfc) == 0x10;
725 }
726
727 /* Return TRUE if this reloc symbol should possibly go via an overlay stub. */
728
729 static bfd_boolean
730 needs_ovl_stub (const char *sym_name,
731 asection *sym_sec,
732 asection *input_section,
733 struct spu_link_hash_table *htab,
734 bfd_boolean is_branch)
735 {
736 if (htab->num_overlays == 0)
737 return FALSE;
738
739 if (sym_sec == NULL
740 || sym_sec->output_section == NULL
741 || spu_elf_section_data (sym_sec->output_section) == NULL)
742 return FALSE;
743
744 /* setjmp always goes via an overlay stub, because then the return
745 and hence the longjmp goes via __ovly_return. That magically
746 makes setjmp/longjmp between overlays work. */
747 if (strncmp (sym_name, "setjmp", 6) == 0
748 && (sym_name[6] == '\0' || sym_name[6] == '@'))
749 return TRUE;
750
751 /* Usually, symbols in non-overlay sections don't need stubs. */
752 if (spu_elf_section_data (sym_sec->output_section)->ovl_index == 0
753 && !htab->non_overlay_stubs)
754 return FALSE;
755
756 /* A reference from some other section to a symbol in an overlay
757 section needs a stub. */
758 if (spu_elf_section_data (sym_sec->output_section)->ovl_index
759 != spu_elf_section_data (input_section->output_section)->ovl_index)
760 return TRUE;
761
762 /* If this insn isn't a branch then we are possibly taking the
763 address of a function and passing it out somehow. */
764 return !is_branch;
765 }
766
/* Context shared by the stub allocation and collection traversals.  */
struct stubarr {
  /* The linker's stub hash table.  */
  struct bfd_hash_table *stub_hash_table;
  /* Flat array of stub entries, filled by populate_stubs.  */
  struct spu_stub_hash_entry **sh;
  /* Number of stubs; populate_stubs also uses it as a write cursor.  */
  unsigned int count;
  /* Non-zero if a traversal callback failed.  */
  int err;
};
773
774 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
775 symbols. */
776
777 static bfd_boolean
778 allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
779 {
780 /* Symbols starting with _SPUEAR_ need a stub because they may be
781 invoked by the PPU. */
782 if ((h->root.type == bfd_link_hash_defined
783 || h->root.type == bfd_link_hash_defweak)
784 && h->def_regular
785 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
786 {
787 struct stubarr *stubs = inf;
788 static Elf_Internal_Rela zero_rel;
789 char *stub_name = spu_stub_name (h->root.u.def.section, h, &zero_rel);
790 struct spu_stub_hash_entry *sh;
791
792 if (stub_name == NULL)
793 {
794 stubs->err = 1;
795 return FALSE;
796 }
797
798 sh = (struct spu_stub_hash_entry *)
799 bfd_hash_lookup (stubs->stub_hash_table, stub_name, TRUE, FALSE);
800 if (sh == NULL)
801 {
802 free (stub_name);
803 return FALSE;
804 }
805
806 /* If this entry isn't new, we already have a stub. */
807 if (sh->target_section != NULL)
808 {
809 free (stub_name);
810 return TRUE;
811 }
812
813 sh->target_section = h->root.u.def.section;
814 sh->target_off = h->root.u.def.value;
815 stubs->count += 1;
816 }
817
818 return TRUE;
819 }
820
821 /* Called via bfd_hash_traverse to set up pointers to all symbols
822 in the stub hash table. */
823
824 static bfd_boolean
825 populate_stubs (struct bfd_hash_entry *bh, void *inf)
826 {
827 struct stubarr *stubs = inf;
828
829 stubs->sh[--stubs->count] = (struct spu_stub_hash_entry *) bh;
830 return TRUE;
831 }
832
833 /* qsort predicate to sort stubs by overlay number. */
834
835 static int
836 sort_stubs (const void *a, const void *b)
837 {
838 const struct spu_stub_hash_entry *const *sa = a;
839 const struct spu_stub_hash_entry *const *sb = b;
840 int i;
841 bfd_signed_vma d;
842
843 i = spu_elf_section_data ((*sa)->target_section->output_section)->ovl_index;
844 i -= spu_elf_section_data ((*sb)->target_section->output_section)->ovl_index;
845 if (i != 0)
846 return i;
847
848 d = ((*sa)->target_section->output_section->vma
849 + (*sa)->target_section->output_offset
850 + (*sa)->target_off
851 - (*sb)->target_section->output_section->vma
852 - (*sb)->target_section->output_offset
853 - (*sb)->target_off);
854 if (d != 0)
855 return d < 0 ? -1 : 1;
856
857 /* Two functions at the same address. Aliases perhaps. */
858 i = strcmp ((*sb)->root.string, (*sa)->root.string);
859 BFD_ASSERT (i != 0);
860 return i;
861 }
862
863 /* Allocate space for overlay call and return stubs. */
864
865 bfd_boolean
866 spu_elf_size_stubs (bfd *output_bfd,
867 struct bfd_link_info *info,
868 int non_overlay_stubs,
869 int stack_analysis,
870 asection **stub,
871 asection **ovtab,
872 asection **toe)
873 {
874 struct spu_link_hash_table *htab = spu_hash_table (info);
875 bfd *ibfd;
876 struct stubarr stubs;
877 unsigned i, group;
878 flagword flags;
879
880 htab->non_overlay_stubs = non_overlay_stubs;
881 stubs.stub_hash_table = &htab->stub_hash_table;
882 stubs.count = 0;
883 stubs.err = 0;
884 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
885 {
886 extern const bfd_target bfd_elf32_spu_vec;
887 Elf_Internal_Shdr *symtab_hdr;
888 asection *section;
889 Elf_Internal_Sym *local_syms = NULL;
890 void *psyms;
891
892 if (ibfd->xvec != &bfd_elf32_spu_vec)
893 continue;
894
895 /* We'll need the symbol table in a second. */
896 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
897 if (symtab_hdr->sh_info == 0)
898 continue;
899
900 /* Arrange to read and keep global syms for later stack analysis. */
901 psyms = &local_syms;
902 if (stack_analysis)
903 psyms = &symtab_hdr->contents;
904
905 /* Walk over each section attached to the input bfd. */
906 for (section = ibfd->sections; section != NULL; section = section->next)
907 {
908 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
909
910 /* If there aren't any relocs, then there's nothing more to do. */
911 if ((section->flags & SEC_RELOC) == 0
912 || (section->flags & SEC_ALLOC) == 0
913 || (section->flags & SEC_LOAD) == 0
914 || section->reloc_count == 0)
915 continue;
916
917 /* If this section is a link-once section that will be
918 discarded, then don't create any stubs. */
919 if (section->output_section == NULL
920 || section->output_section->owner != output_bfd)
921 continue;
922
923 /* Get the relocs. */
924 internal_relocs
925 = _bfd_elf_link_read_relocs (ibfd, section, NULL, NULL,
926 info->keep_memory);
927 if (internal_relocs == NULL)
928 goto error_ret_free_local;
929
930 /* Now examine each relocation. */
931 irela = internal_relocs;
932 irelaend = irela + section->reloc_count;
933 for (; irela < irelaend; irela++)
934 {
935 enum elf_spu_reloc_type r_type;
936 unsigned int r_indx;
937 asection *sym_sec;
938 Elf_Internal_Sym *sym;
939 struct elf_link_hash_entry *h;
940 const char *sym_name;
941 char *stub_name;
942 struct spu_stub_hash_entry *sh;
943 unsigned int sym_type;
944 enum _insn_type { non_branch, branch, call } insn_type;
945
946 r_type = ELF32_R_TYPE (irela->r_info);
947 r_indx = ELF32_R_SYM (irela->r_info);
948
949 if (r_type >= R_SPU_max)
950 {
951 bfd_set_error (bfd_error_bad_value);
952 goto error_ret_free_internal;
953 }
954
955 /* Determine the reloc target section. */
956 if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, ibfd))
957 goto error_ret_free_internal;
958
959 if (sym_sec == NULL
960 || sym_sec->output_section == NULL
961 || sym_sec->output_section->owner != output_bfd)
962 continue;
963
964 /* Ensure no stubs for user supplied overlay manager syms. */
965 if (h != NULL
966 && (strcmp (h->root.root.string, "__ovly_load") == 0
967 || strcmp (h->root.root.string, "__ovly_return") == 0))
968 continue;
969
970 insn_type = non_branch;
971 if (r_type == R_SPU_REL16
972 || r_type == R_SPU_ADDR16)
973 {
974 unsigned char insn[4];
975
976 if (!bfd_get_section_contents (ibfd, section, insn,
977 irela->r_offset, 4))
978 goto error_ret_free_internal;
979
980 if (is_branch (insn) || is_hint (insn))
981 {
982 insn_type = branch;
983 if ((insn[0] & 0xfd) == 0x31)
984 insn_type = call;
985 }
986 }
987
988 /* We are only interested in function symbols. */
989 if (h != NULL)
990 {
991 sym_type = h->type;
992 sym_name = h->root.root.string;
993 }
994 else
995 {
996 sym_type = ELF_ST_TYPE (sym->st_info);
997 sym_name = bfd_elf_sym_name (sym_sec->owner,
998 symtab_hdr,
999 sym,
1000 sym_sec);
1001 }
1002 if (sym_type != STT_FUNC)
1003 {
1004 /* It's common for people to write assembly and forget
1005 to give function symbols the right type. Handle
1006 calls to such symbols, but warn so that (hopefully)
1007 people will fix their code. We need the symbol
1008 type to be correct to distinguish function pointer
1009 initialisation from other pointer initialisation. */
1010 if (insn_type == call)
1011 (*_bfd_error_handler) (_("warning: call to non-function"
1012 " symbol %s defined in %B"),
1013 sym_sec->owner, sym_name);
1014 else
1015 continue;
1016 }
1017
1018 if (!needs_ovl_stub (sym_name, sym_sec, section, htab,
1019 insn_type != non_branch))
1020 continue;
1021
1022 stub_name = spu_stub_name (sym_sec, h, irela);
1023 if (stub_name == NULL)
1024 goto error_ret_free_internal;
1025
1026 sh = (struct spu_stub_hash_entry *)
1027 bfd_hash_lookup (&htab->stub_hash_table, stub_name,
1028 TRUE, FALSE);
1029 if (sh == NULL)
1030 {
1031 free (stub_name);
1032 error_ret_free_internal:
1033 if (elf_section_data (section)->relocs != internal_relocs)
1034 free (internal_relocs);
1035 error_ret_free_local:
1036 if (local_syms != NULL
1037 && (symtab_hdr->contents
1038 != (unsigned char *) local_syms))
1039 free (local_syms);
1040 return FALSE;
1041 }
1042
1043 /* If this entry isn't new, we already have a stub. */
1044 if (sh->target_section != NULL)
1045 {
1046 free (stub_name);
1047 continue;
1048 }
1049
1050 sh->target_section = sym_sec;
1051 if (h != NULL)
1052 sh->target_off = h->root.u.def.value;
1053 else
1054 sh->target_off = sym->st_value;
1055 sh->target_off += irela->r_addend;
1056
1057 stubs.count += 1;
1058 }
1059
1060 /* We're done with the internal relocs, free them. */
1061 if (elf_section_data (section)->relocs != internal_relocs)
1062 free (internal_relocs);
1063 }
1064
1065 if (local_syms != NULL
1066 && symtab_hdr->contents != (unsigned char *) local_syms)
1067 {
1068 if (!info->keep_memory)
1069 free (local_syms);
1070 else
1071 symtab_hdr->contents = (unsigned char *) local_syms;
1072 }
1073 }
1074
1075 elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, &stubs);
1076 if (stubs.err)
1077 return FALSE;
1078
1079 *stub = NULL;
1080 if (stubs.count == 0)
1081 return TRUE;
1082
1083 ibfd = info->input_bfds;
1084 flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
1085 | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
1086 htab->stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
1087 *stub = htab->stub;
1088 if (htab->stub == NULL
1089 || !bfd_set_section_alignment (ibfd, htab->stub, 2))
1090 return FALSE;
1091
1092 flags = (SEC_ALLOC | SEC_LOAD
1093 | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
1094 htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
1095 *ovtab = htab->ovtab;
1096 if (htab->ovtab == NULL
1097 || !bfd_set_section_alignment (ibfd, htab->stub, 4))
1098 return FALSE;
1099
1100 *toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
1101 if (*toe == NULL
1102 || !bfd_set_section_alignment (ibfd, *toe, 4))
1103 return FALSE;
1104 (*toe)->size = 16;
1105
1106 /* Retrieve all the stubs and sort. */
1107 stubs.sh = bfd_malloc (stubs.count * sizeof (*stubs.sh));
1108 if (stubs.sh == NULL)
1109 return FALSE;
1110 i = stubs.count;
1111 bfd_hash_traverse (&htab->stub_hash_table, populate_stubs, &stubs);
1112 BFD_ASSERT (stubs.count == 0);
1113
1114 stubs.count = i;
1115 qsort (stubs.sh, stubs.count, sizeof (*stubs.sh), sort_stubs);
1116
1117 /* Now that the stubs are sorted, place them in the stub section.
1118 Stubs are grouped per overlay
1119 . ila $79,func1
1120 . br 1f
1121 . ila $79,func2
1122 . br 1f
1123 .
1124 .
1125 . ila $79,funcn
1126 . nop
1127 . 1:
1128 . ila $78,ovl_index
1129 . br __ovly_load */
1130
1131 group = 0;
1132 for (i = 0; i < stubs.count; i++)
1133 {
1134 if (spu_elf_section_data (stubs.sh[group]->target_section
1135 ->output_section)->ovl_index
1136 != spu_elf_section_data (stubs.sh[i]->target_section
1137 ->output_section)->ovl_index)
1138 {
1139 htab->stub->size += SIZEOF_STUB2;
1140 for (; group != i; group++)
1141 stubs.sh[group]->delta
1142 = stubs.sh[i - 1]->off - stubs.sh[group]->off;
1143 }
1144 if (group == i
1145 || ((stubs.sh[i - 1]->target_section->output_section->vma
1146 + stubs.sh[i - 1]->target_section->output_offset
1147 + stubs.sh[i - 1]->target_off)
1148 != (stubs.sh[i]->target_section->output_section->vma
1149 + stubs.sh[i]->target_section->output_offset
1150 + stubs.sh[i]->target_off)))
1151 {
1152 stubs.sh[i]->off = htab->stub->size;
1153 htab->stub->size += SIZEOF_STUB1;
1154 }
1155 else
1156 stubs.sh[i]->off = stubs.sh[i - 1]->off;
1157 }
1158 if (group != i)
1159 htab->stub->size += SIZEOF_STUB2;
1160 for (; group != i; group++)
1161 stubs.sh[group]->delta = stubs.sh[i - 1]->off - stubs.sh[group]->off;
1162
1163 /* htab->ovtab consists of two arrays.
1164 . struct {
1165 . u32 vma;
1166 . u32 size;
1167 . u32 file_off;
1168 . u32 buf;
1169 . } _ovly_table[];
1170 .
1171 . struct {
1172 . u32 mapped;
1173 . } _ovly_buf_table[]; */
1174
1175 htab->ovtab->alignment_power = 4;
1176 htab->ovtab->size = htab->num_overlays * 16 + htab->num_buf * 4;
1177
1178 return TRUE;
1179 }
1180
1181 /* Functions to handle embedded spu_ovl.o object. */
1182
/* bfd_openr_iovec "open" callback for the embedded overlay manager:
   the iovec cookie is simply the struct _ovl_stream passed in.  */

static void *
ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
{
  return stream;
}
1188
1189 static file_ptr
1190 ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
1191 void *stream,
1192 void *buf,
1193 file_ptr nbytes,
1194 file_ptr offset)
1195 {
1196 struct _ovl_stream *os;
1197 size_t count;
1198 size_t max;
1199
1200 os = (struct _ovl_stream *) stream;
1201 max = (const char *) os->end - (const char *) os->start;
1202
1203 if ((ufile_ptr) offset >= max)
1204 return 0;
1205
1206 count = nbytes;
1207 if (count > max - offset)
1208 count = max - offset;
1209
1210 memcpy (buf, (const char *) os->start + offset, count);
1211 return count;
1212 }
1213
1214 bfd_boolean
1215 spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
1216 {
1217 *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
1218 "elf32-spu",
1219 ovl_mgr_open,
1220 (void *) stream,
1221 ovl_mgr_pread,
1222 NULL,
1223 NULL);
1224 return *ovl_bfd != NULL;
1225 }
1226
/* Fill in the ila and br for a stub.  On the last stub for a group,
   write the stub that sets the overlay number too.

   Called via bfd_hash_traverse; BH is the stub hash entry, INF the
   spu_link_hash_table.  Returns FALSE only on allocation failure.  */

static bfd_boolean
write_one_stub (struct bfd_hash_entry *bh, void *inf)
{
  struct spu_stub_hash_entry *ent = (struct spu_stub_hash_entry *) bh;
  struct spu_link_hash_table *htab = inf;
  asection *sec = htab->stub;
  asection *s = ent->target_section;
  unsigned int ovl;
  bfd_vma val;

  /* ila $79,target - load the final address of the call target.  */
  val = ent->target_off + s->output_offset + s->output_section->vma;
  bfd_put_32 (sec->owner, ILA_79 + ((val << 7) & 0x01ffff80),
	      sec->contents + ent->off);
  /* br to the shared trailer for this group; DELTA is the byte
     distance to the group's last stub.  */
  val = ent->delta + 4;
  bfd_put_32 (sec->owner, BR + ((val << 5) & 0x007fff80),
	      sec->contents + ent->off + 4);

  /* If this is the last stub of this group, write stub2.  */
  if (ent->delta == 0)
    {
      /* Overwrite the br just written with a nop; the last stub
	 falls through into the stub2 trailer below.  */
      bfd_put_32 (sec->owner, NOP,
		  sec->contents + ent->off + 4);

      /* ila $78,ovl_index - tell __ovly_load which overlay to map.  */
      ovl = spu_elf_section_data (s->output_section)->ovl_index;
      bfd_put_32 (sec->owner, ILA_78 + ((ovl << 7) & 0x01ffff80),
		  sec->contents + ent->off + 8);

      /* PC-relative displacement from the final br to __ovly_load.  */
      val = (htab->ovly_load->root.u.def.section->output_section->vma
	     + htab->ovly_load->root.u.def.section->output_offset
	     + htab->ovly_load->root.u.def.value
	     - (sec->output_section->vma
		+ sec->output_offset
		+ ent->off + 12));

      /* br only reaches +/- 0x20000 bytes; record overflow, reported
	 later by spu_elf_build_stubs.  */
      if (val + 0x20000 >= 0x40000)
	htab->stub_overflow = TRUE;

      bfd_put_32 (sec->owner, BR + ((val << 5) & 0x007fff80),
		  sec->contents + ent->off + 12);
    }

  if (htab->emit_stub_syms)
    {
      struct elf_link_hash_entry *h;
      size_t len1, len2;
      char *name;

      /* Define a "00000000.ovl_call.<target>" symbol on the stub so
	 it is visible in link maps and debuggers.  */
      len1 = sizeof ("00000000.ovl_call.") - 1;
      len2 = strlen (ent->root.string);
      name = bfd_malloc (len1 + len2 + 1);
      if (name == NULL)
	return FALSE;
      memcpy (name, "00000000.ovl_call.", len1);
      memcpy (name + len1, ent->root.string, len2 + 1);
      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
      free (name);
      if (h == NULL)
	return FALSE;
      /* Only define it if nothing else did.  */
      if (h->root.type == bfd_link_hash_new)
	{
	  h->root.type = bfd_link_hash_defined;
	  h->root.u.def.section = sec;
	  h->root.u.def.value = ent->off;
	  /* The group's last stub also owns the stub2 trailer.  */
	  h->size = (ent->delta == 0
		     ? SIZEOF_STUB1 + SIZEOF_STUB2 : SIZEOF_STUB1);
	  h->type = STT_FUNC;
	  h->ref_regular = 1;
	  h->def_regular = 1;
	  h->ref_regular_nonweak = 1;
	  h->forced_local = 1;
	  h->non_elf = 0;
	}
    }

  return TRUE;
}
1306
1307 /* Define an STT_OBJECT symbol. */
1308
1309 static struct elf_link_hash_entry *
1310 define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
1311 {
1312 struct elf_link_hash_entry *h;
1313
1314 h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
1315 if (h == NULL)
1316 return NULL;
1317
1318 if (h->root.type != bfd_link_hash_defined
1319 || !h->def_regular)
1320 {
1321 h->root.type = bfd_link_hash_defined;
1322 h->root.u.def.section = htab->ovtab;
1323 h->type = STT_OBJECT;
1324 h->ref_regular = 1;
1325 h->def_regular = 1;
1326 h->ref_regular_nonweak = 1;
1327 h->non_elf = 0;
1328 }
1329 else
1330 {
1331 (*_bfd_error_handler) (_("%B is not allowed to define %s"),
1332 h->root.u.def.section->owner,
1333 h->root.root.string);
1334 bfd_set_error (bfd_error_bad_value);
1335 return NULL;
1336 }
1337
1338 return h;
1339 }
1340
1341 /* Fill in all stubs and the overlay tables. */
1342
1343 bfd_boolean
1344 spu_elf_build_stubs (struct bfd_link_info *info, int emit_syms, asection *toe)
1345 {
1346 struct spu_link_hash_table *htab = spu_hash_table (info);
1347 struct elf_link_hash_entry *h;
1348 bfd_byte *p;
1349 asection *s;
1350 bfd *obfd;
1351 unsigned int i;
1352
1353 htab->emit_stub_syms = emit_syms;
1354 htab->stub->contents = bfd_zalloc (htab->stub->owner, htab->stub->size);
1355 if (htab->stub->contents == NULL)
1356 return FALSE;
1357
1358 h = elf_link_hash_lookup (&htab->elf, "__ovly_load", FALSE, FALSE, FALSE);
1359 htab->ovly_load = h;
1360 BFD_ASSERT (h != NULL
1361 && (h->root.type == bfd_link_hash_defined
1362 || h->root.type == bfd_link_hash_defweak)
1363 && h->def_regular);
1364
1365 s = h->root.u.def.section->output_section;
1366 if (spu_elf_section_data (s)->ovl_index)
1367 {
1368 (*_bfd_error_handler) (_("%s in overlay section"),
1369 h->root.u.def.section->owner);
1370 bfd_set_error (bfd_error_bad_value);
1371 return FALSE;
1372 }
1373
1374 /* Write out all the stubs. */
1375 bfd_hash_traverse (&htab->stub_hash_table, write_one_stub, htab);
1376
1377 if (htab->stub_overflow)
1378 {
1379 (*_bfd_error_handler) (_("overlay stub relocation overflow"));
1380 bfd_set_error (bfd_error_bad_value);
1381 return FALSE;
1382 }
1383
1384 htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
1385 if (htab->ovtab->contents == NULL)
1386 return FALSE;
1387
1388 /* Write out _ovly_table. */
1389 p = htab->ovtab->contents;
1390 obfd = htab->ovtab->output_section->owner;
1391 for (s = obfd->sections; s != NULL; s = s->next)
1392 {
1393 unsigned int ovl_index = spu_elf_section_data (s)->ovl_index;
1394
1395 if (ovl_index != 0)
1396 {
1397 unsigned int lo, hi, mid;
1398 unsigned long off = (ovl_index - 1) * 16;
1399 bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
1400 bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16, p + off + 4);
1401 /* file_off written later in spu_elf_modify_program_headers. */
1402
1403 lo = 0;
1404 hi = htab->num_buf;
1405 while (lo < hi)
1406 {
1407 mid = (lo + hi) >> 1;
1408 if (htab->ovl_region[2 * mid + 1]->vma
1409 + htab->ovl_region[2 * mid + 1]->size <= s->vma)
1410 lo = mid + 1;
1411 else if (htab->ovl_region[2 * mid]->vma > s->vma)
1412 hi = mid;
1413 else
1414 {
1415 bfd_put_32 (htab->ovtab->owner, mid + 1, p + off + 12);
1416 break;
1417 }
1418 }
1419 BFD_ASSERT (lo < hi);
1420 }
1421 }
1422
1423 /* Write out _ovly_buf_table. */
1424 p = htab->ovtab->contents + htab->num_overlays * 16;
1425 for (i = 0; i < htab->num_buf; i++)
1426 {
1427 bfd_put_32 (htab->ovtab->owner, 0, p);
1428 p += 4;
1429 }
1430
1431 h = define_ovtab_symbol (htab, "_ovly_table");
1432 if (h == NULL)
1433 return FALSE;
1434 h->root.u.def.value = 0;
1435 h->size = htab->num_overlays * 16;
1436
1437 h = define_ovtab_symbol (htab, "_ovly_table_end");
1438 if (h == NULL)
1439 return FALSE;
1440 h->root.u.def.value = htab->num_overlays * 16;
1441 h->size = 0;
1442
1443 h = define_ovtab_symbol (htab, "_ovly_buf_table");
1444 if (h == NULL)
1445 return FALSE;
1446 h->root.u.def.value = htab->num_overlays * 16;
1447 h->size = htab->num_buf * 4;
1448
1449 h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
1450 if (h == NULL)
1451 return FALSE;
1452 h->root.u.def.value = htab->num_overlays * 16 + htab->num_buf * 4;
1453 h->size = 0;
1454
1455 h = define_ovtab_symbol (htab, "_EAR_");
1456 if (h == NULL)
1457 return FALSE;
1458 h->root.u.def.section = toe;
1459 h->root.u.def.value = 0;
1460 h->size = 16;
1461
1462 return TRUE;
1463 }
1464
/* OFFSET in SEC (presumably) is the beginning of a function prologue.
   Search for stack adjusting insns, and return the sp delta
   (a non-positive value, since a growing frame decrements $sp).
   Returns 0 if no adjustment is found before giving up.  */

static int
find_function_stack_adjust (asection *sec, bfd_vma offset)
{
  int unrecog;
  int reg[128];		/* Tracked (approximate) value of each register.  */

  memset (reg, 0, sizeof (reg));
  /* Give up after 32 unrecognized insns or at end of section.  */
  for (unrecog = 0; offset + 4 <= sec->size && unrecog < 32; offset += 4)
    {
      unsigned char buf[4];
      int rt, ra;
      int imm;

      /* Assume no relocs on stack adjusting insns.  */
      if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
	break;

      /* Stores don't change registers; skip them.  */
      if (buf[0] == 0x24 /* stqd */)
	continue;

      /* Common field decode: target and first source register.  */
      rt = buf[3] & 0x7f;
      ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);
      /* Partly decoded immediate field.  */
      imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);

      if (buf[0] == 0x1c /* ai */)
	{
	  imm >>= 7;
	  /* Sign-extend the 10-bit immediate.  */
	  imm = (imm ^ 0x200) - 0x200;
	  reg[rt] = reg[ra] + imm;

	  if (rt == 1 /* sp */)
	    {
	      /* A positive adjustment is epilogue, not prologue.  */
	      if (imm > 0)
		break;
	      return reg[rt];
	    }
	}
      else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
	{
	  int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);

	  reg[rt] = reg[ra] + reg[rb];
	  if (rt == 1)
	    return reg[rt];
	}
      else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
	{
	  /* Immediate-load forms used to build large frame sizes.  */
	  if (buf[0] >= 0x42 /* ila */)
	    imm |= (buf[0] & 1) << 17;
	  else
	    {
	      imm &= 0xffff;

	      if (buf[0] == 0x40 /* il */)
		{
		  if ((buf[1] & 0x80) == 0)
		    goto unknown_insn;
		  /* Sign-extend the 16-bit immediate.  */
		  imm = (imm ^ 0x8000) - 0x8000;
		}
	      else if ((buf[1] & 0x80) == 0 /* ilhu */)
		imm <<= 16;
	    }
	  reg[rt] = imm;
	  continue;
	}
      else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
	{
	  /* OR in the low halfword (pairs with ilhu).  */
	  reg[rt] |= imm & 0xffff;
	  continue;
	}
      else if (buf[0] == 0x04 /* ori */)
	{
	  imm >>= 7;
	  imm = (imm ^ 0x200) - 0x200;
	  reg[rt] = reg[ra] | imm;
	  continue;
	}
      else if ((buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
	       || (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */))
	{
	  /* Used in pic reg load.  Say rt is trashed.  */
	  reg[rt] = 0;
	  continue;
	}
      else if (is_branch (buf) || is_indirect_branch (buf))
	/* If we hit a branch then we must be out of the prologue.  */
	break;
    unknown_insn:
      ++unrecog;
    }

  return 0;
}
1562
1563 /* qsort predicate to sort symbols by section and value. */
1564
1565 static Elf_Internal_Sym *sort_syms_syms;
1566 static asection **sort_syms_psecs;
1567
1568 static int
1569 sort_syms (const void *a, const void *b)
1570 {
1571 Elf_Internal_Sym *const *s1 = a;
1572 Elf_Internal_Sym *const *s2 = b;
1573 asection *sec1,*sec2;
1574 bfd_signed_vma delta;
1575
1576 sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
1577 sec2 = sort_syms_psecs[*s2 - sort_syms_syms];
1578
1579 if (sec1 != sec2)
1580 return sec1->index - sec2->index;
1581
1582 delta = (*s1)->st_value - (*s2)->st_value;
1583 if (delta != 0)
1584 return delta < 0 ? -1 : 1;
1585
1586 delta = (*s2)->st_size - (*s1)->st_size;
1587 if (delta != 0)
1588 return delta < 0 ? -1 : 1;
1589
1590 return *s1 < *s2 ? -1 : 1;
1591 }
1592
/* One edge of the call graph: a call or branch to FUN, linked on the
   caller's call_list.  */

struct call_info
{
  /* The function called or branched to.  */
  struct function_info *fun;
  /* Next edge on the caller's list.  */
  struct call_info *next;
  /* Non-zero for a tail call (plain branch) rather than a call.  */
  int is_tail;
};
1599
/* Per-function (or per function fragment) info used for stack
   analysis.  */

struct function_info
{
  /* List of functions called.  Also branches to hot/cold part of
     function.  */
  struct call_info *call_list;
  /* For hot/cold part of function, point to owner.  */
  struct function_info *start;
  /* Symbol at start of function: u.h when GLOBAL is set, u.sym
     otherwise.  */
  union {
    Elf_Internal_Sym *sym;
    struct elf_link_hash_entry *h;
  } u;
  /* Function section.  */
  asection *sec;
  /* Address range of (this part of) function.  */
  bfd_vma lo, hi;
  /* Stack usage.  */
  int stack;
  /* Set if global symbol.  */
  unsigned int global : 1;
  /* Set if known to be start of function (as distinct from a hunk
     in hot/cold section).  */
  unsigned int is_func : 1;
  /* Flags used during call tree traversal.  */
  unsigned int visit1 : 1;
  unsigned int non_root : 1;
  unsigned int visit2 : 1;
  unsigned int marking : 1;
  unsigned int visit3 : 1;
};
1630
/* Per-section table of function_info entries, kept sorted by address
   and grown on demand (see maybe_insert_function).  */

struct spu_elf_stack_info
{
  /* Number of entries in use.  */
  int num_fun;
  /* Allocated capacity of FUN.  */
  int max_fun;
  /* Variable size array describing functions, one per contiguous
     address range belonging to a function.  */
  struct function_info fun[1];
};
1639
1640 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
1641 entries for section SEC. */
1642
1643 static struct spu_elf_stack_info *
1644 alloc_stack_info (asection *sec, int max_fun)
1645 {
1646 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1647 bfd_size_type amt;
1648
1649 amt = sizeof (struct spu_elf_stack_info);
1650 amt += (max_fun - 1) * sizeof (struct function_info);
1651 sec_data->stack_info = bfd_zmalloc (amt);
1652 if (sec_data->stack_info != NULL)
1653 sec_data->stack_info->max_fun = max_fun;
1654 return sec_data->stack_info;
1655 }
1656
/* Add a new struct function_info describing a (part of a) function
   starting at SYM_H.  Keep the array sorted by address.

   SYM_H is an Elf_Internal_Sym * when GLOBAL is false, otherwise a
   struct elf_link_hash_entry *.  Returns the (possibly pre-existing)
   entry, or NULL on allocation failure.  */

static struct function_info *
maybe_insert_function (asection *sec,
		       void *sym_h,
		       bfd_boolean global,
		       bfd_boolean is_func)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->stack_info;
  int i;
  bfd_vma off, size;

  /* Lazily create the per-section table on first insertion.  */
  if (sinfo == NULL)
    {
      sinfo = alloc_stack_info (sec, 20);
      if (sinfo == NULL)
	return NULL;
    }

  if (!global)
    {
      Elf_Internal_Sym *sym = sym_h;
      off = sym->st_value;
      size = sym->st_size;
    }
  else
    {
      struct elf_link_hash_entry *h = sym_h;
      off = h->root.u.def.value;
      size = h->size;
    }

  /* Find the last entry starting at or before OFF.  */
  for (i = sinfo->num_fun; --i >= 0; )
    if (sinfo->fun[i].lo <= off)
      break;

  if (i >= 0)
    {
      /* Don't add another entry for an alias, but do update some
	 info.  */
      if (sinfo->fun[i].lo == off)
	{
	  /* Prefer globals over local syms.  */
	  if (global && !sinfo->fun[i].global)
	    {
	      sinfo->fun[i].global = TRUE;
	      sinfo->fun[i].u.h = sym_h;
	    }
	  if (is_func)
	    sinfo->fun[i].is_func = TRUE;
	  return &sinfo->fun[i];
	}
      /* Ignore a zero-size symbol inside an existing function.  */
      else if (sinfo->fun[i].hi > off && size == 0)
	return &sinfo->fun[i];
    }

  /* Insert at index I + 1: either shift later entries up, or grow
     the array when inserting at the end with no room left.  */
  if (++i < sinfo->num_fun)
    memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
	     (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
  else if (i >= sinfo->max_fun)
    {
      bfd_size_type amt = sizeof (struct spu_elf_stack_info);
      bfd_size_type old = amt;

      old += (sinfo->max_fun - 1) * sizeof (struct function_info);
      /* Grow capacity by 50% plus a little.  */
      sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
      amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo = bfd_realloc (sinfo, amt);
      if (sinfo == NULL)
	return NULL;
      /* Zero the newly added tail of the array.  */
      memset ((char *) sinfo + old, 0, amt - old);
      sec_data->stack_info = sinfo;
    }
  sinfo->fun[i].is_func = is_func;
  sinfo->fun[i].global = global;
  sinfo->fun[i].sec = sec;
  if (global)
    sinfo->fun[i].u.h = sym_h;
  else
    sinfo->fun[i].u.sym = sym_h;
  sinfo->fun[i].lo = off;
  sinfo->fun[i].hi = off + size;
  /* find_function_stack_adjust returns the sp delta (<= 0); store
     stack usage as a positive number.  */
  sinfo->fun[i].stack = -find_function_stack_adjust (sec, off);
  sinfo->num_fun += 1;
  return &sinfo->fun[i];
}
1746
1747 /* Return the name of FUN. */
1748
1749 static const char *
1750 func_name (struct function_info *fun)
1751 {
1752 asection *sec;
1753 bfd *ibfd;
1754 Elf_Internal_Shdr *symtab_hdr;
1755
1756 while (fun->start != NULL)
1757 fun = fun->start;
1758
1759 if (fun->global)
1760 return fun->u.h->root.root.string;
1761
1762 sec = fun->sec;
1763 if (fun->u.sym->st_name == 0)
1764 {
1765 size_t len = strlen (sec->name);
1766 char *name = bfd_malloc (len + 10);
1767 if (name == NULL)
1768 return "(null)";
1769 sprintf (name, "%s+%lx", sec->name,
1770 (unsigned long) fun->u.sym->st_value & 0xffffffff);
1771 return name;
1772 }
1773 ibfd = sec->owner;
1774 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
1775 return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
1776 }
1777
1778 /* Read the instruction at OFF in SEC. Return true iff the instruction
1779 is a nop, lnop, or stop 0 (all zero insn). */
1780
1781 static bfd_boolean
1782 is_nop (asection *sec, bfd_vma off)
1783 {
1784 unsigned char insn[4];
1785
1786 if (off + 4 > sec->size
1787 || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
1788 return FALSE;
1789 if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
1790 return TRUE;
1791 if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
1792 return TRUE;
1793 return FALSE;
1794 }
1795
1796 /* Extend the range of FUN to cover nop padding up to LIMIT.
1797 Return TRUE iff some instruction other than a NOP was found. */
1798
1799 static bfd_boolean
1800 insns_at_end (struct function_info *fun, bfd_vma limit)
1801 {
1802 bfd_vma off = (fun->hi + 3) & -4;
1803
1804 while (off < limit && is_nop (fun->sec, off))
1805 off += 4;
1806 if (off < limit)
1807 {
1808 fun->hi = off;
1809 return TRUE;
1810 }
1811 fun->hi = limit;
1812 return FALSE;
1813 }
1814
1815 /* Check and fix overlapping function ranges. Return TRUE iff there
1816 are gaps in the current info we have about functions in SEC. */
1817
1818 static bfd_boolean
1819 check_function_ranges (asection *sec, struct bfd_link_info *info)
1820 {
1821 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1822 struct spu_elf_stack_info *sinfo = sec_data->stack_info;
1823 int i;
1824 bfd_boolean gaps = FALSE;
1825
1826 if (sinfo == NULL)
1827 return FALSE;
1828
1829 for (i = 1; i < sinfo->num_fun; i++)
1830 if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
1831 {
1832 /* Fix overlapping symbols. */
1833 const char *f1 = func_name (&sinfo->fun[i - 1]);
1834 const char *f2 = func_name (&sinfo->fun[i]);
1835
1836 info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
1837 sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
1838 }
1839 else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
1840 gaps = TRUE;
1841
1842 if (sinfo->num_fun == 0)
1843 gaps = TRUE;
1844 else
1845 {
1846 if (sinfo->fun[0].lo != 0)
1847 gaps = TRUE;
1848 if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
1849 {
1850 const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);
1851
1852 info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
1853 sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
1854 }
1855 else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
1856 gaps = TRUE;
1857 }
1858 return gaps;
1859 }
1860
1861 /* Search current function info for a function that contains address
1862 OFFSET in section SEC. */
1863
1864 static struct function_info *
1865 find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
1866 {
1867 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1868 struct spu_elf_stack_info *sinfo = sec_data->stack_info;
1869 int lo, hi, mid;
1870
1871 lo = 0;
1872 hi = sinfo->num_fun;
1873 while (lo < hi)
1874 {
1875 mid = (lo + hi) / 2;
1876 if (offset < sinfo->fun[mid].lo)
1877 hi = mid;
1878 else if (offset >= sinfo->fun[mid].hi)
1879 lo = mid + 1;
1880 else
1881 return &sinfo->fun[mid];
1882 }
1883 info->callbacks->einfo (_("%A:0x%v not found in function table\n"),
1884 sec, offset);
1885 return NULL;
1886 }
1887
1888 /* Add CALLEE to CALLER call list if not already present. */
1889
1890 static bfd_boolean
1891 insert_callee (struct function_info *caller, struct call_info *callee)
1892 {
1893 struct call_info *p;
1894 for (p = caller->call_list; p != NULL; p = p->next)
1895 if (p->fun == callee->fun)
1896 {
1897 /* Tail calls use less stack than normal calls. Retain entry
1898 for normal call over one for tail call. */
1899 if (p->is_tail > callee->is_tail)
1900 p->is_tail = callee->is_tail;
1901 return FALSE;
1902 }
1903 callee->next = caller->call_list;
1904 caller->call_list = callee;
1905 return TRUE;
1906 }
1907
/* Rummage through the relocs for SEC, looking for function calls.
   If CALL_TREE is true, fill in call graph.  If CALL_TREE is false,
   mark destination symbols on calls as being functions.  Also
   look at branches, which may be tail calls or go to hot/cold
   section part of same function.  */

static bfd_boolean
mark_functions_via_relocs (asection *sec,
			   struct bfd_link_info *info,
			   int call_tree)
{
  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
  Elf_Internal_Sym *syms;
  void *psyms;
  /* Persists across both passes so the non-code warning is not
     repeated on the call-tree pass.  */
  static bfd_boolean warned;

  internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
					       info->keep_memory);
  if (internal_relocs == NULL)
    return FALSE;

  symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
  psyms = &symtab_hdr->contents;
  syms = *(Elf_Internal_Sym **) psyms;
  irela = internal_relocs;
  irelaend = irela + sec->reloc_count;
  for (; irela < irelaend; irela++)
    {
      enum elf_spu_reloc_type r_type;
      unsigned int r_indx;
      asection *sym_sec;
      Elf_Internal_Sym *sym;
      struct elf_link_hash_entry *h;
      bfd_vma val;
      unsigned char insn[4];
      bfd_boolean is_call;
      struct function_info *caller;
      struct call_info *callee;

      /* Branch/call instructions carry one of these two relocs.  */
      r_type = ELF32_R_TYPE (irela->r_info);
      if (r_type != R_SPU_REL16
	  && r_type != R_SPU_ADDR16)
	continue;

      r_indx = ELF32_R_SYM (irela->r_info);
      if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
	return FALSE;

      /* Only targets within this link are of interest.  */
      if (sym_sec == NULL
	  || sym_sec->output_section == NULL
	  || sym_sec->output_section->owner != sec->output_section->owner)
	continue;

      if (!bfd_get_section_contents (sec->owner, sec, insn,
				     irela->r_offset, 4))
	return FALSE;
      if (!is_branch (insn))
	continue;

      if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
	  != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
	{
	  /* Warn on the marking pass; on the call-tree pass only if
	     the marking pass didn't already warn.  */
	  if (!call_tree)
	    warned = TRUE;
	  if (!call_tree || !warned)
	    info->callbacks->einfo (_("%B(%A+0x%v): call to non-code section"
				      " %B(%A), stack analysis incomplete\n"),
				    sec->owner, sec, irela->r_offset,
				    sym_sec->owner, sym_sec);
	  continue;
	}

      /* 0x31/0x33 opcodes set the link register, ie. a true call;
	 other branches are tail calls or intra-function jumps.  */
      is_call = (insn[0] & 0xfd) == 0x31;

      if (h)
	val = h->root.u.def.value;
      else
	val = sym->st_value;
      val += irela->r_addend;

      if (!call_tree)
	{
	  struct function_info *fun;

	  /* With a non-zero addend the symbol itself doesn't mark the
	     destination; cook up a fake local sym that does.  */
	  if (irela->r_addend != 0)
	    {
	      Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
	      if (fake == NULL)
		return FALSE;
	      fake->st_value = val;
	      fake->st_shndx
		= _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
	      sym = fake;
	    }
	  if (sym)
	    fun = maybe_insert_function (sym_sec, sym, FALSE, is_call);
	  else
	    fun = maybe_insert_function (sym_sec, h, TRUE, is_call);
	  if (fun == NULL)
	    return FALSE;
	  /* Free the fake sym unless maybe_insert_function kept it.  */
	  if (irela->r_addend != 0
	      && fun->u.sym != sym)
	    free (sym);
	  continue;
	}

      /* Call-tree pass: record the edge caller -> callee.  */
      caller = find_function (sec, irela->r_offset, info);
      if (caller == NULL)
	return FALSE;
      callee = bfd_malloc (sizeof *callee);
      if (callee == NULL)
	return FALSE;

      callee->fun = find_function (sym_sec, val, info);
      if (callee->fun == NULL)
	return FALSE;
      callee->is_tail = !is_call;
      if (!insert_callee (caller, callee))
	free (callee);
      else if (!is_call
	       && !callee->fun->is_func
	       && callee->fun->stack == 0)
	{
	  /* This is either a tail call or a branch from one part of
	     the function to another, ie. hot/cold section.  If the
	     destination has been called by some other function then
	     it is a separate function.  We also assume that functions
	     are not split across input files.  */
	  if (callee->fun->start != NULL
	      || sec->owner != sym_sec->owner)
	    {
	      callee->fun->start = NULL;
	      callee->fun->is_func = TRUE;
	    }
	  else
	    callee->fun->start = caller;
	}
    }

  return TRUE;
}
2050
/* Handle something like .init or .fini, which has a piece of a function.
   These sections are pasted together to form a single function.

   Creates a function_info covering all of SEC and links it to the
   function in the immediately preceding input section of the same
   output section.  Returns FALSE on allocation failure or when SEC
   has no link_order entry.  */

static bfd_boolean
pasted_function (asection *sec, struct bfd_link_info *info)
{
  struct bfd_link_order *l;
  struct _spu_elf_section_data *sec_data;
  struct spu_elf_stack_info *sinfo;
  Elf_Internal_Sym *fake;
  struct function_info *fun, *fun_start;

  /* Fake local symbol covering the whole section; ownership passes
     to the function_info created below, so it is not freed here.  */
  fake = bfd_zmalloc (sizeof (*fake));
  if (fake == NULL)
    return FALSE;
  fake->st_value = 0;
  fake->st_size = sec->size;
  fake->st_shndx
    = _bfd_elf_section_from_bfd_section (sec->owner, sec);
  fun = maybe_insert_function (sec, fake, FALSE, FALSE);
  if (!fun)
    return FALSE;

  /* Find a function immediately preceding this section.  */
  fun_start = NULL;
  for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
    {
      if (l->u.indirect.section == sec)
	{
	  if (fun_start != NULL)
	    {
	      /* Link to the owner of the preceding fragment, not the
		 fragment itself.  */
	      if (fun_start->start)
		fun_start = fun_start->start;
	      fun->start = fun_start;
	    }
	  return TRUE;
	}
      /* Remember the last function of each preceding section.  */
      if (l->type == bfd_indirect_link_order
	  && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
	  && (sinfo = sec_data->stack_info) != NULL
	  && sinfo->num_fun != 0)
	fun_start = &sinfo->fun[sinfo->num_fun - 1];
    }

  info->callbacks->einfo (_("%A link_order not found\n"), sec);
  return FALSE;
}
2098
2099 /* We're only interested in code sections. */
2100
2101 static bfd_boolean
2102 interesting_section (asection *s, bfd *obfd, struct spu_link_hash_table *htab)
2103 {
2104 return (s != htab->stub
2105 && s->output_section != NULL
2106 && s->output_section->owner == obfd
2107 && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2108 == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2109 && s->size != 0);
2110 }
2111
2112 /* Map address ranges in code sections to functions. */
2113
2114 static bfd_boolean
2115 discover_functions (bfd *output_bfd, struct bfd_link_info *info)
2116 {
2117 struct spu_link_hash_table *htab = spu_hash_table (info);
2118 bfd *ibfd;
2119 int bfd_idx;
2120 Elf_Internal_Sym ***psym_arr;
2121 asection ***sec_arr;
2122 bfd_boolean gaps = FALSE;
2123
2124 bfd_idx = 0;
2125 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2126 bfd_idx++;
2127
2128 psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
2129 if (psym_arr == NULL)
2130 return FALSE;
2131 sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
2132 if (sec_arr == NULL)
2133 return FALSE;
2134
2135
2136 for (ibfd = info->input_bfds, bfd_idx = 0;
2137 ibfd != NULL;
2138 ibfd = ibfd->link_next, bfd_idx++)
2139 {
2140 extern const bfd_target bfd_elf32_spu_vec;
2141 Elf_Internal_Shdr *symtab_hdr;
2142 asection *sec;
2143 size_t symcount;
2144 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2145 asection **psecs, **p;
2146
2147 if (ibfd->xvec != &bfd_elf32_spu_vec)
2148 continue;
2149
2150 /* Read all the symbols. */
2151 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2152 symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
2153 if (symcount == 0)
2154 continue;
2155
2156 syms = (Elf_Internal_Sym *) symtab_hdr->contents;
2157 if (syms == NULL)
2158 {
2159 syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
2160 NULL, NULL, NULL);
2161 symtab_hdr->contents = (void *) syms;
2162 if (syms == NULL)
2163 return FALSE;
2164 }
2165
2166 /* Select defined function symbols that are going to be output. */
2167 psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
2168 if (psyms == NULL)
2169 return FALSE;
2170 psym_arr[bfd_idx] = psyms;
2171 psecs = bfd_malloc (symcount * sizeof (*psecs));
2172 if (psecs == NULL)
2173 return FALSE;
2174 sec_arr[bfd_idx] = psecs;
2175 for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
2176 if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
2177 || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
2178 {
2179 asection *s;
2180
2181 *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
2182 if (s != NULL && interesting_section (s, output_bfd, htab))
2183 *psy++ = sy;
2184 }
2185 symcount = psy - psyms;
2186 *psy = NULL;
2187
2188 /* Sort them by section and offset within section. */
2189 sort_syms_syms = syms;
2190 sort_syms_psecs = psecs;
2191 qsort (psyms, symcount, sizeof (*psyms), sort_syms);
2192
2193 /* Now inspect the function symbols. */
2194 for (psy = psyms; psy < psyms + symcount; )
2195 {
2196 asection *s = psecs[*psy - syms];
2197 Elf_Internal_Sym **psy2;
2198
2199 for (psy2 = psy; ++psy2 < psyms + symcount; )
2200 if (psecs[*psy2 - syms] != s)
2201 break;
2202
2203 if (!alloc_stack_info (s, psy2 - psy))
2204 return FALSE;
2205 psy = psy2;
2206 }
2207
2208 /* First install info about properly typed and sized functions.
2209 In an ideal world this will cover all code sections, except
2210 when partitioning functions into hot and cold sections,
2211 and the horrible pasted together .init and .fini functions. */
2212 for (psy = psyms; psy < psyms + symcount; ++psy)
2213 {
2214 sy = *psy;
2215 if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
2216 {
2217 asection *s = psecs[sy - syms];
2218 if (!maybe_insert_function (s, sy, FALSE, TRUE))
2219 return FALSE;
2220 }
2221 }
2222
2223 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2224 if (interesting_section (sec, output_bfd, htab))
2225 gaps |= check_function_ranges (sec, info);
2226 }
2227
2228 if (gaps)
2229 {
2230 /* See if we can discover more function symbols by looking at
2231 relocations. */
2232 for (ibfd = info->input_bfds, bfd_idx = 0;
2233 ibfd != NULL;
2234 ibfd = ibfd->link_next, bfd_idx++)
2235 {
2236 asection *sec;
2237
2238 if (psym_arr[bfd_idx] == NULL)
2239 continue;
2240
2241 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2242 if (interesting_section (sec, output_bfd, htab)
2243 && sec->reloc_count != 0)
2244 {
2245 if (!mark_functions_via_relocs (sec, info, FALSE))
2246 return FALSE;
2247 }
2248 }
2249
2250 for (ibfd = info->input_bfds, bfd_idx = 0;
2251 ibfd != NULL;
2252 ibfd = ibfd->link_next, bfd_idx++)
2253 {
2254 Elf_Internal_Shdr *symtab_hdr;
2255 asection *sec;
2256 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2257 asection **psecs;
2258
2259 if ((psyms = psym_arr[bfd_idx]) == NULL)
2260 continue;
2261
2262 psecs = sec_arr[bfd_idx];
2263
2264 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2265 syms = (Elf_Internal_Sym *) symtab_hdr->contents;
2266
2267 gaps = FALSE;
2268 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2269 if (interesting_section (sec, output_bfd, htab))
2270 gaps |= check_function_ranges (sec, info);
2271 if (!gaps)
2272 continue;
2273
2274 /* Finally, install all globals. */
2275 for (psy = psyms; (sy = *psy) != NULL; ++psy)
2276 {
2277 asection *s;
2278
2279 s = psecs[sy - syms];
2280
2281 /* Global syms might be improperly typed functions. */
2282 if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
2283 && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
2284 {
2285 if (!maybe_insert_function (s, sy, FALSE, FALSE))
2286 return FALSE;
2287 }
2288 }
2289
2290 /* Some of the symbols we've installed as marking the
2291 beginning of functions may have a size of zero. Extend
2292 the range of such functions to the beginning of the
2293 next symbol of interest. */
2294 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2295 if (interesting_section (sec, output_bfd, htab))
2296 {
2297 struct _spu_elf_section_data *sec_data;
2298 struct spu_elf_stack_info *sinfo;
2299
2300 sec_data = spu_elf_section_data (sec);
2301 sinfo = sec_data->stack_info;
2302 if (sinfo != NULL)
2303 {
2304 int fun_idx;
2305 bfd_vma hi = sec->size;
2306
2307 for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
2308 {
2309 sinfo->fun[fun_idx].hi = hi;
2310 hi = sinfo->fun[fun_idx].lo;
2311 }
2312 }
2313 /* No symbols in this section. Must be .init or .fini
2314 or something similar. */
2315 else if (!pasted_function (sec, info))
2316 return FALSE;
2317 }
2318 }
2319 }
2320
2321 for (ibfd = info->input_bfds, bfd_idx = 0;
2322 ibfd != NULL;
2323 ibfd = ibfd->link_next, bfd_idx++)
2324 {
2325 if (psym_arr[bfd_idx] == NULL)
2326 continue;
2327
2328 free (psym_arr[bfd_idx]);
2329 free (sec_arr[bfd_idx]);
2330 }
2331
2332 free (psym_arr);
2333 free (sec_arr);
2334
2335 return TRUE;
2336 }
2337
2338 /* Mark nodes in the call graph that are called by some other node. */
2339
2340 static void
2341 mark_non_root (struct function_info *fun)
2342 {
2343 struct call_info *call;
2344
2345 fun->visit1 = TRUE;
2346 for (call = fun->call_list; call; call = call->next)
2347 {
2348 call->fun->non_root = TRUE;
2349 if (!call->fun->visit1)
2350 mark_non_root (call->fun);
2351 }
2352 }
2353
/* Remove cycles from the call graph.

   Depth-first walk from FUN.  VISIT2 marks nodes already processed;
   MARKING flags nodes currently on the DFS stack.  A call edge that
   reaches a node still being marked closes a cycle, so that edge is
   unlinked from the call list (after reporting it via the INFO
   callback) to guarantee later stack summation terminates.  */

static void
call_graph_traverse (struct function_info *fun, struct bfd_link_info *info)
{
  struct call_info **callp, *call;

  fun->visit2 = TRUE;
  fun->marking = TRUE;		/* FUN is on the active DFS path.  */

  callp = &fun->call_list;
  while ((call = *callp) != NULL)
    {
      if (!call->fun->visit2)
	call_graph_traverse (call->fun, info);
      else if (call->fun->marking)
	{
	  /* Back edge: CALL->FUN is an ancestor on the DFS stack,
	     so keeping this edge would create a cycle.  Drop it.  */
	  const char *f1 = func_name (fun);
	  const char *f2 = func_name (call->fun);

	  info->callbacks->info (_("Stack analysis will ignore the call "
				   "from %s to %s\n"),
				 f1, f2);
	  /* Unlink the edge.  CALLP is deliberately not advanced so
	     the next iteration examines the edge that replaced this
	     one.  NOTE(review): the call_info is not freed here.  */
	  *callp = call->next;
	  continue;
	}
      callp = &call->next;
    }
  fun->marking = FALSE;		/* FUN leaves the DFS path.  */
}
2384
/* Populate call_list for each function.

   Three passes over the SPU input files:
     1. scan relocations in interesting code sections to record
	caller->callee edges, and fold edges of hot/cold function
	fragments back onto their entry part;
     2. flag every called node via mark_non_root, leaving the call
	graph root(s) unflagged;
     3. break cycles starting from those roots.  */

static bfd_boolean
build_call_tree (bfd *output_bfd, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;

      /* Only SPU ELF objects participate in the analysis.  */
      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      /* Build call edges from branch/call relocs.  */
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  if (!interesting_section (sec, output_bfd, htab)
	      || sec->reloc_count == 0)
	    continue;

	  if (!mark_functions_via_relocs (sec, info, TRUE))
	    return FALSE;
	}

      /* Transfer call info from hot/cold section part of function
	 to main entry.  */
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  struct _spu_elf_section_data *sec_data;
	  struct spu_elf_stack_info *sinfo;

	  if ((sec_data = spu_elf_section_data (sec)) != NULL
	      && (sinfo = sec_data->stack_info) != NULL)
	    {
	      int i;
	      for (i = 0; i < sinfo->num_fun; ++i)
		{
		  /* START non-NULL marks a continuation fragment.  */
		  if (sinfo->fun[i].start != NULL)
		    {
		      struct call_info *call = sinfo->fun[i].call_list;

		      while (call != NULL)
			{
			  struct call_info *call_next = call->next;
			  /* insert_callee takes ownership on success;
			     duplicates it rejects are freed here.  */
			  if (!insert_callee (sinfo->fun[i].start, call))
			    free (call);
			  call = call_next;
			}
		      sinfo->fun[i].call_list = NULL;
		      /* A fragment is never a call graph root.  */
		      sinfo->fun[i].non_root = TRUE;
		    }
		}
	    }
	}
    }

  /* Find the call graph root(s).  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  struct _spu_elf_section_data *sec_data;
	  struct spu_elf_stack_info *sinfo;

	  if ((sec_data = spu_elf_section_data (sec)) != NULL
	      && (sinfo = sec_data->stack_info) != NULL)
	    {
	      int i;
	      for (i = 0; i < sinfo->num_fun; ++i)
		if (!sinfo->fun[i].visit1)
		  mark_non_root (&sinfo->fun[i]);
	    }
	}
    }

  /* Remove cycles from the call graph.  We start from the root node(s)
     so that we break cycles in a reasonable place.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  struct _spu_elf_section_data *sec_data;
	  struct spu_elf_stack_info *sinfo;

	  if ((sec_data = spu_elf_section_data (sec)) != NULL
	      && (sinfo = sec_data->stack_info) != NULL)
	    {
	      int i;
	      for (i = 0; i < sinfo->num_fun; ++i)
		if (!sinfo->fun[i].non_root)
		  call_graph_traverse (&sinfo->fun[i], info);
	    }
	}
    }

  return TRUE;
}
2496
/* Descend the call graph for FUN, accumulating total stack required.

   Returns the maximum stack needed by FUN plus everything it calls.
   On return FUN->STACK is rewritten from local usage to this
   cumulative figure and VISIT3 is set, so shared subtrees are summed
   only once.  When EMIT_STACK_SYMS is nonzero, an absolute linker
   symbol __stack_<func> (or __stack_<secid>_<func> for non-global
   functions) carrying the cumulative value is also defined.  */

static bfd_vma
sum_stack (struct function_info *fun,
	   struct bfd_link_info *info,
	   int emit_stack_syms)
{
  struct call_info *call;
  struct function_info *max = NULL;
  bfd_vma max_stack = fun->stack;
  bfd_vma stack;
  const char *f1;

  /* Already summed: FUN->STACK is already the cumulative value.  */
  if (fun->visit3)
    return max_stack;

  for (call = fun->call_list; call; call = call->next)
    {
      stack = sum_stack (call->fun, info, emit_stack_syms);
      /* Include caller stack for normal calls, don't do so for
	 tail calls.  fun->stack here is local stack usage for
	 this function.  */
      if (!call->is_tail)
	stack += fun->stack;
      if (max_stack < stack)
	{
	  max_stack = stack;
	  max = call->fun;	/* Remember the worst-case callee.  */
	}
    }

  /* Report local and cumulative usage to the linker map file.  */
  f1 = func_name (fun);
  info->callbacks->minfo (_("%s: 0x%v 0x%v\n"),
			  f1, (bfd_vma) fun->stack, max_stack);

  if (fun->call_list)
    {
      info->callbacks->minfo (_(" calls:\n"));
      for (call = fun->call_list; call; call = call->next)
	{
	  const char *f2 = func_name (call->fun);
	  /* '*' marks the callee on the max-stack path, 't' marks a
	     tail call.  */
	  const char *ann1 = call->fun == max ? "*" : " ";
	  const char *ann2 = call->is_tail ? "t" : " ";

	  info->callbacks->minfo (_(" %s%s %s\n"), ann1, ann2, f2);
	}
    }

  /* Now fun->stack holds cumulative stack.  */
  fun->stack = max_stack;
  fun->visit3 = TRUE;

  if (emit_stack_syms)
    {
      struct spu_link_hash_table *htab = spu_hash_table (info);
      /* 18 covers "__stack_" + 8 hex digits + "_" + NUL.  */
      char *name = bfd_malloc (18 + strlen (f1));
      struct elf_link_hash_entry *h;

      if (name != NULL)
	{
	  if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
	    sprintf (name, "__stack_%s", f1);
	  else
	    /* Local names may collide across files; qualify with the
	       section id.  */
	    sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);

	  h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
	  free (name);
	  /* Define the symbol only if nothing has defined it yet.  */
	  if (h != NULL
	      && (h->root.type == bfd_link_hash_new
		  || h->root.type == bfd_link_hash_undefined
		  || h->root.type == bfd_link_hash_undefweak))
	    {
	      h->root.type = bfd_link_hash_defined;
	      h->root.u.def.section = bfd_abs_section_ptr;
	      h->root.u.def.value = max_stack;
	      h->size = 0;
	      h->type = 0;
	      h->ref_regular = 1;
	      h->def_regular = 1;
	      h->ref_regular_nonweak = 1;
	      h->forced_local = 1;
	      h->non_elf = 0;
	    }
	}
    }

  return max_stack;
}
2585
2586 /* Provide an estimate of total stack required. */
2587
2588 static bfd_boolean
2589 spu_elf_stack_analysis (bfd *output_bfd,
2590 struct bfd_link_info *info,
2591 int emit_stack_syms)
2592 {
2593 bfd *ibfd;
2594 bfd_vma max_stack = 0;
2595
2596 if (!discover_functions (output_bfd, info))
2597 return FALSE;
2598
2599 if (!build_call_tree (output_bfd, info))
2600 return FALSE;
2601
2602 info->callbacks->info (_("Stack size for call graph root nodes.\n"));
2603 info->callbacks->minfo (_("\nStack size for functions. "
2604 "Annotations: '*' max stack, 't' tail call\n"));
2605 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2606 {
2607 extern const bfd_target bfd_elf32_spu_vec;
2608 asection *sec;
2609
2610 if (ibfd->xvec != &bfd_elf32_spu_vec)
2611 continue;
2612
2613 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2614 {
2615 struct _spu_elf_section_data *sec_data;
2616 struct spu_elf_stack_info *sinfo;
2617
2618 if ((sec_data = spu_elf_section_data (sec)) != NULL
2619 && (sinfo = sec_data->stack_info) != NULL)
2620 {
2621 int i;
2622 for (i = 0; i < sinfo->num_fun; ++i)
2623 {
2624 if (!sinfo->fun[i].non_root)
2625 {
2626 bfd_vma stack;
2627 const char *f1;
2628
2629 stack = sum_stack (&sinfo->fun[i], info,
2630 emit_stack_syms);
2631 f1 = func_name (&sinfo->fun[i]);
2632 info->callbacks->info (_(" %s: 0x%v\n"),
2633 f1, stack);
2634 if (max_stack < stack)
2635 max_stack = stack;
2636 }
2637 }
2638 }
2639 }
2640 }
2641
2642 info->callbacks->info (_("Maximum stack required is 0x%v\n"), max_stack);
2643 return TRUE;
2644 }
2645
2646 /* Perform a final link. */
2647
2648 static bfd_boolean
2649 spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
2650 {
2651 struct spu_link_hash_table *htab = spu_hash_table (info);
2652
2653 if (htab->stack_analysis
2654 && !spu_elf_stack_analysis (output_bfd, info, htab->emit_stack_syms))
2655 info->callbacks->einfo ("%X%P: stack analysis error: %E\n");
2656
2657 return bfd_elf_final_link (output_bfd, info);
2658 }
2659
2660 /* Called when not normally emitting relocs, ie. !info->relocatable
2661 and !info->emitrelocations. Returns a count of special relocs
2662 that need to be emitted. */
2663
2664 static unsigned int
2665 spu_elf_count_relocs (asection *sec, Elf_Internal_Rela *relocs)
2666 {
2667 unsigned int count = 0;
2668 Elf_Internal_Rela *relend = relocs + sec->reloc_count;
2669
2670 for (; relocs < relend; relocs++)
2671 {
2672 int r_type = ELF32_R_TYPE (relocs->r_info);
2673 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
2674 ++count;
2675 }
2676
2677 return count;
2678 }
2679
/* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD.

   Standard elf_backend_relocate_section hook.  Two SPU twists:
   relocs against symbols in overlay sections may be redirected to
   overlay stubs, and R_SPU_PPU32/R_SPU_PPU64 relocs are not applied
   but squeezed into the front of the reloc array for emission to the
   output (the function then returns 2 rather than TRUE).  */

static bfd_boolean
spu_elf_relocate_section (bfd *output_bfd,
			  struct bfd_link_info *info,
			  bfd *input_bfd,
			  asection *input_section,
			  bfd_byte *contents,
			  Elf_Internal_Rela *relocs,
			  Elf_Internal_Sym *local_syms,
			  asection **local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel, *relend;
  struct spu_link_hash_table *htab;
  bfd_boolean ret = TRUE;
  bfd_boolean emit_these_relocs = FALSE;

  htab = spu_hash_table (info);
  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
  sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int r_type;
      reloc_howto_type *howto;
      unsigned long r_symndx;
      Elf_Internal_Sym *sym;
      asection *sec;
      struct elf_link_hash_entry *h;
      const char *sym_name;
      bfd_vma relocation;
      bfd_vma addend;
      bfd_reloc_status_type r;
      bfd_boolean unresolved_reloc;
      bfd_boolean warned;
      bfd_boolean branch;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      /* PPU relocs are for the PowerPC side to resolve; don't apply
	 them here, just remember we saw some.  */
      if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	{
	  emit_these_relocs = TRUE;
	  continue;
	}

      howto = elf_howto_table + r_type;
      unresolved_reloc = FALSE;
      warned = FALSE;
      h = NULL;
      sym = NULL;
      sec = NULL;
      /* Resolve the symbol the reloc is against.  */
      if (r_symndx < symtab_hdr->sh_info)
	{
	  /* Local symbol.  */
	  sym = local_syms + r_symndx;
	  sec = local_sections[r_symndx];
	  sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
	  relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
	}
      else
	{
	  /* Global symbol; the macro fills in H, SEC, RELOCATION,
	     UNRESOLVED_RELOC and WARNED.  */
	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
				   r_symndx, symtab_hdr, sym_hashes,
				   h, sec, relocation,
				   unresolved_reloc, warned);
	  sym_name = h->root.root.string;
	}

      if (sec != NULL && elf_discarded_section (sec))
	{
	  /* For relocs against symbols from removed linkonce sections,
	     or sections discarded by a linker script, we just want the
	     section contents zeroed.  Avoid any special processing.  */
	  _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
	  rel->r_info = 0;
	  rel->r_addend = 0;
	  continue;
	}

      if (info->relocatable)
	continue;

      if (unresolved_reloc)
	{
	  (*_bfd_error_handler)
	    (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
	     input_bfd,
	     bfd_get_section_name (input_bfd, input_section),
	     (long) rel->r_offset,
	     howto->name,
	     sym_name);
	  ret = FALSE;
	}

      /* If this symbol is in an overlay area, we may need to relocate
	 to the overlay stub.  */
      addend = rel->r_addend;
      branch = (is_branch (contents + rel->r_offset)
		|| is_hint (contents + rel->r_offset));
      if (needs_ovl_stub (sym_name, sec, input_section, htab, branch))
	{
	  char *stub_name;
	  struct spu_stub_hash_entry *sh;

	  stub_name = spu_stub_name (sec, h, rel);
	  if (stub_name == NULL)
	    return FALSE;

	  sh = (struct spu_stub_hash_entry *)
	    bfd_hash_lookup (&htab->stub_hash_table, stub_name, FALSE, FALSE);
	  if (sh != NULL)
	    {
	      /* Redirect to the stub and drop the addend; the stub
		 address is used as-is.  */
	      relocation = (htab->stub->output_section->vma
			    + htab->stub->output_offset
			    + sh->off);
	      addend = 0;
	    }
	  free (stub_name);
	}

      r = _bfd_final_link_relocate (howto,
				    input_bfd,
				    input_section,
				    contents,
				    rel->r_offset, relocation, addend);

      if (r != bfd_reloc_ok)
	{
	  const char *msg = (const char *) 0;

	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      if (!((*info->callbacks->reloc_overflow)
		    (info, (h ? &h->root : NULL), sym_name, howto->name,
		     (bfd_vma) 0, input_bfd, input_section, rel->r_offset)))
		return FALSE;
	      break;

	    case bfd_reloc_undefined:
	      if (!((*info->callbacks->undefined_symbol)
		    (info, sym_name, input_bfd, input_section,
		     rel->r_offset, TRUE)))
		return FALSE;
	      break;

	    case bfd_reloc_outofrange:
	      msg = _("internal error: out of range error");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      msg = _("internal error: unsupported relocation error");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      msg = _("internal error: dangerous error");
	      goto common_error;

	    default:
	      msg = _("internal error: unknown error");
	      /* fall through */

	    common_error:
	      if (!((*info->callbacks->warning)
		    (info, msg, sym_name, input_bfd, input_section,
		     rel->r_offset)))
		return FALSE;
	      break;
	    }
	}
    }

  if (ret
      && emit_these_relocs
      && !info->relocatable
      && !info->emitrelocations)
    {
      /* Compact the reloc array so only the PPU relocs remain for
	 output.  */
      Elf_Internal_Rela *wrel;
      Elf_Internal_Shdr *rel_hdr;

      wrel = rel = relocs;
      relend = relocs + input_section->reloc_count;
      for (; rel < relend; rel++)
	{
	  int r_type;

	  r_type = ELF32_R_TYPE (rel->r_info);
	  if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	    *wrel++ = *rel;
	}
      input_section->reloc_count = wrel - relocs;
      /* Backflips for _bfd_elf_link_output_relocs.  */
      rel_hdr = &elf_section_data (input_section)->rel_hdr;
      rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
      /* Special return value telling the linker to emit them.  */
      ret = 2;
    }

  return ret;
}
2882
2883 /* Adjust _SPUEAR_ syms to point at their overlay stubs. */
2884
2885 static bfd_boolean
2886 spu_elf_output_symbol_hook (struct bfd_link_info *info,
2887 const char *sym_name ATTRIBUTE_UNUSED,
2888 Elf_Internal_Sym *sym,
2889 asection *sym_sec ATTRIBUTE_UNUSED,
2890 struct elf_link_hash_entry *h)
2891 {
2892 struct spu_link_hash_table *htab = spu_hash_table (info);
2893
2894 if (!info->relocatable
2895 && htab->num_overlays != 0
2896 && h != NULL
2897 && (h->root.type == bfd_link_hash_defined
2898 || h->root.type == bfd_link_hash_defweak)
2899 && h->def_regular
2900 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
2901 {
2902 static Elf_Internal_Rela zero_rel;
2903 char *stub_name = spu_stub_name (h->root.u.def.section, h, &zero_rel);
2904 struct spu_stub_hash_entry *sh;
2905
2906 if (stub_name == NULL)
2907 return FALSE;
2908 sh = (struct spu_stub_hash_entry *)
2909 bfd_hash_lookup (&htab->stub_hash_table, stub_name, FALSE, FALSE);
2910 free (stub_name);
2911 if (sh == NULL)
2912 return TRUE;
2913 sym->st_shndx
2914 = _bfd_elf_section_from_bfd_section (htab->stub->output_section->owner,
2915 htab->stub->output_section);
2916 sym->st_value = (htab->stub->output_section->vma
2917 + htab->stub->output_offset
2918 + sh->off);
2919 }
2920
2921 return TRUE;
2922 }
2923
/* Nonzero when plugin output has been requested; consulted by
   spu_elf_post_process_headers when writing the ELF header.  */
static int spu_plugin = 0;

/* Record whether plugin output is wanted; VAL nonzero enables it.  */

void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}
2931
2932 /* Set ELF header e_type for plugins. */
2933
2934 static void
2935 spu_elf_post_process_headers (bfd *abfd,
2936 struct bfd_link_info *info ATTRIBUTE_UNUSED)
2937 {
2938 if (spu_plugin)
2939 {
2940 Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);
2941
2942 i_ehdrp->e_type = ET_DYN;
2943 }
2944 }
2945
2946 /* We may add an extra PT_LOAD segment for .toe. We also need extra
2947 segments for overlays. */
2948
2949 static int
2950 spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
2951 {
2952 struct spu_link_hash_table *htab = spu_hash_table (info);
2953 int extra = htab->num_overlays;
2954 asection *sec;
2955
2956 if (extra)
2957 ++extra;
2958
2959 sec = bfd_get_section_by_name (abfd, ".toe");
2960 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
2961 ++extra;
2962
2963 return extra;
2964 }
2965
/* Remove .toe section from other PT_LOAD segments and put it in
   a segment of its own.  Put overlays in separate segments too.  */

static bfd_boolean
spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
{
  asection *toe, *s;
  struct elf_segment_map *m;
  unsigned int i;

  /* Nothing to do unless we are linking.  */
  if (info == NULL)
    return TRUE;

  toe = bfd_get_section_by_name (abfd, ".toe");
  for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
    if (m->p_type == PT_LOAD && m->count > 1)
      for (i = 0; i < m->count; i++)
	if ((s = m->sections[i]) == toe
	    || spu_elf_section_data (s)->ovl_index != 0)
	  {
	    struct elf_segment_map *m2;
	    bfd_vma amt;

	    /* If sections follow S in this map, split them off into
	       a new PT_LOAD map M2 inserted after M.  (The map's
	       sections[] is a flexible array, hence the size
	       arithmetic: one element is in sizeof (*m2).)  */
	    if (i + 1 < m->count)
	      {
		amt = sizeof (struct elf_segment_map);
		amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->count = m->count - (i + 1);
		memcpy (m2->sections, m->sections + i + 1,
			m2->count * sizeof (m->sections[0]));
		m2->p_type = PT_LOAD;
		m2->next = m->next;
		m->next = m2;
	      }
	    /* Assume S was first: M keeps just S...  */
	    m->count = 1;
	    if (i != 0)
	      {
		/* ...otherwise M keeps the sections before S, and S
		   itself moves to a fresh single-section map.  */
		m->count = i;
		amt = sizeof (struct elf_segment_map);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->p_type = PT_LOAD;
		m2->count = 1;
		m2->sections[0] = s;
		m2->next = m->next;
		m->next = m2;
	      }
	    /* The maps just inserted are visited in turn as the
	       outer loop advances to m->next.  */
	    break;
	  }

  return TRUE;
}
3022
3023 /* Check that all loadable section VMAs lie in the range
3024 LO .. HI inclusive. */
3025
3026 asection *
3027 spu_elf_check_vma (bfd *abfd, bfd_vma lo, bfd_vma hi)
3028 {
3029 struct elf_segment_map *m;
3030 unsigned int i;
3031
3032 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
3033 if (m->p_type == PT_LOAD)
3034 for (i = 0; i < m->count; i++)
3035 if (m->sections[i]->size != 0
3036 && (m->sections[i]->vma < lo
3037 || m->sections[i]->vma > hi
3038 || m->sections[i]->vma + m->sections[i]->size - 1 > hi))
3039 return m->sections[i];
3040
3041 return NULL;
3042 }
3043
3044 /* Tweak the section type of .note.spu_name. */
3045
3046 static bfd_boolean
3047 spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
3048 Elf_Internal_Shdr *hdr,
3049 asection *sec)
3050 {
3051 if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
3052 hdr->sh_type = SHT_NOTE;
3053 return TRUE;
3054 }
3055
/* Tweak phdrs before writing them out.

   Two jobs: flag overlay segments with PF_OVERLAY (recording their
   file offsets in the _ovly_table), and pad PT_LOAD p_filesz/p_memsz
   up to 16-byte multiples when that cannot overlap a neighbouring
   segment.  */

static int
spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
{
  const struct elf_backend_data *bed;
  struct elf_obj_tdata *tdata;
  Elf_Internal_Phdr *phdr, *last;
  struct spu_link_hash_table *htab;
  unsigned int count;
  unsigned int i;

  /* Nothing to do unless we are linking.  */
  if (info == NULL)
    return TRUE;

  bed = get_elf_backend_data (abfd);
  tdata = elf_tdata (abfd);
  phdr = tdata->phdr;
  count = tdata->program_header_size / bed->s->sizeof_phdr;
  htab = spu_hash_table (info);
  if (htab->num_overlays != 0)
    {
      struct elf_segment_map *m;
      unsigned int o;

      /* Walk segment maps and phdrs in lock step; I indexes both.  */
      for (i = 0, m = elf_tdata (abfd)->segment_map; m; ++i, m = m->next)
	if (m->count != 0
	    && (o = spu_elf_section_data (m->sections[0])->ovl_index) != 0)
	  {
	    /* Mark this as an overlay header.  */
	    phdr[i].p_flags |= PF_OVERLAY;

	    if (htab->ovtab != NULL && htab->ovtab->size != 0)
	      {
		bfd_byte *p = htab->ovtab->contents;
		/* _ovly_table entries are 16 bytes; the file offset
		   field lives 8 bytes into entry O-1.  */
		unsigned int off = (o - 1) * 16 + 8;

		/* Write file_off into _ovly_table.  */
		bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
	      }
	  }
    }

  /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
     of 16.  This should always be possible when using the standard
     linker scripts, but don't create overlapping segments if
     someone is playing games with linker scripts.  */
  last = NULL;
  for (i = count; i-- != 0; )
    if (phdr[i].p_type == PT_LOAD)
      {
	unsigned adjust;

	/* Would padding the file image run into the next higher
	   PT_LOAD segment?  If so bail out, leaving I != -1u.  */
	adjust = -phdr[i].p_filesz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
	  break;

	/* Likewise for the memory image.  */
	adjust = -phdr[i].p_memsz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_filesz != 0
	    && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
	    && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
	  break;

	if (phdr[i].p_filesz != 0)
	  last = &phdr[i];
      }

  /* Apply the rounding only if the check loop above ran to
     completion, i.e. I wrapped to (unsigned int) -1.  */
  if (i == (unsigned int) -1)
    for (i = count; i-- != 0; )
      if (phdr[i].p_type == PT_LOAD)
	{
	  unsigned adjust;

	  adjust = -phdr[i].p_filesz & 15;
	  phdr[i].p_filesz += adjust;

	  adjust = -phdr[i].p_memsz & 15;
	  phdr[i].p_memsz += adjust;
	}

  return TRUE;
}
3142
3143 #define TARGET_BIG_SYM bfd_elf32_spu_vec
3144 #define TARGET_BIG_NAME "elf32-spu"
3145 #define ELF_ARCH bfd_arch_spu
3146 #define ELF_MACHINE_CODE EM_SPU
3147 /* This matches the alignment need for DMA. */
3148 #define ELF_MAXPAGESIZE 0x80
3149 #define elf_backend_rela_normal 1
3150 #define elf_backend_can_gc_sections 1
3151
3152 #define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
3153 #define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
3154 #define elf_info_to_howto spu_elf_info_to_howto
3155 #define elf_backend_count_relocs spu_elf_count_relocs
3156 #define elf_backend_relocate_section spu_elf_relocate_section
3157 #define elf_backend_symbol_processing spu_elf_backend_symbol_processing
3158 #define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
3159 #define bfd_elf32_new_section_hook spu_elf_new_section_hook
3160 #define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create
3161 #define bfd_elf32_bfd_link_hash_table_free spu_elf_link_hash_table_free
3162
3163 #define elf_backend_additional_program_headers spu_elf_additional_program_headers
3164 #define elf_backend_modify_segment_map spu_elf_modify_segment_map
3165 #define elf_backend_modify_program_headers spu_elf_modify_program_headers
3166 #define elf_backend_post_process_headers spu_elf_post_process_headers
3167 #define elf_backend_fake_sections spu_elf_fake_sections
3168 #define elf_backend_special_sections spu_elf_special_sections
3169 #define bfd_elf32_bfd_final_link spu_elf_final_link
3170
3171 #include "elf32-target.h"
This page took 0.115198 seconds and 5 git commands to generate.