* elf32-spu.c (spu_elf_size_stubs): Do consider branches to
[deliverable/binutils-gdb.git] / bfd / elf32-spu.c
1 /* SPU specific support for 32-bit ELF
2
3 Copyright 2006, 2007 Free Software Foundation, Inc.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
20
21 #include "sysdep.h"
22 #include "bfd.h"
23 #include "bfdlink.h"
24 #include "libbfd.h"
25 #include "elf-bfd.h"
26 #include "elf/spu.h"
27 #include "elf32-spu.h"
28
29 /* We use RELA style relocs. Don't define USE_REL. */
30
31 static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
32 void *, asection *,
33 bfd *, char **);
34
/* Values of type 'enum elf_spu_reloc_type' are used to index this
   array, so it must be declared in the order of that type.
   HOWTO argument order: type, rightshift, size, bitsize, pc_relative,
   bitpos, complain_on_overflow, special_function, name,
   partial_inplace, src_mask, dst_mask, pcrel_offset.  */

static reloc_howto_type elf_howto_table[] = {
  HOWTO (R_SPU_NONE, 0, 0, 0, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_NONE",
	 FALSE, 0, 0x00000000, FALSE),
  HOWTO (R_SPU_ADDR10, 4, 2, 10, FALSE, 14, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR10",
	 FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16, 2, 2, 16, FALSE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16_HI",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_LO, 0, 2, 16, FALSE, 7, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR16_LO",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR18, 0, 2, 18, FALSE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR18",
	 FALSE, 0, 0x01ffff80, FALSE),
  HOWTO (R_SPU_ADDR32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR32",
	 FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_REL16, 2, 2, 16, TRUE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_REL16",
	 FALSE, 0, 0x007fff80, TRUE),
  HOWTO (R_SPU_ADDR7, 0, 2, 7, FALSE, 14, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR7",
	 FALSE, 0, 0x001fc000, FALSE),
  /* The two 9-bit pc-relative relocs have split bit-fields, so they
     need a special function rather than bfd_elf_generic_reloc.  */
  HOWTO (R_SPU_REL9, 2, 2, 9, TRUE, 0, complain_overflow_signed,
	 spu_elf_rel9, "SPU_REL9",
	 FALSE, 0, 0x0180007f, TRUE),
  HOWTO (R_SPU_REL9I, 2, 2, 9, TRUE, 0, complain_overflow_signed,
	 spu_elf_rel9, "SPU_REL9I",
	 FALSE, 0, 0x0000c07f, TRUE),
  HOWTO (R_SPU_ADDR10I, 0, 2, 10, FALSE, 14, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR10I",
	 FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16I, 0, 2, 16, FALSE, 7, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR16I",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_REL32, 0, 2, 32, TRUE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_REL32",
	 FALSE, 0, 0xffffffff, TRUE),
  HOWTO (R_SPU_ADDR16X, 0, 2, 16, FALSE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16X",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_PPU32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_PPU32",
	 FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_PPU64, 0, 4, 64, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_PPU64",
	 FALSE, 0, -1, FALSE),
};
91
/* Sections named ".toe" default to SHT_NOBITS with SHF_ALLOC and
   16-byte alignment (the "4" is log2 alignment).  */

static struct bfd_elf_special_section const spu_elf_special_sections[] = {
  { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
  { NULL, 0, 0, 0, 0 }
};
96
97 static enum elf_spu_reloc_type
98 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
99 {
100 switch (code)
101 {
102 default:
103 return R_SPU_NONE;
104 case BFD_RELOC_SPU_IMM10W:
105 return R_SPU_ADDR10;
106 case BFD_RELOC_SPU_IMM16W:
107 return R_SPU_ADDR16;
108 case BFD_RELOC_SPU_LO16:
109 return R_SPU_ADDR16_LO;
110 case BFD_RELOC_SPU_HI16:
111 return R_SPU_ADDR16_HI;
112 case BFD_RELOC_SPU_IMM18:
113 return R_SPU_ADDR18;
114 case BFD_RELOC_SPU_PCREL16:
115 return R_SPU_REL16;
116 case BFD_RELOC_SPU_IMM7:
117 return R_SPU_ADDR7;
118 case BFD_RELOC_SPU_IMM8:
119 return R_SPU_NONE;
120 case BFD_RELOC_SPU_PCREL9a:
121 return R_SPU_REL9;
122 case BFD_RELOC_SPU_PCREL9b:
123 return R_SPU_REL9I;
124 case BFD_RELOC_SPU_IMM10:
125 return R_SPU_ADDR10I;
126 case BFD_RELOC_SPU_IMM16:
127 return R_SPU_ADDR16I;
128 case BFD_RELOC_32:
129 return R_SPU_ADDR32;
130 case BFD_RELOC_32_PCREL:
131 return R_SPU_REL32;
132 case BFD_RELOC_SPU_PPU32:
133 return R_SPU_PPU32;
134 case BFD_RELOC_SPU_PPU64:
135 return R_SPU_PPU64;
136 }
137 }
138
/* Fill in CACHE_PTR->howto from the reloc type encoded in DST->r_info.
   Asserts that the type is within range of the howto table.  */

static void
spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
		       arelent *cache_ptr,
		       Elf_Internal_Rela *dst)
{
  enum elf_spu_reloc_type r_type;

  r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
  /* The howto table is indexed directly by reloc type; see the
     declaration-order comment on elf_howto_table.  */
  BFD_ASSERT (r_type < R_SPU_max);
  cache_ptr->howto = &elf_howto_table[(int) r_type];
}
150
151 static reloc_howto_type *
152 spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
153 bfd_reloc_code_real_type code)
154 {
155 enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);
156
157 if (r_type == R_SPU_NONE)
158 return NULL;
159
160 return elf_howto_table + r_type;
161 }
162
163 static reloc_howto_type *
164 spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
165 const char *r_name)
166 {
167 unsigned int i;
168
169 for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
170 if (elf_howto_table[i].name != NULL
171 && strcasecmp (elf_howto_table[i].name, r_name) == 0)
172 return &elf_howto_table[i];
173
174 return NULL;
175 }
176
/* Apply R_SPU_REL9 and R_SPU_REL9I relocs.  These cannot use
   bfd_elf_generic_reloc because the 9-bit value is split into two
   non-contiguous instruction fields; the howto dst_mask selects which
   split layout applies.  */

static bfd_reloc_status_type
spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
	      void *data, asection *input_section,
	      bfd *output_bfd, char **error_message)
{
  bfd_size_type octets;
  bfd_vma val;
  long insn;

  /* If this is a relocatable link (output_bfd test tells us), just
     call the generic function.  Any adjustment will be done at final
     link time.  */
  if (output_bfd != NULL)
    return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
				  input_section, output_bfd, error_message);

  if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
    return bfd_reloc_outofrange;
  octets = reloc_entry->address * bfd_octets_per_byte (abfd);

  /* Get symbol value.  */
  val = 0;
  if (!bfd_is_com_section (symbol->section))
    val = symbol->value;
  if (symbol->section->output_section)
    val += symbol->section->output_section->vma;

  val += reloc_entry->addend;

  /* Make it pc-relative.  */
  val -= input_section->output_section->vma + input_section->output_offset;

  /* Branch offsets are in words; check the word offset fits in a
     signed 9-bit field (-256..255).  */
  val >>= 2;
  if (val + 256 >= 512)
    return bfd_reloc_overflow;

  insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);

  /* Move two high bits of value to REL9I and REL9 position.
     The mask will take care of selecting the right field.  */
  val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
  insn &= ~reloc_entry->howto->dst_mask;
  insn |= val & reloc_entry->howto->dst_mask;
  bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
  return bfd_reloc_ok;
}
225
226 static bfd_boolean
227 spu_elf_new_section_hook (bfd *abfd, asection *sec)
228 {
229 if (!sec->used_by_bfd)
230 {
231 struct _spu_elf_section_data *sdata;
232
233 sdata = bfd_zalloc (abfd, sizeof (*sdata));
234 if (sdata == NULL)
235 return FALSE;
236 sec->used_by_bfd = sdata;
237 }
238
239 return _bfd_elf_new_section_hook (abfd, sec);
240 }
241
242 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
243 strip --strip-unneeded will not remove them. */
244
245 static void
246 spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
247 {
248 if (sym->name != NULL
249 && sym->section != bfd_abs_section_ptr
250 && strncmp (sym->name, "_EAR_", 5) == 0)
251 sym->flags |= BSF_KEEP;
252 }
253
/* SPU ELF linker hash table.  Extends the generic ELF hash table with
   the state needed for overlay stub generation.  */

struct spu_link_hash_table
{
  struct elf_link_hash_table elf;

  /* The stub hash table.  */
  struct bfd_hash_table stub_hash_table;

  /* Sorted array of stubs.  */
  struct {
    /* Array of COUNT pointers into stub_hash_table entries.  */
    struct spu_stub_hash_entry **sh;
    unsigned int count;
    /* Set non-zero if stub creation failed.  */
    int err;
  } stubs;

  /* Shortcuts to overlay sections.  */
  asection *stub;
  asection *ovtab;

  /* The __ovly_load entry, and the symbol index used when emitting
     relocations against it.  */
  struct elf_link_hash_entry *ovly_load;
  unsigned long ovly_load_r_symndx;

  /* An array of two output sections per overlay region, chosen such that
     the first section vma is the overlay buffer vma (ie. the section has
     the lowest vma in the group that occupy the region), and the second
     section vma+size specifies the end of the region.  We keep pointers
     to sections like this because section vmas may change when laying
     them out.  */
  asection **ovl_region;

  /* Number of overlay buffers.  */
  unsigned int num_buf;

  /* Total number of overlays.  */
  unsigned int num_overlays;

  /* Set if we should emit symbols for stubs.  */
  unsigned int emit_stub_syms:1;

  /* Set if we want stubs on calls out of overlay regions to
     non-overlay regions.  */
  unsigned int non_overlay_stubs : 1;

  /* Set on error.  */
  unsigned int stub_overflow : 1;

  /* Set if stack size analysis should be done.  */
  unsigned int stack_analysis : 1;

  /* Set if __stack_* syms will be emitted.  */
  unsigned int emit_stack_syms : 1;
};

/* Downcast the generic link info hash table to our derived table.  */
#define spu_hash_table(p) \
  ((struct spu_link_hash_table *) ((p)->hash))

/* One entry per overlay call stub.  */
struct spu_stub_hash_entry
{
  struct bfd_hash_entry root;

  /* Destination of this stub.  */
  asection *target_section;
  bfd_vma target_off;

  /* Offset of entry in stub section.  */
  bfd_vma off;

  /* Offset from this stub to stub that loads the overlay index.  */
  bfd_vma delta;
};
325
326 /* Create an entry in a spu stub hash table. */
327
328 static struct bfd_hash_entry *
329 stub_hash_newfunc (struct bfd_hash_entry *entry,
330 struct bfd_hash_table *table,
331 const char *string)
332 {
333 /* Allocate the structure if it has not already been allocated by a
334 subclass. */
335 if (entry == NULL)
336 {
337 entry = bfd_hash_allocate (table, sizeof (struct spu_stub_hash_entry));
338 if (entry == NULL)
339 return entry;
340 }
341
342 /* Call the allocation method of the superclass. */
343 entry = bfd_hash_newfunc (entry, table, string);
344 if (entry != NULL)
345 {
346 struct spu_stub_hash_entry *sh = (struct spu_stub_hash_entry *) entry;
347
348 sh->target_section = NULL;
349 sh->target_off = 0;
350 sh->off = 0;
351 sh->delta = 0;
352 }
353
354 return entry;
355 }
356
357 /* Create a spu ELF linker hash table. */
358
359 static struct bfd_link_hash_table *
360 spu_elf_link_hash_table_create (bfd *abfd)
361 {
362 struct spu_link_hash_table *htab;
363
364 htab = bfd_malloc (sizeof (*htab));
365 if (htab == NULL)
366 return NULL;
367
368 if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
369 _bfd_elf_link_hash_newfunc,
370 sizeof (struct elf_link_hash_entry)))
371 {
372 free (htab);
373 return NULL;
374 }
375
376 /* Init the stub hash table too. */
377 if (!bfd_hash_table_init (&htab->stub_hash_table, stub_hash_newfunc,
378 sizeof (struct spu_stub_hash_entry)))
379 return NULL;
380
381 memset (&htab->stubs, 0,
382 sizeof (*htab) - offsetof (struct spu_link_hash_table, stubs));
383
384 return &htab->elf.root;
385 }
386
/* Free the derived linker hash table.  */

static void
spu_elf_link_hash_table_free (struct bfd_link_hash_table *hash)
{
  struct spu_link_hash_table *ret = (struct spu_link_hash_table *) hash;

  /* Free the stub table before the generic table that embeds it.  */
  bfd_hash_table_free (&ret->stub_hash_table);
  _bfd_generic_link_hash_table_free (hash);
}
397
/* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
   to (hash, NULL) for global symbols, and (NULL, sym) for locals.  Set
   *SYMSECP to the symbol's section.  *LOCSYMSP caches local syms.
   Returns FALSE only if the symbol table could not be read.  */

static bfd_boolean
get_sym_h (struct elf_link_hash_entry **hp,
	   Elf_Internal_Sym **symp,
	   asection **symsecp,
	   Elf_Internal_Sym **locsymsp,
	   unsigned long r_symndx,
	   bfd *ibfd)
{
  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;

  if (r_symndx >= symtab_hdr->sh_info)
    {
      /* Global symbol: chase indirection and warning links to the
	 real hash entry.  */
      struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
      struct elf_link_hash_entry *h;

      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
      while (h->root.type == bfd_link_hash_indirect
	     || h->root.type == bfd_link_hash_warning)
	h = (struct elf_link_hash_entry *) h->root.u.i.link;

      if (hp != NULL)
	*hp = h;

      if (symp != NULL)
	*symp = NULL;

      if (symsecp != NULL)
	{
	  /* Only defined symbols have a section.  */
	  asection *symsec = NULL;
	  if (h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	    symsec = h->root.u.def.section;
	  *symsecp = symsec;
	}
    }
  else
    {
      /* Local symbol: read and cache the local part of the symbol
	 table on first use.  */
      Elf_Internal_Sym *sym;
      Elf_Internal_Sym *locsyms = *locsymsp;

      if (locsyms == NULL)
	{
	  locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
	  if (locsyms == NULL)
	    {
	      size_t symcount = symtab_hdr->sh_info;

	      /* If we are reading symbols into the contents, then
		 read the global syms too.  This is done to cache
		 syms for later stack analysis.  */
	      if ((unsigned char **) locsymsp == &symtab_hdr->contents)
		symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
	      locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
					      NULL, NULL, NULL);
	    }
	  if (locsyms == NULL)
	    return FALSE;
	  *locsymsp = locsyms;
	}
      sym = locsyms + r_symndx;

      if (hp != NULL)
	*hp = NULL;

      if (symp != NULL)
	*symp = sym;

      if (symsecp != NULL)
	{
	  /* Map the ELF section index to a BFD section, skipping
	     undefined and reserved indices.  */
	  asection *symsec = NULL;
	  if ((sym->st_shndx != SHN_UNDEF
	       && sym->st_shndx < SHN_LORESERVE)
	      || sym->st_shndx > SHN_HIRESERVE)
	    symsec = bfd_section_from_elf_index (ibfd, sym->st_shndx);
	  *symsecp = symsec;
	}
    }

  return TRUE;
}
482
/* Build a name for an entry in the stub hash table.  We can't use a
   local symbol name because ld -r might generate duplicate local symbols.
   Globals are named "SYM+ADDEND", locals "SECID:SYMNDX+ADDEND".
   Returns a malloc'd string (caller frees), or NULL on failure.  */

static char *
spu_stub_name (const asection *sym_sec,
	       const struct elf_link_hash_entry *h,
	       const Elf_Internal_Rela *rel)
{
  char *stub_name;
  bfd_size_type len;

  if (h)
    {
      /* Room for name, '+', up to 8 hex digits, and the NUL.  */
      len = strlen (h->root.root.string) + 1 + 8 + 1;
      stub_name = bfd_malloc (len);
      if (stub_name == NULL)
	return stub_name;

      sprintf (stub_name, "%s+%x",
	       h->root.root.string,
	       (int) rel->r_addend & 0xffffffff);
      /* Adjust LEN so that for a zero addend it indexes the
	 terminating NUL of "name+0"; see the "+0" strip below.  */
      len -= 8;
    }
  else
    {
      len = 8 + 1 + 8 + 1 + 8 + 1;
      stub_name = bfd_malloc (len);
      if (stub_name == NULL)
	return stub_name;

      sprintf (stub_name, "%x:%x+%x",
	       sym_sec->id & 0xffffffff,
	       (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
	       (int) rel->r_addend & 0xffffffff);
      len = strlen (stub_name);
    }

  /* Drop a trailing "+0" so zero-addend stubs get the plain name.  */
  if (stub_name[len - 2] == '+'
      && stub_name[len - 1] == '0'
      && stub_name[len] == 0)
    stub_name[len - 2] = 0;

  return stub_name;
}
527
/* Create the note section if not already present.  This is done early so
   that the linker maps the sections to the right place in the output.
   The note records the output file name under the SPU_PLUGIN_NAME
   vendor name.  Also stashes STACK_ANALYSIS and EMIT_STACK_SYMS in the
   hash table for later phases.  */

bfd_boolean
spu_elf_create_sections (bfd *output_bfd,
			 struct bfd_link_info *info,
			 int stack_analysis,
			 int emit_stack_syms)
{
  bfd *ibfd;
  struct spu_link_hash_table *htab = spu_hash_table (info);

  /* Stash some options away where we can get at them later.  */
  htab->stack_analysis = stack_analysis;
  htab->emit_stack_syms = emit_stack_syms;

  /* See whether any input already supplies the note section.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
      break;

  if (ibfd == NULL)
    {
      /* Make SPU_PTNOTE_SPUNAME section.  */
      asection *s;
      size_t name_len;
      size_t size;
      bfd_byte *data;
      flagword flags;

      ibfd = info->input_bfds;
      flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
      if (s == NULL
	  || !bfd_set_section_alignment (ibfd, s, 4))
	return FALSE;

      /* ELF note layout: 12-byte header (namesz, descsz, type), then
	 the name and descriptor, each padded to a multiple of 4.  */
      name_len = strlen (bfd_get_filename (output_bfd)) + 1;
      size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
      size += (name_len + 3) & -4;

      if (!bfd_set_section_size (ibfd, s, size))
	return FALSE;

      data = bfd_zalloc (ibfd, size);
      if (data == NULL)
	return FALSE;

      /* namesz, descsz, type=1, then name and desc strings.  */
      bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
      bfd_put_32 (ibfd, name_len, data + 4);
      bfd_put_32 (ibfd, 1, data + 8);
      memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
      memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
	      bfd_get_filename (output_bfd), name_len);
      s->contents = data;
    }

  return TRUE;
}
586
587 /* qsort predicate to sort sections by vma. */
588
589 static int
590 sort_sections (const void *a, const void *b)
591 {
592 const asection *const *s1 = a;
593 const asection *const *s2 = b;
594 bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;
595
596 if (delta != 0)
597 return delta < 0 ? -1 : 1;
598
599 return (*s1)->index - (*s2)->index;
600 }
601
602 /* Identify overlays in the output bfd, and number them. */
603
604 bfd_boolean
605 spu_elf_find_overlays (bfd *output_bfd, struct bfd_link_info *info)
606 {
607 struct spu_link_hash_table *htab = spu_hash_table (info);
608 asection **alloc_sec;
609 unsigned int i, n, ovl_index, num_buf;
610 asection *s;
611 bfd_vma ovl_end;
612
613 if (output_bfd->section_count < 2)
614 return FALSE;
615
616 alloc_sec = bfd_malloc (output_bfd->section_count * sizeof (*alloc_sec));
617 if (alloc_sec == NULL)
618 return FALSE;
619
620 /* Pick out all the alloced sections. */
621 for (n = 0, s = output_bfd->sections; s != NULL; s = s->next)
622 if ((s->flags & SEC_ALLOC) != 0
623 && (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
624 && s->size != 0)
625 alloc_sec[n++] = s;
626
627 if (n == 0)
628 {
629 free (alloc_sec);
630 return FALSE;
631 }
632
633 /* Sort them by vma. */
634 qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);
635
636 /* Look for overlapping vmas. Any with overlap must be overlays.
637 Count them. Also count the number of overlay regions and for
638 each region save a section from that region with the lowest vma
639 and another section with the highest end vma. */
640 ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
641 for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
642 {
643 s = alloc_sec[i];
644 if (s->vma < ovl_end)
645 {
646 asection *s0 = alloc_sec[i - 1];
647
648 if (spu_elf_section_data (s0)->ovl_index == 0)
649 {
650 spu_elf_section_data (s0)->ovl_index = ++ovl_index;
651 alloc_sec[num_buf * 2] = s0;
652 alloc_sec[num_buf * 2 + 1] = s0;
653 num_buf++;
654 }
655 spu_elf_section_data (s)->ovl_index = ++ovl_index;
656 if (ovl_end < s->vma + s->size)
657 {
658 ovl_end = s->vma + s->size;
659 alloc_sec[num_buf * 2 - 1] = s;
660 }
661 }
662 else
663 ovl_end = s->vma + s->size;
664 }
665
666 htab->num_overlays = ovl_index;
667 htab->num_buf = num_buf;
668 if (ovl_index == 0)
669 {
670 free (alloc_sec);
671 return FALSE;
672 }
673
674 alloc_sec = bfd_realloc (alloc_sec, num_buf * 2 * sizeof (*alloc_sec));
675 if (alloc_sec == NULL)
676 return FALSE;
677
678 htab->ovl_region = alloc_sec;
679 return TRUE;
680 }
681
/* One of these per stub: loads the target address and branches to the
   per-overlay stub2.  */
#define SIZEOF_STUB1 8
#define ILA_79 0x4200004f /* ila $79,function_address */
#define BR 0x32000000 /* br stub2 */

/* One of these per overlay: loads the overlay number and jumps to the
   overlay manager.  */
#define SIZEOF_STUB2 8
#define ILA_78 0x4200004e /* ila $78,overlay_number */
/* br __ovly_load */
#define NOP 0x40200000
692
693 /* Return true for all relative and absolute branch instructions.
694 bra 00110000 0..
695 brasl 00110001 0..
696 br 00110010 0..
697 brsl 00110011 0..
698 brz 00100000 0..
699 brnz 00100001 0..
700 brhz 00100010 0..
701 brhnz 00100011 0.. */
702
703 static bfd_boolean
704 is_branch (const unsigned char *insn)
705 {
706 return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
707 }
708
709 /* Return true for all indirect branch instructions.
710 bi 00110101 000
711 bisl 00110101 001
712 iret 00110101 010
713 bisled 00110101 011
714 biz 00100101 000
715 binz 00100101 001
716 bihz 00100101 010
717 bihnz 00100101 011 */
718
719 static bfd_boolean
720 is_indirect_branch (const unsigned char *insn)
721 {
722 return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
723 }
724
725 /* Return true for branch hint instructions.
726 hbra 0001000..
727 hbrr 0001001.. */
728
729 static bfd_boolean
730 is_hint (const unsigned char *insn)
731 {
732 return (insn[0] & 0xfc) == 0x10;
733 }
734
/* Return TRUE if this reloc symbol should possibly go via an overlay
   stub.  SYM_SEC is the section the symbol lives in, INPUT_SECTION is
   where the reference is made from, and IS_BRANCH says whether the
   referencing insn is a branch/call (as opposed to taking an address).  */

static bfd_boolean
needs_ovl_stub (const char *sym_name,
		asection *sym_sec,
		asection *input_section,
		struct spu_link_hash_table *htab,
		bfd_boolean is_branch)
{
  /* No overlays, no stubs.  */
  if (htab->num_overlays == 0)
    return FALSE;

  if (sym_sec == NULL
      || sym_sec->output_section == NULL
      || spu_elf_section_data (sym_sec->output_section) == NULL)
    return FALSE;

  /* setjmp always goes via an overlay stub, because then the return
     and hence the longjmp goes via __ovly_return.  That magically
     makes setjmp/longjmp between overlays work.  */
  if (strncmp (sym_name, "setjmp", 6) == 0
      && (sym_name[6] == '\0' || sym_name[6] == '@'))
    return TRUE;

  /* Usually, symbols in non-overlay sections don't need stubs.  */
  if (spu_elf_section_data (sym_sec->output_section)->ovl_index == 0
      && !htab->non_overlay_stubs)
    return FALSE;

  /* A reference from some other section to a symbol in an overlay
     section needs a stub.  */
  if (spu_elf_section_data (sym_sec->output_section)->ovl_index
      != spu_elf_section_data (input_section->output_section)->ovl_index)
    return TRUE;

  /* If this insn isn't a branch then we are possibly taking the
     address of a function and passing it out somehow.  */
  return !is_branch;
}
774
/* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
   symbols.  INF is the SPU link hash table.  Returns FALSE (stopping
   the traversal) on allocation failure.  */

static bfd_boolean
allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
{
  /* Symbols starting with _SPUEAR_ need a stub because they may be
     invoked by the PPU.  */
  if ((h->root.type == bfd_link_hash_defined
       || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
    {
      struct spu_link_hash_table *htab = inf;
      /* Static so it stays zeroed: a zero reloc gives a "+0" addend,
	 which spu_stub_name strips.  */
      static Elf_Internal_Rela zero_rel;
      char *stub_name = spu_stub_name (h->root.u.def.section, h, &zero_rel);
      struct spu_stub_hash_entry *sh;

      if (stub_name == NULL)
	{
	  htab->stubs.err = 1;
	  return FALSE;
	}

      sh = (struct spu_stub_hash_entry *)
	bfd_hash_lookup (&htab->stub_hash_table, stub_name, TRUE, FALSE);
      if (sh == NULL)
	{
	  free (stub_name);
	  return FALSE;
	}

      /* If this entry isn't new, we already have a stub.  */
      if (sh->target_section != NULL)
	{
	  free (stub_name);
	  return TRUE;
	}

      sh->target_section = h->root.u.def.section;
      sh->target_off = h->root.u.def.value;
      htab->stubs.count += 1;
    }

  return TRUE;
}
821
822 /* Called via bfd_hash_traverse to set up pointers to all symbols
823 in the stub hash table. */
824
825 static bfd_boolean
826 populate_stubs (struct bfd_hash_entry *bh, void *inf)
827 {
828 struct spu_link_hash_table *htab = inf;
829
830 htab->stubs.sh[--htab->stubs.count] = (struct spu_stub_hash_entry *) bh;
831 return TRUE;
832 }
833
/* qsort predicate to sort stubs by overlay number, then by target
   address, then by name.  Two stubs for the same address can only be
   aliases, so names must differ.  */

static int
sort_stubs (const void *a, const void *b)
{
  const struct spu_stub_hash_entry *const *sa = a;
  const struct spu_stub_hash_entry *const *sb = b;
  int i;
  bfd_signed_vma d;

  /* Primary key: the target's overlay index.  */
  i = spu_elf_section_data ((*sa)->target_section->output_section)->ovl_index;
  i -= spu_elf_section_data ((*sb)->target_section->output_section)->ovl_index;
  if (i != 0)
    return i;

  /* Secondary key: the target's output address.  */
  d = ((*sa)->target_section->output_section->vma
       + (*sa)->target_section->output_offset
       + (*sa)->target_off
       - (*sb)->target_section->output_section->vma
       - (*sb)->target_section->output_offset
       - (*sb)->target_off);
  if (d != 0)
    return d < 0 ? -1 : 1;

  /* Two functions at the same address.  Aliases perhaps.  */
  i = strcmp ((*sb)->root.string, (*sa)->root.string);
  BFD_ASSERT (i != 0);
  return i;
}
863
864 /* Allocate space for overlay call and return stubs. */
865
866 bfd_boolean
867 spu_elf_size_stubs (bfd *output_bfd,
868 struct bfd_link_info *info,
869 int non_overlay_stubs,
870 int stack_analysis,
871 asection **stub,
872 asection **ovtab,
873 asection **toe)
874 {
875 struct spu_link_hash_table *htab = spu_hash_table (info);
876 bfd *ibfd;
877 unsigned i, group;
878 flagword flags;
879
880 htab->non_overlay_stubs = non_overlay_stubs;
881 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
882 {
883 extern const bfd_target bfd_elf32_spu_vec;
884 Elf_Internal_Shdr *symtab_hdr;
885 asection *section;
886 Elf_Internal_Sym *local_syms = NULL;
887 void *psyms;
888
889 if (ibfd->xvec != &bfd_elf32_spu_vec)
890 continue;
891
892 /* We'll need the symbol table in a second. */
893 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
894 if (symtab_hdr->sh_info == 0)
895 continue;
896
897 /* Arrange to read and keep global syms for later stack analysis. */
898 psyms = &local_syms;
899 if (stack_analysis)
900 psyms = &symtab_hdr->contents;
901
902 /* Walk over each section attached to the input bfd. */
903 for (section = ibfd->sections; section != NULL; section = section->next)
904 {
905 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
906
907 /* If there aren't any relocs, then there's nothing more to do. */
908 if ((section->flags & SEC_RELOC) == 0
909 || (section->flags & SEC_ALLOC) == 0
910 || (section->flags & SEC_LOAD) == 0
911 || section->reloc_count == 0)
912 continue;
913
914 /* If this section is a link-once section that will be
915 discarded, then don't create any stubs. */
916 if (section->output_section == NULL
917 || section->output_section->owner != output_bfd)
918 continue;
919
920 /* Get the relocs. */
921 internal_relocs
922 = _bfd_elf_link_read_relocs (ibfd, section, NULL, NULL,
923 info->keep_memory);
924 if (internal_relocs == NULL)
925 goto error_ret_free_local;
926
927 /* Now examine each relocation. */
928 irela = internal_relocs;
929 irelaend = irela + section->reloc_count;
930 for (; irela < irelaend; irela++)
931 {
932 enum elf_spu_reloc_type r_type;
933 unsigned int r_indx;
934 asection *sym_sec;
935 Elf_Internal_Sym *sym;
936 struct elf_link_hash_entry *h;
937 const char *sym_name;
938 char *stub_name;
939 struct spu_stub_hash_entry *sh;
940 unsigned int sym_type;
941 enum _insn_type { non_branch, branch, call } insn_type;
942
943 r_type = ELF32_R_TYPE (irela->r_info);
944 r_indx = ELF32_R_SYM (irela->r_info);
945
946 if (r_type >= R_SPU_max)
947 {
948 bfd_set_error (bfd_error_bad_value);
949 goto error_ret_free_internal;
950 }
951
952 /* Determine the reloc target section. */
953 if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, ibfd))
954 goto error_ret_free_internal;
955
956 if (sym_sec == NULL
957 || sym_sec->output_section == NULL
958 || sym_sec->output_section->owner != output_bfd)
959 continue;
960
961 /* Ensure no stubs for user supplied overlay manager syms. */
962 if (h != NULL
963 && (strcmp (h->root.root.string, "__ovly_load") == 0
964 || strcmp (h->root.root.string, "__ovly_return") == 0))
965 continue;
966
967 insn_type = non_branch;
968 if (r_type == R_SPU_REL16
969 || r_type == R_SPU_ADDR16)
970 {
971 unsigned char insn[4];
972
973 if (!bfd_get_section_contents (ibfd, section, insn,
974 irela->r_offset, 4))
975 goto error_ret_free_internal;
976
977 if (is_branch (insn) || is_hint (insn))
978 {
979 insn_type = branch;
980 if ((insn[0] & 0xfd) == 0x31)
981 insn_type = call;
982 }
983 }
984
985 /* We are only interested in function symbols. */
986 if (h != NULL)
987 {
988 sym_type = h->type;
989 sym_name = h->root.root.string;
990 }
991 else
992 {
993 sym_type = ELF_ST_TYPE (sym->st_info);
994 sym_name = bfd_elf_sym_name (sym_sec->owner,
995 symtab_hdr,
996 sym,
997 sym_sec);
998 }
999
1000 if (sym_type != STT_FUNC)
1001 {
1002 /* It's common for people to write assembly and forget
1003 to give function symbols the right type. Handle
1004 calls to such symbols, but warn so that (hopefully)
1005 people will fix their code. We need the symbol
1006 type to be correct to distinguish function pointer
1007 initialisation from other pointer initialisation. */
1008 if (insn_type == call)
1009 (*_bfd_error_handler) (_("warning: call to non-function"
1010 " symbol %s defined in %B"),
1011 sym_sec->owner, sym_name);
1012 else if (insn_type == non_branch)
1013 continue;
1014 }
1015
1016 if (!needs_ovl_stub (sym_name, sym_sec, section, htab,
1017 insn_type != non_branch))
1018 continue;
1019
1020 stub_name = spu_stub_name (sym_sec, h, irela);
1021 if (stub_name == NULL)
1022 goto error_ret_free_internal;
1023
1024 sh = (struct spu_stub_hash_entry *)
1025 bfd_hash_lookup (&htab->stub_hash_table, stub_name,
1026 TRUE, FALSE);
1027 if (sh == NULL)
1028 {
1029 free (stub_name);
1030 error_ret_free_internal:
1031 if (elf_section_data (section)->relocs != internal_relocs)
1032 free (internal_relocs);
1033 error_ret_free_local:
1034 if (local_syms != NULL
1035 && (symtab_hdr->contents
1036 != (unsigned char *) local_syms))
1037 free (local_syms);
1038 return FALSE;
1039 }
1040
1041 /* If this entry isn't new, we already have a stub. */
1042 if (sh->target_section != NULL)
1043 {
1044 free (stub_name);
1045 continue;
1046 }
1047
1048 sh->target_section = sym_sec;
1049 if (h != NULL)
1050 sh->target_off = h->root.u.def.value;
1051 else
1052 sh->target_off = sym->st_value;
1053 sh->target_off += irela->r_addend;
1054
1055 htab->stubs.count += 1;
1056 }
1057
1058 /* We're done with the internal relocs, free them. */
1059 if (elf_section_data (section)->relocs != internal_relocs)
1060 free (internal_relocs);
1061 }
1062
1063 if (local_syms != NULL
1064 && symtab_hdr->contents != (unsigned char *) local_syms)
1065 {
1066 if (!info->keep_memory)
1067 free (local_syms);
1068 else
1069 symtab_hdr->contents = (unsigned char *) local_syms;
1070 }
1071 }
1072
1073 elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, htab);
1074 if (htab->stubs.err)
1075 return FALSE;
1076
1077 *stub = NULL;
1078 if (htab->stubs.count == 0)
1079 return TRUE;
1080
1081 ibfd = info->input_bfds;
1082 flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
1083 | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
1084 htab->stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
1085 *stub = htab->stub;
1086 if (htab->stub == NULL
1087 || !bfd_set_section_alignment (ibfd, htab->stub, 4))
1088 return FALSE;
1089
1090 flags = (SEC_ALLOC | SEC_LOAD
1091 | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
1092 htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
1093 *ovtab = htab->ovtab;
1094 if (htab->ovtab == NULL
1095 || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
1096 return FALSE;
1097
1098 *toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
1099 if (*toe == NULL
1100 || !bfd_set_section_alignment (ibfd, *toe, 4))
1101 return FALSE;
1102 (*toe)->size = 16;
1103
1104 /* Retrieve all the stubs and sort. */
1105 htab->stubs.sh = bfd_malloc (htab->stubs.count * sizeof (*htab->stubs.sh));
1106 if (htab->stubs.sh == NULL)
1107 return FALSE;
1108 i = htab->stubs.count;
1109 bfd_hash_traverse (&htab->stub_hash_table, populate_stubs, htab);
1110 BFD_ASSERT (htab->stubs.count == 0);
1111
1112 htab->stubs.count = i;
1113 qsort (htab->stubs.sh, htab->stubs.count, sizeof (*htab->stubs.sh),
1114 sort_stubs);
1115
1116 /* Now that the stubs are sorted, place them in the stub section.
1117 Stubs are grouped per overlay
1118 . ila $79,func1
1119 . br 1f
1120 . ila $79,func2
1121 . br 1f
1122 .
1123 .
1124 . ila $79,funcn
1125 . nop
1126 . 1:
1127 . ila $78,ovl_index
1128 . br __ovly_load */
1129
1130 group = 0;
1131 for (i = 0; i < htab->stubs.count; i++)
1132 {
1133 if (spu_elf_section_data (htab->stubs.sh[group]->target_section
1134 ->output_section)->ovl_index
1135 != spu_elf_section_data (htab->stubs.sh[i]->target_section
1136 ->output_section)->ovl_index)
1137 {
1138 htab->stub->size += SIZEOF_STUB2;
1139 for (; group != i; group++)
1140 htab->stubs.sh[group]->delta
1141 = htab->stubs.sh[i - 1]->off - htab->stubs.sh[group]->off;
1142 }
1143 if (group == i
1144 || ((htab->stubs.sh[i - 1]->target_section->output_section->vma
1145 + htab->stubs.sh[i - 1]->target_section->output_offset
1146 + htab->stubs.sh[i - 1]->target_off)
1147 != (htab->stubs.sh[i]->target_section->output_section->vma
1148 + htab->stubs.sh[i]->target_section->output_offset
1149 + htab->stubs.sh[i]->target_off)))
1150 {
1151 htab->stubs.sh[i]->off = htab->stub->size;
1152 htab->stub->size += SIZEOF_STUB1;
1153 if (info->emitrelocations)
1154 htab->stub->reloc_count += 1;
1155 }
1156 else
1157 htab->stubs.sh[i]->off = htab->stubs.sh[i - 1]->off;
1158 }
1159 if (group != i)
1160 htab->stub->size += SIZEOF_STUB2;
1161 if (info->emitrelocations)
1162 htab->stub->flags |= SEC_RELOC;
1163 for (; group != i; group++)
1164 htab->stubs.sh[group]->delta
1165 = htab->stubs.sh[i - 1]->off - htab->stubs.sh[group]->off;
1166
1167 /* htab->ovtab consists of two arrays.
1168 . struct {
1169 . u32 vma;
1170 . u32 size;
1171 . u32 file_off;
1172 . u32 buf;
1173 . } _ovly_table[];
1174 .
1175 . struct {
1176 . u32 mapped;
1177 . } _ovly_buf_table[]; */
1178
1179 htab->ovtab->alignment_power = 4;
1180 htab->ovtab->size = htab->num_overlays * 16 + htab->num_buf * 4;
1181
1182 return TRUE;
1183 }
1184
/* Functions to handle embedded spu_ovl.o object.  */

/* bfd_openr_iovec "open" callback.  The pseudo-file's backing store is
   the in-memory overlay manager image described by STREAM (a struct
   _ovl_stream), so just hand the stream pointer back as the cookie.  */
static void *
ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
{
  return stream;
}
1192
1193 static file_ptr
1194 ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
1195 void *stream,
1196 void *buf,
1197 file_ptr nbytes,
1198 file_ptr offset)
1199 {
1200 struct _ovl_stream *os;
1201 size_t count;
1202 size_t max;
1203
1204 os = (struct _ovl_stream *) stream;
1205 max = (const char *) os->end - (const char *) os->start;
1206
1207 if ((ufile_ptr) offset >= max)
1208 return 0;
1209
1210 count = nbytes;
1211 if (count > max - offset)
1212 count = max - offset;
1213
1214 memcpy (buf, (const char *) os->start + offset, count);
1215 return count;
1216 }
1217
1218 bfd_boolean
1219 spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
1220 {
1221 *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
1222 "elf32-spu",
1223 ovl_mgr_open,
1224 (void *) stream,
1225 ovl_mgr_pread,
1226 NULL,
1227 NULL);
1228 return *ovl_bfd != NULL;
1229 }
1230
/* Fill in the ila and br for a stub.  On the last stub for a group,
   write the stub that sets the overlay number too.  Returns FALSE only
   on allocation failure.  */

static bfd_boolean
write_one_stub (struct spu_stub_hash_entry *ent, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection *sec = htab->stub;
  asection *s = ent->target_section;
  unsigned int ovl;
  bfd_vma val;

  /* "ila $79,target" -- load the final address of the stub target.  */
  val = ent->target_off + s->output_offset + s->output_section->vma;
  bfd_put_32 (sec->owner, ILA_79 + ((val << 7) & 0x01ffff80),
	      sec->contents + ent->off);
  /* "br" forward over the remaining stubs of this group to the shared
     trailer (DELTA is the distance to the last stub of the group).  */
  val = ent->delta + 4;
  bfd_put_32 (sec->owner, BR + ((val << 5) & 0x007fff80),
	      sec->contents + ent->off + 4);

  if (info->emitrelocations)
    {
      Elf_Internal_Rela *relocs, *r;
      struct bfd_elf_section_data *elfsec_data;

      elfsec_data = elf_section_data (sec);
      relocs = elfsec_data->relocs;
      if (relocs == NULL)
	{
	  /* First stub: allocate the reloc array sized during stub
	     sizing, then reset reloc_count so it counts relocs as
	     they are emitted below.  */
	  bfd_size_type relsize;
	  Elf_Internal_Shdr *symtab_hdr;
	  struct elf_link_hash_entry **sym_hash;
	  unsigned long symcount;
	  bfd_vma amt;

	  relsize = sec->reloc_count * sizeof (*relocs);
	  relocs = bfd_alloc (sec->owner, relsize);
	  if (relocs == NULL)
	    return FALSE;
	  elfsec_data->relocs = relocs;
	  elfsec_data->rel_hdr.sh_size
	    = sec->reloc_count * sizeof (Elf32_External_Rela);
	  elfsec_data->rel_hdr.sh_entsize = sizeof (Elf32_External_Rela);
	  sec->reloc_count = 0;

	  /* Increase the size of symbol hash array on the bfd to
	     which we attached our .stub section.  This hack allows
	     us to create relocs against global symbols.  */
	  symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
	  symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
	  symcount -= symtab_hdr->sh_info;
	  amt = symcount * sizeof (*sym_hash);
	  sym_hash = bfd_alloc (sec->owner, amt + sizeof (*sym_hash));
	  if (sym_hash == NULL)
	    return FALSE;
	  memcpy (sym_hash, elf_sym_hashes (sec->owner), amt);
	  /* Append __ovly_load so relocs can reference it.  */
	  sym_hash[symcount] = htab->ovly_load;
	  htab->ovly_load_r_symndx = symcount + symtab_hdr->sh_info;
	  elf_sym_hashes (sec->owner) = sym_hash;
	}
      /* Emit a REL16 for the "br" written above.  Symbol index 0;
	 the target is encoded entirely in the addend.  */
      r = relocs + sec->reloc_count;
      sec->reloc_count += 1;
      r->r_offset = ent->off + 4;
      r->r_info = ELF32_R_INFO (0, R_SPU_REL16);
      r->r_addend = (sec->output_section->vma
		     + sec->output_offset
		     + ent->off + 4
		     + val);
    }

  /* If this is the last stub of this group, write stub2.  */
  if (ent->delta == 0)
    {
      /* Overwrite the group-trailing "br" with a nop; the trailer
	 instructions follow immediately.  */
      bfd_put_32 (sec->owner, NOP,
		  sec->contents + ent->off + 4);

      /* "ila $78,ovl_index" -- tell __ovly_load which overlay.  */
      ovl = spu_elf_section_data (s->output_section)->ovl_index;
      bfd_put_32 (sec->owner, ILA_78 + ((ovl << 7) & 0x01ffff80),
		  sec->contents + ent->off + 8);

      /* PC-relative displacement from the branch to __ovly_load.  */
      val = (htab->ovly_load->root.u.def.section->output_section->vma
	     + htab->ovly_load->root.u.def.section->output_offset
	     + htab->ovly_load->root.u.def.value
	     - (sec->output_section->vma
		+ sec->output_offset
		+ ent->off + 12));

      /* br has an 18-bit (word) signed range; flag overflow for the
	 caller to report.  */
      if (val + 0x20000 >= 0x40000)
	htab->stub_overflow = TRUE;

      bfd_put_32 (sec->owner, BR + ((val << 5) & 0x007fff80),
		  sec->contents + ent->off + 12);

      if (info->emitrelocations)
	{
	  Elf_Internal_Rela *relocs, *r;
	  struct bfd_elf_section_data *elfsec_data;

	  elfsec_data = elf_section_data (sec);
	  relocs = elfsec_data->relocs;
	  /* The last branch is overwritten, so overwrite its reloc too.  */
	  r = relocs + sec->reloc_count - 1;
	  r->r_offset = ent->off + 12;
	  r->r_info = ELF32_R_INFO (htab->ovly_load_r_symndx, R_SPU_REL16);
	  r->r_addend = 0;
	}
    }

  if (htab->emit_stub_syms)
    {
      /* Define "XXXXXXXX.ovl_call.<target>" at the stub, mainly for
	 the benefit of debuggers and disassembly.  */
      struct elf_link_hash_entry *h;
      size_t len1, len2;
      char *name;

      len1 = sizeof ("00000000.ovl_call.") - 1;
      len2 = strlen (ent->root.string);
      name = bfd_malloc (len1 + len2 + 1);
      if (name == NULL)
	return FALSE;
      memcpy (name, "00000000.ovl_call.", len1);
      memcpy (name + len1, ent->root.string, len2 + 1);
      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
      free (name);
      if (h == NULL)
	return FALSE;
      if (h->root.type == bfd_link_hash_new)
	{
	  h->root.type = bfd_link_hash_defined;
	  h->root.u.def.section = sec;
	  h->root.u.def.value = ent->off;
	  /* The last stub of a group also owns the trailer.  */
	  h->size = (ent->delta == 0
		     ? SIZEOF_STUB1 + SIZEOF_STUB2 : SIZEOF_STUB1);
	  h->type = STT_FUNC;
	  h->ref_regular = 1;
	  h->def_regular = 1;
	  h->ref_regular_nonweak = 1;
	  h->forced_local = 1;
	  h->non_elf = 0;
	}
    }

  return TRUE;
}
1373
1374 /* Define an STT_OBJECT symbol. */
1375
1376 static struct elf_link_hash_entry *
1377 define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
1378 {
1379 struct elf_link_hash_entry *h;
1380
1381 h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
1382 if (h == NULL)
1383 return NULL;
1384
1385 if (h->root.type != bfd_link_hash_defined
1386 || !h->def_regular)
1387 {
1388 h->root.type = bfd_link_hash_defined;
1389 h->root.u.def.section = htab->ovtab;
1390 h->type = STT_OBJECT;
1391 h->ref_regular = 1;
1392 h->def_regular = 1;
1393 h->ref_regular_nonweak = 1;
1394 h->non_elf = 0;
1395 }
1396 else
1397 {
1398 (*_bfd_error_handler) (_("%B is not allowed to define %s"),
1399 h->root.u.def.section->owner,
1400 h->root.root.string);
1401 bfd_set_error (bfd_error_bad_value);
1402 return NULL;
1403 }
1404
1405 return h;
1406 }
1407
1408 /* Fill in all stubs and the overlay tables. */
1409
1410 bfd_boolean
1411 spu_elf_build_stubs (struct bfd_link_info *info, int emit_syms, asection *toe)
1412 {
1413 struct spu_link_hash_table *htab = spu_hash_table (info);
1414 struct elf_link_hash_entry *h;
1415 bfd_byte *p;
1416 asection *s;
1417 bfd *obfd;
1418 unsigned int i;
1419
1420 htab->emit_stub_syms = emit_syms;
1421 htab->stub->contents = bfd_zalloc (htab->stub->owner, htab->stub->size);
1422 if (htab->stub->contents == NULL)
1423 return FALSE;
1424
1425 h = elf_link_hash_lookup (&htab->elf, "__ovly_load", FALSE, FALSE, FALSE);
1426 htab->ovly_load = h;
1427 BFD_ASSERT (h != NULL
1428 && (h->root.type == bfd_link_hash_defined
1429 || h->root.type == bfd_link_hash_defweak)
1430 && h->def_regular);
1431
1432 s = h->root.u.def.section->output_section;
1433 if (spu_elf_section_data (s)->ovl_index)
1434 {
1435 (*_bfd_error_handler) (_("%s in overlay section"),
1436 h->root.u.def.section->owner);
1437 bfd_set_error (bfd_error_bad_value);
1438 return FALSE;
1439 }
1440
1441 /* Write out all the stubs. */
1442 for (i = 0; i < htab->stubs.count; i++)
1443 write_one_stub (htab->stubs.sh[i], info);
1444
1445 if (htab->stub_overflow)
1446 {
1447 (*_bfd_error_handler) (_("overlay stub relocation overflow"));
1448 bfd_set_error (bfd_error_bad_value);
1449 return FALSE;
1450 }
1451
1452 htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
1453 if (htab->ovtab->contents == NULL)
1454 return FALSE;
1455
1456 /* Write out _ovly_table. */
1457 p = htab->ovtab->contents;
1458 obfd = htab->ovtab->output_section->owner;
1459 for (s = obfd->sections; s != NULL; s = s->next)
1460 {
1461 unsigned int ovl_index = spu_elf_section_data (s)->ovl_index;
1462
1463 if (ovl_index != 0)
1464 {
1465 unsigned int lo, hi, mid;
1466 unsigned long off = (ovl_index - 1) * 16;
1467 bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
1468 bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16, p + off + 4);
1469 /* file_off written later in spu_elf_modify_program_headers. */
1470
1471 lo = 0;
1472 hi = htab->num_buf;
1473 while (lo < hi)
1474 {
1475 mid = (lo + hi) >> 1;
1476 if (htab->ovl_region[2 * mid + 1]->vma
1477 + htab->ovl_region[2 * mid + 1]->size <= s->vma)
1478 lo = mid + 1;
1479 else if (htab->ovl_region[2 * mid]->vma > s->vma)
1480 hi = mid;
1481 else
1482 {
1483 bfd_put_32 (htab->ovtab->owner, mid + 1, p + off + 12);
1484 break;
1485 }
1486 }
1487 BFD_ASSERT (lo < hi);
1488 }
1489 }
1490
1491 /* Write out _ovly_buf_table. */
1492 p = htab->ovtab->contents + htab->num_overlays * 16;
1493 for (i = 0; i < htab->num_buf; i++)
1494 {
1495 bfd_put_32 (htab->ovtab->owner, 0, p);
1496 p += 4;
1497 }
1498
1499 h = define_ovtab_symbol (htab, "_ovly_table");
1500 if (h == NULL)
1501 return FALSE;
1502 h->root.u.def.value = 0;
1503 h->size = htab->num_overlays * 16;
1504
1505 h = define_ovtab_symbol (htab, "_ovly_table_end");
1506 if (h == NULL)
1507 return FALSE;
1508 h->root.u.def.value = htab->num_overlays * 16;
1509 h->size = 0;
1510
1511 h = define_ovtab_symbol (htab, "_ovly_buf_table");
1512 if (h == NULL)
1513 return FALSE;
1514 h->root.u.def.value = htab->num_overlays * 16;
1515 h->size = htab->num_buf * 4;
1516
1517 h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
1518 if (h == NULL)
1519 return FALSE;
1520 h->root.u.def.value = htab->num_overlays * 16 + htab->num_buf * 4;
1521 h->size = 0;
1522
1523 h = define_ovtab_symbol (htab, "_EAR_");
1524 if (h == NULL)
1525 return FALSE;
1526 h->root.u.def.section = toe;
1527 h->root.u.def.value = 0;
1528 h->size = 16;
1529
1530 return TRUE;
1531 }
1532
/* OFFSET in SEC (presumably) is the beginning of a function prologue.
   Search for stack adjusting insns, and return the sp delta.
   Instructions are scanned in order, modelling register contents in
   REG[]; the scan stops at the first branch, at the end of the
   section, or after 32 unrecognised insns, returning 0 if no stack
   adjustment was found by then.  The returned delta is whatever was
   added to $sp (typically negative).  */

static int
find_function_stack_adjust (asection *sec, bfd_vma offset)
{
  int unrecog;
  int reg[128];

  memset (reg, 0, sizeof (reg));
  for (unrecog = 0; offset + 4 <= sec->size && unrecog < 32; offset += 4)
    {
      unsigned char buf[4];
      int rt, ra;
      int imm;

      /* Assume no relocs on stack adjusing insns.  */
      if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
	break;

      /* Stores don't affect the modelled registers; skip them.  */
      if (buf[0] == 0x24 /* stqd */)
	continue;

      /* Decode the RT and RA fields common to most insn formats.  */
      rt = buf[3] & 0x7f;
      ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);
      /* Partly decoded immediate field.  */
      imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);

      if (buf[0] == 0x1c /* ai */)
	{
	  /* Sign-extend the 10-bit immediate.  */
	  imm >>= 7;
	  imm = (imm ^ 0x200) - 0x200;
	  reg[rt] = reg[ra] + imm;

	  if (rt == 1 /* sp */)
	    {
	      /* A positive adjustment would be an epilogue, not a
		 prologue -- give up.  */
	      if (imm > 0)
		break;
	      return reg[rt];
	    }
	}
      else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
	{
	  int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);

	  reg[rt] = reg[ra] + reg[rb];
	  if (rt == 1)
	    return reg[rt];
	}
      else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
	{
	  if (buf[0] >= 0x42 /* ila */)
	    imm |= (buf[0] & 1) << 17;
	  else
	    {
	      imm &= 0xffff;

	      if (buf[0] == 0x40 /* il */)
		{
		  if ((buf[1] & 0x80) == 0)
		    goto unknown_insn;
		  /* il: sign-extend 16-bit immediate.  */
		  imm = (imm ^ 0x8000) - 0x8000;
		}
	      else if ((buf[1] & 0x80) == 0 /* ilhu */)
		imm <<= 16;
	    }
	  reg[rt] = imm;
	  continue;
	}
      else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
	{
	  /* iohl or-s an immediate into the low halfword.  */
	  reg[rt] |= imm & 0xffff;
	  continue;
	}
      else if (buf[0] == 0x04 /* ori */)
	{
	  imm >>= 7;
	  imm = (imm ^ 0x200) - 0x200;
	  reg[rt] = reg[ra] | imm;
	  continue;
	}
      else if ((buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
	       || (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */))
	{
	  /* Used in pic reg load.  Say rt is trashed.  */
	  reg[rt] = 0;
	  continue;
	}
      else if (is_branch (buf) || is_indirect_branch (buf))
	/* If we hit a branch then we must be out of the prologue.  */
	break;
      unknown_insn:
      ++unrecog;
    }

  return 0;
}
1630
1631 /* qsort predicate to sort symbols by section and value. */
1632
1633 static Elf_Internal_Sym *sort_syms_syms;
1634 static asection **sort_syms_psecs;
1635
1636 static int
1637 sort_syms (const void *a, const void *b)
1638 {
1639 Elf_Internal_Sym *const *s1 = a;
1640 Elf_Internal_Sym *const *s2 = b;
1641 asection *sec1,*sec2;
1642 bfd_signed_vma delta;
1643
1644 sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
1645 sec2 = sort_syms_psecs[*s2 - sort_syms_syms];
1646
1647 if (sec1 != sec2)
1648 return sec1->index - sec2->index;
1649
1650 delta = (*s1)->st_value - (*s2)->st_value;
1651 if (delta != 0)
1652 return delta < 0 ? -1 : 1;
1653
1654 delta = (*s2)->st_size - (*s1)->st_size;
1655 if (delta != 0)
1656 return delta < 0 ? -1 : 1;
1657
1658 return *s1 < *s2 ? -1 : 1;
1659 }
1660
/* One edge in the call graph, linked into the calling function's
   call_list.  */

struct call_info
{
  /* The function called or branched to.  */
  struct function_info *fun;
  /* Next entry on the caller's list.  */
  struct call_info *next;
  /* Set when the reference was a plain branch rather than a call
     instruction, ie. a potential tail call.  */
  int is_tail;
};
1667
/* Per-function (or per-function-fragment) info used by the stack
   analysis code.  */

struct function_info
{
  /* List of functions called.  Also branches to hot/cold part of
     function.  */
  struct call_info *call_list;
  /* For hot/cold part of function, point to owner.  */
  struct function_info *start;
  /* Symbol at start of function.  Which member is valid is selected
     by the "global" flag below.  */
  union {
    Elf_Internal_Sym *sym;
    struct elf_link_hash_entry *h;
  } u;
  /* Function section.  */
  asection *sec;
  /* Address range of (this part of) function.  */
  bfd_vma lo, hi;
  /* Stack usage.  */
  int stack;
  /* Set if global symbol.  */
  unsigned int global : 1;
  /* Set if known to be start of function (as distinct from a hunk
     in hot/cold section).  */
  unsigned int is_func : 1;
  /* Flags used during call tree traversal.  */
  unsigned int visit1 : 1;
  unsigned int non_root : 1;
  unsigned int visit2 : 1;
  unsigned int marking : 1;
  unsigned int visit3 : 1;
};
1698
/* Per-section array of function_info entries, kept sorted by address.
   Attached to a section via spu_elf_section_data.  */

struct spu_elf_stack_info
{
  /* Number of entries currently in FUN.  */
  int num_fun;
  /* Allocated capacity of FUN.  */
  int max_fun;
  /* Variable size array describing functions, one per contiguous
     address range belonging to a function.  */
  struct function_info fun[1];
};
1707
1708 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
1709 entries for section SEC. */
1710
1711 static struct spu_elf_stack_info *
1712 alloc_stack_info (asection *sec, int max_fun)
1713 {
1714 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1715 bfd_size_type amt;
1716
1717 amt = sizeof (struct spu_elf_stack_info);
1718 amt += (max_fun - 1) * sizeof (struct function_info);
1719 sec_data->stack_info = bfd_zmalloc (amt);
1720 if (sec_data->stack_info != NULL)
1721 sec_data->stack_info->max_fun = max_fun;
1722 return sec_data->stack_info;
1723 }
1724
1725 /* Add a new struct function_info describing a (part of a) function
1726 starting at SYM_H. Keep the array sorted by address. */
1727
1728 static struct function_info *
1729 maybe_insert_function (asection *sec,
1730 void *sym_h,
1731 bfd_boolean global,
1732 bfd_boolean is_func)
1733 {
1734 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1735 struct spu_elf_stack_info *sinfo = sec_data->stack_info;
1736 int i;
1737 bfd_vma off, size;
1738
1739 if (sinfo == NULL)
1740 {
1741 sinfo = alloc_stack_info (sec, 20);
1742 if (sinfo == NULL)
1743 return NULL;
1744 }
1745
1746 if (!global)
1747 {
1748 Elf_Internal_Sym *sym = sym_h;
1749 off = sym->st_value;
1750 size = sym->st_size;
1751 }
1752 else
1753 {
1754 struct elf_link_hash_entry *h = sym_h;
1755 off = h->root.u.def.value;
1756 size = h->size;
1757 }
1758
1759 for (i = sinfo->num_fun; --i >= 0; )
1760 if (sinfo->fun[i].lo <= off)
1761 break;
1762
1763 if (i >= 0)
1764 {
1765 /* Don't add another entry for an alias, but do update some
1766 info. */
1767 if (sinfo->fun[i].lo == off)
1768 {
1769 /* Prefer globals over local syms. */
1770 if (global && !sinfo->fun[i].global)
1771 {
1772 sinfo->fun[i].global = TRUE;
1773 sinfo->fun[i].u.h = sym_h;
1774 }
1775 if (is_func)
1776 sinfo->fun[i].is_func = TRUE;
1777 return &sinfo->fun[i];
1778 }
1779 /* Ignore a zero-size symbol inside an existing function. */
1780 else if (sinfo->fun[i].hi > off && size == 0)
1781 return &sinfo->fun[i];
1782 }
1783
1784 if (++i < sinfo->num_fun)
1785 memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
1786 (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
1787 else if (i >= sinfo->max_fun)
1788 {
1789 bfd_size_type amt = sizeof (struct spu_elf_stack_info);
1790 bfd_size_type old = amt;
1791
1792 old += (sinfo->max_fun - 1) * sizeof (struct function_info);
1793 sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
1794 amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
1795 sinfo = bfd_realloc (sinfo, amt);
1796 if (sinfo == NULL)
1797 return NULL;
1798 memset ((char *) sinfo + old, 0, amt - old);
1799 sec_data->stack_info = sinfo;
1800 }
1801 sinfo->fun[i].is_func = is_func;
1802 sinfo->fun[i].global = global;
1803 sinfo->fun[i].sec = sec;
1804 if (global)
1805 sinfo->fun[i].u.h = sym_h;
1806 else
1807 sinfo->fun[i].u.sym = sym_h;
1808 sinfo->fun[i].lo = off;
1809 sinfo->fun[i].hi = off + size;
1810 sinfo->fun[i].stack = -find_function_stack_adjust (sec, off);
1811 sinfo->num_fun += 1;
1812 return &sinfo->fun[i];
1813 }
1814
/* Return the name of FUN, for use in diagnostics.  For a local symbol
   with no name, a "section+offset" string is synthesised.
   NOTE(review): that synthesised string is bfd_malloc'd and never
   freed -- presumably acceptable since names live for the whole link;
   callers must not free the result.  */

static const char *
func_name (struct function_info *fun)
{
  asection *sec;
  bfd *ibfd;
  Elf_Internal_Shdr *symtab_hdr;

  /* For a hot/cold fragment, report the owning function's name.  */
  while (fun->start != NULL)
    fun = fun->start;

  if (fun->global)
    return fun->u.h->root.root.string;

  sec = fun->sec;
  if (fun->u.sym->st_name == 0)
    {
      /* Unnamed local symbol: build "section+hexoffset".  len + 10
	 covers '+', up to 8 hex digits, and the NUL.  */
      size_t len = strlen (sec->name);
      char *name = bfd_malloc (len + 10);
      if (name == NULL)
	return "(null)";
      sprintf (name, "%s+%lx", sec->name,
	       (unsigned long) fun->u.sym->st_value & 0xffffffff);
      return name;
    }
  ibfd = sec->owner;
  symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
  return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
}
1845
1846 /* Read the instruction at OFF in SEC. Return true iff the instruction
1847 is a nop, lnop, or stop 0 (all zero insn). */
1848
1849 static bfd_boolean
1850 is_nop (asection *sec, bfd_vma off)
1851 {
1852 unsigned char insn[4];
1853
1854 if (off + 4 > sec->size
1855 || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
1856 return FALSE;
1857 if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
1858 return TRUE;
1859 if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
1860 return TRUE;
1861 return FALSE;
1862 }
1863
1864 /* Extend the range of FUN to cover nop padding up to LIMIT.
1865 Return TRUE iff some instruction other than a NOP was found. */
1866
1867 static bfd_boolean
1868 insns_at_end (struct function_info *fun, bfd_vma limit)
1869 {
1870 bfd_vma off = (fun->hi + 3) & -4;
1871
1872 while (off < limit && is_nop (fun->sec, off))
1873 off += 4;
1874 if (off < limit)
1875 {
1876 fun->hi = off;
1877 return TRUE;
1878 }
1879 fun->hi = limit;
1880 return FALSE;
1881 }
1882
/* Check and fix overlapping function ranges.  Return TRUE iff there
   are gaps in the current info we have about functions in SEC, ie.
   address ranges not covered by any known function.  */

static bfd_boolean
check_function_ranges (asection *sec, struct bfd_link_info *info)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->stack_info;
  int i;
  bfd_boolean gaps = FALSE;

  if (sinfo == NULL)
    return FALSE;

  /* The fun[] array is sorted by lo, so only adjacent entries can
     overlap or leave a gap.  */
  for (i = 1; i < sinfo->num_fun; i++)
    if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
      {
	/* Fix overlapping symbols.  */
	const char *f1 = func_name (&sinfo->fun[i - 1]);
	const char *f2 = func_name (&sinfo->fun[i]);

	info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
	sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
      }
    else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
      /* Real (non-nop) code between two known functions.  */
      gaps = TRUE;

  if (sinfo->num_fun == 0)
    gaps = TRUE;
  else
    {
      /* Also check before the first function and after the last.  */
      if (sinfo->fun[0].lo != 0)
	gaps = TRUE;
      if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
	{
	  const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);

	  info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
	  sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
	}
      else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
	gaps = TRUE;
    }
  return gaps;
}
1928
1929 /* Search current function info for a function that contains address
1930 OFFSET in section SEC. */
1931
1932 static struct function_info *
1933 find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
1934 {
1935 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1936 struct spu_elf_stack_info *sinfo = sec_data->stack_info;
1937 int lo, hi, mid;
1938
1939 lo = 0;
1940 hi = sinfo->num_fun;
1941 while (lo < hi)
1942 {
1943 mid = (lo + hi) / 2;
1944 if (offset < sinfo->fun[mid].lo)
1945 hi = mid;
1946 else if (offset >= sinfo->fun[mid].hi)
1947 lo = mid + 1;
1948 else
1949 return &sinfo->fun[mid];
1950 }
1951 info->callbacks->einfo (_("%A:0x%v not found in function table\n"),
1952 sec, offset);
1953 return NULL;
1954 }
1955
1956 /* Add CALLEE to CALLER call list if not already present. */
1957
1958 static bfd_boolean
1959 insert_callee (struct function_info *caller, struct call_info *callee)
1960 {
1961 struct call_info *p;
1962 for (p = caller->call_list; p != NULL; p = p->next)
1963 if (p->fun == callee->fun)
1964 {
1965 /* Tail calls use less stack than normal calls. Retain entry
1966 for normal call over one for tail call. */
1967 if (p->is_tail > callee->is_tail)
1968 p->is_tail = callee->is_tail;
1969 return FALSE;
1970 }
1971 callee->next = caller->call_list;
1972 caller->call_list = callee;
1973 return TRUE;
1974 }
1975
/* Rummage through the relocs for SEC, looking for function calls.
   If CALL_TREE is true, fill in call graph.  If CALL_TREE is false,
   mark destination symbols on calls as being functions.  Also
   look at branches, which may be tail calls or go to hot/cold
   section part of same function.
   NOTE(review): on the early FALSE returns below, INTERNAL_RELOCS is
   not freed when !info->keep_memory -- presumably tolerated since the
   link is aborted anyway; confirm before reuse.  */

static bfd_boolean
mark_functions_via_relocs (asection *sec,
			   struct bfd_link_info *info,
			   int call_tree)
{
  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
  Elf_Internal_Sym *syms;
  void *psyms;
  /* Persists across calls so the "non-code section" diagnostic is
     only emitted once during the call-tree pass.  */
  static bfd_boolean warned;

  internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
					       info->keep_memory);
  if (internal_relocs == NULL)
    return FALSE;

  symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
  psyms = &symtab_hdr->contents;
  syms = *(Elf_Internal_Sym **) psyms;
  irela = internal_relocs;
  irelaend = irela + sec->reloc_count;
  for (; irela < irelaend; irela++)
    {
      enum elf_spu_reloc_type r_type;
      unsigned int r_indx;
      asection *sym_sec;
      Elf_Internal_Sym *sym;
      struct elf_link_hash_entry *h;
      bfd_vma val;
      unsigned char insn[4];
      bfd_boolean is_call;
      struct function_info *caller;
      struct call_info *callee;

      /* Only 16-bit branch relocs can be on branch/call insns.  */
      r_type = ELF32_R_TYPE (irela->r_info);
      if (r_type != R_SPU_REL16
	  && r_type != R_SPU_ADDR16)
	continue;

      r_indx = ELF32_R_SYM (irela->r_info);
      if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
	return FALSE;

      /* Ignore targets outside this output image.  */
      if (sym_sec == NULL
	  || sym_sec->output_section == NULL
	  || sym_sec->output_section->owner != sec->output_section->owner)
	continue;

      /* Only relocs sitting on actual branch insns matter here.  */
      if (!bfd_get_section_contents (sec->owner, sec, insn,
				     irela->r_offset, 4))
	return FALSE;
      if (!is_branch (insn))
	continue;

      if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
	  != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
	{
	  /* Warn once (during the second pass) about branches into
	     non-code sections; the analysis cannot follow them.  */
	  if (!call_tree)
	    warned = TRUE;
	  if (!call_tree || !warned)
	    info->callbacks->einfo (_("%B(%A+0x%v): call to non-code section"
				      " %B(%A), stack analysis incomplete\n"),
				    sec->owner, sec, irela->r_offset,
				    sym_sec->owner, sym_sec);
	  continue;
	}

      is_call = (insn[0] & 0xfd) == 0x31;

      if (h)
	val = h->root.u.def.value;
      else
	val = sym->st_value;
      val += irela->r_addend;

      if (!call_tree)
	{
	  /* First pass: record the branch target as a function start.  */
	  struct function_info *fun;

	  if (irela->r_addend != 0)
	    {
	      /* Branch target isn't the symbol itself; synthesise a
		 local symbol at the adjusted address.  */
	      Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
	      if (fake == NULL)
		return FALSE;
	      fake->st_value = val;
	      fake->st_shndx
		= _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
	      sym = fake;
	    }
	  if (sym)
	    fun = maybe_insert_function (sym_sec, sym, FALSE, is_call);
	  else
	    fun = maybe_insert_function (sym_sec, h, TRUE, is_call);
	  if (fun == NULL)
	    return FALSE;
	  /* Free the fake symbol unless maybe_insert_function kept it.  */
	  if (irela->r_addend != 0
	      && fun->u.sym != sym)
	    free (sym);
	  continue;
	}

      /* Second pass: add a caller -> callee edge to the call graph.  */
      caller = find_function (sec, irela->r_offset, info);
      if (caller == NULL)
	return FALSE;
      callee = bfd_malloc (sizeof *callee);
      if (callee == NULL)
	return FALSE;

      callee->fun = find_function (sym_sec, val, info);
      if (callee->fun == NULL)
	return FALSE;
      callee->is_tail = !is_call;
      if (!insert_callee (caller, callee))
	free (callee);
      else if (!is_call
	       && !callee->fun->is_func
	       && callee->fun->stack == 0)
	{
	  /* This is either a tail call or a branch from one part of
	     the function to another, ie. hot/cold section.  If the
	     destination has been called by some other function then
	     it is a separate function.  We also assume that functions
	     are not split across input files.  */
	  if (callee->fun->start != NULL
	      || sec->owner != sym_sec->owner)
	    {
	      callee->fun->start = NULL;
	      callee->fun->is_func = TRUE;
	    }
	  else
	    callee->fun->start = caller;
	}
    }

  return TRUE;
}
2118
/* Handle something like .init or .fini, which has a piece of a function.
   These sections are pasted together to form a single function.
   Records SEC's whole contents as one function fragment, and links it
   to the fragment from the preceding contributor section (if any) so
   the pieces share one function_info owner.  */

static bfd_boolean
pasted_function (asection *sec, struct bfd_link_info *info)
{
  struct bfd_link_order *l;
  struct _spu_elf_section_data *sec_data;
  struct spu_elf_stack_info *sinfo;
  Elf_Internal_Sym *fake;
  struct function_info *fun, *fun_start;

  /* Synthesise a local symbol covering the whole section.  */
  fake = bfd_zmalloc (sizeof (*fake));
  if (fake == NULL)
    return FALSE;
  fake->st_value = 0;
  fake->st_size = sec->size;
  fake->st_shndx
    = _bfd_elf_section_from_bfd_section (sec->owner, sec);
  fun = maybe_insert_function (sec, fake, FALSE, FALSE);
  if (!fun)
    return FALSE;

  /* Find a function immediately preceding this section.  */
  fun_start = NULL;
  for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
    {
      if (l->u.indirect.section == sec)
	{
	  if (fun_start != NULL)
	    {
	      /* Point at the owning fragment, not another piece.  */
	      if (fun_start->start)
		fun_start = fun_start->start;
	      fun->start = fun_start;
	    }
	  return TRUE;
	}
      /* Remember the last function of each earlier contributor.  */
      if (l->type == bfd_indirect_link_order
	  && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
	  && (sinfo = sec_data->stack_info) != NULL
	  && sinfo->num_fun != 0)
	fun_start = &sinfo->fun[sinfo->num_fun - 1];
    }

  info->callbacks->einfo (_("%A link_order not found\n"), sec);
  return FALSE;
}
2166
2167 /* We're only interested in code sections. */
2168
2169 static bfd_boolean
2170 interesting_section (asection *s, bfd *obfd, struct spu_link_hash_table *htab)
2171 {
2172 return (s != htab->stub
2173 && s->output_section != NULL
2174 && s->output_section->owner == obfd
2175 && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2176 == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2177 && s->size != 0);
2178 }
2179
/* Map address ranges in code sections to functions.
   Pass 1 installs properly typed STT_FUNC symbols; if any section is
   not fully covered ("gaps"), pass 2 adds functions found via branch
   relocations and untyped globals, then extends zero-sized entries.
   Returns FALSE on allocation or symbol-reading failure.  */

static bfd_boolean
discover_functions (bfd *output_bfd, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;
  int bfd_idx;
  /* Per-input-bfd arrays: sorted candidate symbols and, indexed by
     symbol, the section each symbol lives in.  Kept across passes.  */
  Elf_Internal_Sym ***psym_arr;
  asection ***sec_arr;
  bfd_boolean gaps = FALSE;

  /* Count input bfds so the per-bfd arrays can be sized.  */
  bfd_idx = 0;
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    bfd_idx++;

  psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
  if (psym_arr == NULL)
    return FALSE;
  sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
  if (sec_arr == NULL)
    return FALSE;


  for (ibfd = info->input_bfds, bfd_idx = 0;
       ibfd != NULL;
       ibfd = ibfd->link_next, bfd_idx++)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      Elf_Internal_Shdr *symtab_hdr;
      asection *sec;
      size_t symcount;
      Elf_Internal_Sym *syms, *sy, **psyms, **psy;
      asection **psecs, **p;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      /* Read all the symbols.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
      if (symcount == 0)
	continue;

      syms = (Elf_Internal_Sym *) symtab_hdr->contents;
      if (syms == NULL)
	{
	  syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
				       NULL, NULL, NULL);
	  /* Cache the symbols so later passes need not re-read them.  */
	  symtab_hdr->contents = (void *) syms;
	  if (syms == NULL)
	    return FALSE;
	}

      /* Select defined function symbols that are going to be output.  */
      psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
      if (psyms == NULL)
	return FALSE;
      psym_arr[bfd_idx] = psyms;
      psecs = bfd_malloc (symcount * sizeof (*psecs));
      if (psecs == NULL)
	return FALSE;
      sec_arr[bfd_idx] = psecs;
      for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
	if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
	    || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
	  {
	    asection *s;

	    *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
	    if (s != NULL && interesting_section (s, output_bfd, htab))
	      *psy++ = sy;
	  }
      symcount = psy - psyms;
      /* NULL-terminate so the second pass can walk without a count.  */
      *psy = NULL;

      /* Sort them by section and offset within section.  */
      sort_syms_syms = syms;
      sort_syms_psecs = psecs;
      qsort (psyms, symcount, sizeof (*psyms), sort_syms);

      /* Now inspect the function symbols.  */
      for (psy = psyms; psy < psyms + symcount; )
	{
	  asection *s = psecs[*psy - syms];
	  Elf_Internal_Sym **psy2;

	  /* Find the run of sorted symbols that share section S, and
	     size that section's stack_info array accordingly.  */
	  for (psy2 = psy; ++psy2 < psyms + symcount; )
	    if (psecs[*psy2 - syms] != s)
	      break;

	  if (!alloc_stack_info (s, psy2 - psy))
	    return FALSE;
	  psy = psy2;
	}

      /* First install info about properly typed and sized functions.
	 In an ideal world this will cover all code sections, except
	 when partitioning functions into hot and cold sections,
	 and the horrible pasted together .init and .fini functions.  */
      for (psy = psyms; psy < psyms + symcount; ++psy)
	{
	  sy = *psy;
	  if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
	    {
	      asection *s = psecs[sy - syms];
	      if (!maybe_insert_function (s, sy, FALSE, TRUE))
		return FALSE;
	    }
	}

      /* Stop scanning this bfd as soon as one section reports a gap.  */
      for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	if (interesting_section (sec, output_bfd, htab))
	  gaps |= check_function_ranges (sec, info);
    }

  if (gaps)
    {
      /* See if we can discover more function symbols by looking at
	 relocations.  */
      for (ibfd = info->input_bfds, bfd_idx = 0;
	   ibfd != NULL;
	   ibfd = ibfd->link_next, bfd_idx++)
	{
	  asection *sec;

	  /* A NULL entry means this bfd was skipped in pass 1.  */
	  if (psym_arr[bfd_idx] == NULL)
	    continue;

	  for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	    if (interesting_section (sec, output_bfd, htab)
		&& sec->reloc_count != 0)
	      {
		if (!mark_functions_via_relocs (sec, info, FALSE))
		  return FALSE;
	      }
	}

      for (ibfd = info->input_bfds, bfd_idx = 0;
	   ibfd != NULL;
	   ibfd = ibfd->link_next, bfd_idx++)
	{
	  Elf_Internal_Shdr *symtab_hdr;
	  asection *sec;
	  Elf_Internal_Sym *syms, *sy, **psyms, **psy;
	  asection **psecs;

	  if ((psyms = psym_arr[bfd_idx]) == NULL)
	    continue;

	  psecs = sec_arr[bfd_idx];

	  symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
	  syms = (Elf_Internal_Sym *) symtab_hdr->contents;

	  /* Re-check: the reloc scan above may have filled the gaps.  */
	  gaps = FALSE;
	  for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	    if (interesting_section (sec, output_bfd, htab))
	      gaps |= check_function_ranges (sec, info);
	  if (!gaps)
	    continue;

	  /* Finally, install all globals.  */
	  for (psy = psyms; (sy = *psy) != NULL; ++psy)
	    {
	      asection *s;

	      s = psecs[sy - syms];

	      /* Global syms might be improperly typed functions.  */
	      if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
		  && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
		{
		  if (!maybe_insert_function (s, sy, FALSE, FALSE))
		    return FALSE;
		}
	    }

	  /* Some of the symbols we've installed as marking the
	     beginning of functions may have a size of zero.  Extend
	     the range of such functions to the beginning of the
	     next symbol of interest.  */
	  for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	    if (interesting_section (sec, output_bfd, htab))
	      {
		struct _spu_elf_section_data *sec_data;
		struct spu_elf_stack_info *sinfo;

		sec_data = spu_elf_section_data (sec);
		sinfo = sec_data->stack_info;
		if (sinfo != NULL)
		  {
		    int fun_idx;
		    bfd_vma hi = sec->size;

		    /* Walk backwards so each function's hi bound is
		       the following function's lo bound.  */
		    for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
		      {
			sinfo->fun[fun_idx].hi = hi;
			hi = sinfo->fun[fun_idx].lo;
		      }
		  }
		/* No symbols in this section.  Must be .init or .fini
		   or something similar.  */
		else if (!pasted_function (sec, info))
		  return FALSE;
	      }
	}
    }

  for (ibfd = info->input_bfds, bfd_idx = 0;
       ibfd != NULL;
       ibfd = ibfd->link_next, bfd_idx++)
    {
      if (psym_arr[bfd_idx] == NULL)
	continue;

      free (psym_arr[bfd_idx]);
      free (sec_arr[bfd_idx]);
    }

  free (psym_arr);
  free (sec_arr);

  return TRUE;
}
2405
2406 /* Mark nodes in the call graph that are called by some other node. */
2407
2408 static void
2409 mark_non_root (struct function_info *fun)
2410 {
2411 struct call_info *call;
2412
2413 fun->visit1 = TRUE;
2414 for (call = fun->call_list; call; call = call->next)
2415 {
2416 call->fun->non_root = TRUE;
2417 if (!call->fun->visit1)
2418 mark_non_root (call->fun);
2419 }
2420 }
2421
/* Remove cycles from the call graph.
   Depth-first traversal: visit2 marks nodes whose subtree is fully
   processed, while marking is TRUE only for nodes on the current
   recursion stack.  An edge to a "marking" node is therefore a back
   edge, i.e. part of a cycle, and is unlinked (with a diagnostic).  */

static void
call_graph_traverse (struct function_info *fun, struct bfd_link_info *info)
{
  struct call_info **callp, *call;

  fun->visit2 = TRUE;
  fun->marking = TRUE;

  /* CALLP points at the link to the current edge so removal is an
     O(1) pointer rewrite.  */
  callp = &fun->call_list;
  while ((call = *callp) != NULL)
    {
      if (!call->fun->visit2)
	call_graph_traverse (call->fun, info);
      else if (call->fun->marking)
	{
	  const char *f1 = func_name (fun);
	  const char *f2 = func_name (call->fun);

	  info->callbacks->info (_("Stack analysis will ignore the call "
				   "from %s to %s\n"),
				 f1, f2);
	  /* Drop the cycle-forming edge and retry at the same link.
	     NOTE(review): the unlinked call_info is not freed here —
	     appears to be a small deliberate leak; confirm before
	     adding a free.  */
	  *callp = call->next;
	  continue;
	}
      callp = &call->next;
    }
  /* FUN is leaving the recursion stack.  */
  fun->marking = FALSE;
}
2452
/* Populate call_list for each function.
   Three passes over the SPU input bfds: (1) record call edges from
   branch relocations and merge hot/cold function parts, (2) mark
   every called function as non-root, (3) break cycles starting from
   the remaining root nodes.  Returns FALSE on failure.  */

static bfd_boolean
build_call_tree (bfd *output_bfd, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      /* Record call edges by scanning each code section's relocs.  */
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  if (!interesting_section (sec, output_bfd, htab)
	      || sec->reloc_count == 0)
	    continue;

	  if (!mark_functions_via_relocs (sec, info, TRUE))
	    return FALSE;
	}

      /* Transfer call info from hot/cold section part of function
	 to main entry.  */
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  struct _spu_elf_section_data *sec_data;
	  struct spu_elf_stack_info *sinfo;

	  if ((sec_data = spu_elf_section_data (sec)) != NULL
	      && (sinfo = sec_data->stack_info) != NULL)
	    {
	      int i;
	      for (i = 0; i < sinfo->num_fun; ++i)
		{
		  /* A non-NULL start means this entry is a
		     continuation part of another function.  */
		  if (sinfo->fun[i].start != NULL)
		    {
		      struct call_info *call = sinfo->fun[i].call_list;

		      while (call != NULL)
			{
			  struct call_info *call_next = call->next;
			  /* The node is freed only when insert_callee
			     rejects it (duplicate edge); otherwise the
			     main entry's list owns it now.  */
			  if (!insert_callee (sinfo->fun[i].start, call))
			    free (call);
			  call = call_next;
			}
		      sinfo->fun[i].call_list = NULL;
		      /* A continuation part can never be a root.  */
		      sinfo->fun[i].non_root = TRUE;
		    }
		}
	    }
	}
    }

  /* Find the call graph root(s).  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  struct _spu_elf_section_data *sec_data;
	  struct spu_elf_stack_info *sinfo;

	  if ((sec_data = spu_elf_section_data (sec)) != NULL
	      && (sinfo = sec_data->stack_info) != NULL)
	    {
	      int i;
	      for (i = 0; i < sinfo->num_fun; ++i)
		if (!sinfo->fun[i].visit1)
		  mark_non_root (&sinfo->fun[i]);
	    }
	}
    }

  /* Remove cycles from the call graph.  We start from the root node(s)
     so that we break cycles in a reasonable place.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  struct _spu_elf_section_data *sec_data;
	  struct spu_elf_stack_info *sinfo;

	  if ((sec_data = spu_elf_section_data (sec)) != NULL
	      && (sinfo = sec_data->stack_info) != NULL)
	    {
	      int i;
	      for (i = 0; i < sinfo->num_fun; ++i)
		if (!sinfo->fun[i].non_root)
		  call_graph_traverse (&sinfo->fun[i], info);
	    }
	}
    }

  return TRUE;
}
2564
/* Descend the call graph for FUN, accumulating total stack required.
   Returns the maximum stack needed on any path through FUN.  When
   EMIT_STACK_SYMS is nonzero, also defines an absolute __stack_*
   symbol holding the result for each function visited.  */

static bfd_vma
sum_stack (struct function_info *fun,
	   struct bfd_link_info *info,
	   int emit_stack_syms)
{
  struct call_info *call;
  struct function_info *max = NULL;
  bfd_vma max_stack = fun->stack;
  bfd_vma stack;
  const char *f1;

  /* If visit3 is set this node is done and fun->stack already holds
     the cumulative figure (assigned near the end below).  */
  if (fun->visit3)
    return max_stack;

  for (call = fun->call_list; call; call = call->next)
    {
      stack = sum_stack (call->fun, info, emit_stack_syms);
      /* Include caller stack for normal calls, don't do so for
	 tail calls.  fun->stack here is local stack usage for
	 this function.  */
      if (!call->is_tail)
	stack += fun->stack;
      if (max_stack < stack)
	{
	  max_stack = stack;
	  max = call->fun;
	}
    }

  f1 = func_name (fun);
  /* Report local then cumulative stack for this function.  */
  info->callbacks->minfo (_("%s: 0x%v 0x%v\n"),
			  f1, (bfd_vma) fun->stack, max_stack);

  if (fun->call_list)
    {
      info->callbacks->minfo (_(" calls:\n"));
      for (call = fun->call_list; call; call = call->next)
	{
	  const char *f2 = func_name (call->fun);
	  /* '*' marks the callee on the deepest path, 't' a tail call.  */
	  const char *ann1 = call->fun == max ? "*" : " ";
	  const char *ann2 = call->is_tail ? "t" : " ";

	  info->callbacks->minfo (_(" %s%s %s\n"), ann1, ann2, f2);
	}
    }

  /* Now fun->stack holds cumulative stack.  */
  fun->stack = max_stack;
  fun->visit3 = TRUE;

  if (emit_stack_syms)
    {
      struct spu_link_hash_table *htab = spu_hash_table (info);
      /* Room for "__stack_" (8) + up to 8 hex digits + '_' + NUL.  */
      char *name = bfd_malloc (18 + strlen (f1));
      struct elf_link_hash_entry *h;

      if (name != NULL)
	{
	  /* Local function names are qualified with the section id to
	     keep the generated symbol unique.  */
	  if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
	    sprintf (name, "__stack_%s", f1);
	  else
	    sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);

	  h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
	  free (name);
	  /* Only define the symbol if the user hasn't already.  */
	  if (h != NULL
	      && (h->root.type == bfd_link_hash_new
		  || h->root.type == bfd_link_hash_undefined
		  || h->root.type == bfd_link_hash_undefweak))
	    {
	      h->root.type = bfd_link_hash_defined;
	      h->root.u.def.section = bfd_abs_section_ptr;
	      h->root.u.def.value = max_stack;
	      h->size = 0;
	      h->type = 0;
	      h->ref_regular = 1;
	      h->def_regular = 1;
	      h->ref_regular_nonweak = 1;
	      h->forced_local = 1;
	      h->non_elf = 0;
	    }
	}
    }

  return max_stack;
}
2653
/* Provide an estimate of total stack required.
   Builds the function map and call graph, then sums stack usage from
   each call graph root, reporting per-root and overall maxima via the
   linker callbacks.  EMIT_STACK_SYMS is passed through to sum_stack.  */

static bfd_boolean
spu_elf_stack_analysis (bfd *output_bfd,
			struct bfd_link_info *info,
			int emit_stack_syms)
{
  bfd *ibfd;
  bfd_vma max_stack = 0;

  if (!discover_functions (output_bfd, info))
    return FALSE;

  if (!build_call_tree (output_bfd, info))
    return FALSE;

  info->callbacks->info (_("Stack size for call graph root nodes.\n"));
  info->callbacks->minfo (_("\nStack size for functions. "
			    "Annotations: '*' max stack, 't' tail call\n"));
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  struct _spu_elf_section_data *sec_data;
	  struct spu_elf_stack_info *sinfo;

	  if ((sec_data = spu_elf_section_data (sec)) != NULL
	      && (sinfo = sec_data->stack_info) != NULL)
	    {
	      int i;
	      for (i = 0; i < sinfo->num_fun; ++i)
		{
		  /* Only roots are summed; sum_stack recurses into
		     the callees.  */
		  if (!sinfo->fun[i].non_root)
		    {
		      bfd_vma stack;
		      const char *f1;

		      stack = sum_stack (&sinfo->fun[i], info,
					 emit_stack_syms);
		      f1 = func_name (&sinfo->fun[i]);
		      info->callbacks->info (_(" %s: 0x%v\n"),
					     f1, stack);
		      if (max_stack < stack)
			max_stack = stack;
		    }
		}
	    }
	}
    }

  info->callbacks->info (_("Maximum stack required is 0x%v\n"), max_stack);
  return TRUE;
}
2713
2714 /* Perform a final link. */
2715
2716 static bfd_boolean
2717 spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
2718 {
2719 struct spu_link_hash_table *htab = spu_hash_table (info);
2720
2721 if (htab->stack_analysis
2722 && !spu_elf_stack_analysis (output_bfd, info, htab->emit_stack_syms))
2723 info->callbacks->einfo ("%X%P: stack analysis error: %E\n");
2724
2725 return bfd_elf_final_link (output_bfd, info);
2726 }
2727
2728 /* Called when not normally emitting relocs, ie. !info->relocatable
2729 and !info->emitrelocations. Returns a count of special relocs
2730 that need to be emitted. */
2731
2732 static unsigned int
2733 spu_elf_count_relocs (asection *sec, Elf_Internal_Rela *relocs)
2734 {
2735 unsigned int count = 0;
2736 Elf_Internal_Rela *relend = relocs + sec->reloc_count;
2737
2738 for (; relocs < relend; relocs++)
2739 {
2740 int r_type = ELF32_R_TYPE (relocs->r_info);
2741 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
2742 ++count;
2743 }
2744
2745 return count;
2746 }
2747
/* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD.
   Resolves each reloc against local or global symbols, redirecting
   branches into overlays through their stubs.  R_SPU_PPU32/64 relocs
   are not applied here; they are compacted and left for output (the
   function then returns 2 instead of TRUE to signal this).  */

static bfd_boolean
spu_elf_relocate_section (bfd *output_bfd,
			  struct bfd_link_info *info,
			  bfd *input_bfd,
			  asection *input_section,
			  bfd_byte *contents,
			  Elf_Internal_Rela *relocs,
			  Elf_Internal_Sym *local_syms,
			  asection **local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel, *relend;
  struct spu_link_hash_table *htab;
  bfd_boolean ret = TRUE;
  bfd_boolean emit_these_relocs = FALSE;

  htab = spu_hash_table (info);
  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
  sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int r_type;
      reloc_howto_type *howto;
      unsigned long r_symndx;
      Elf_Internal_Sym *sym;
      asection *sec;
      struct elf_link_hash_entry *h;
      const char *sym_name;
      bfd_vma relocation;
      bfd_vma addend;
      bfd_reloc_status_type r;
      bfd_boolean unresolved_reloc;
      bfd_boolean warned;
      bfd_boolean branch;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      /* PPU relocs are emitted, not applied; deal with them later.  */
      if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	{
	  emit_these_relocs = TRUE;
	  continue;
	}

      howto = elf_howto_table + r_type;
      unresolved_reloc = FALSE;
      warned = FALSE;
      h = NULL;
      sym = NULL;
      sec = NULL;
      /* Indices below sh_info are local symbols; others are found via
	 the global hash table.  */
      if (r_symndx < symtab_hdr->sh_info)
	{
	  sym = local_syms + r_symndx;
	  sec = local_sections[r_symndx];
	  sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
	  relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
	}
      else
	{
	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
				   r_symndx, symtab_hdr, sym_hashes,
				   h, sec, relocation,
				   unresolved_reloc, warned);
	  sym_name = h->root.root.string;
	}

      if (sec != NULL && elf_discarded_section (sec))
	{
	  /* For relocs against symbols from removed linkonce sections,
	     or sections discarded by a linker script, we just want the
	     section contents zeroed.  Avoid any special processing.  */
	  _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
	  rel->r_info = 0;
	  rel->r_addend = 0;
	  continue;
	}

      if (info->relocatable)
	continue;

      if (unresolved_reloc)
	{
	  (*_bfd_error_handler)
	    (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
	     input_bfd,
	     bfd_get_section_name (input_bfd, input_section),
	     (long) rel->r_offset,
	     howto->name,
	     sym_name);
	  ret = FALSE;
	}

      /* If this symbol is in an overlay area, we may need to relocate
	 to the overlay stub.  */
      addend = rel->r_addend;
      branch = (is_branch (contents + rel->r_offset)
		|| is_hint (contents + rel->r_offset));
      if (needs_ovl_stub (sym_name, sec, input_section, htab, branch))
	{
	  char *stub_name;
	  struct spu_stub_hash_entry *sh;

	  stub_name = spu_stub_name (sec, h, rel);
	  if (stub_name == NULL)
	    return FALSE;

	  sh = (struct spu_stub_hash_entry *)
	    bfd_hash_lookup (&htab->stub_hash_table, stub_name, FALSE, FALSE);
	  if (sh != NULL)
	    {
	      /* Point the reloc at the stub instead of the target;
		 the stub encodes the real address, so drop the
		 addend.  */
	      relocation = (htab->stub->output_section->vma
			    + htab->stub->output_offset
			    + sh->off);
	      addend = 0;
	    }
	  free (stub_name);
	}

      r = _bfd_final_link_relocate (howto,
				    input_bfd,
				    input_section,
				    contents,
				    rel->r_offset, relocation, addend);

      if (r != bfd_reloc_ok)
	{
	  const char *msg = (const char *) 0;

	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      if (!((*info->callbacks->reloc_overflow)
		    (info, (h ? &h->root : NULL), sym_name, howto->name,
		     (bfd_vma) 0, input_bfd, input_section, rel->r_offset)))
		return FALSE;
	      break;

	    case bfd_reloc_undefined:
	      if (!((*info->callbacks->undefined_symbol)
		    (info, sym_name, input_bfd, input_section,
		     rel->r_offset, TRUE)))
		return FALSE;
	      break;

	    case bfd_reloc_outofrange:
	      msg = _("internal error: out of range error");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      msg = _("internal error: unsupported relocation error");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      msg = _("internal error: dangerous error");
	      goto common_error;

	    default:
	      msg = _("internal error: unknown error");
	      /* fall through */

	    common_error:
	      if (!((*info->callbacks->warning)
		    (info, msg, sym_name, input_bfd, input_section,
		     rel->r_offset)))
		return FALSE;
	      break;
	    }
	}
    }

  if (ret
      && emit_these_relocs
      && !info->relocatable
      && !info->emitrelocations)
    {
      /* Compact the reloc array down to just the PPU relocs that
	 must be written to the output file.  */
      Elf_Internal_Rela *wrel;
      Elf_Internal_Shdr *rel_hdr;

      wrel = rel = relocs;
      relend = relocs + input_section->reloc_count;
      for (; rel < relend; rel++)
	{
	  int r_type;

	  r_type = ELF32_R_TYPE (rel->r_info);
	  if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	    *wrel++ = *rel;
	}
      input_section->reloc_count = wrel - relocs;
      /* Backflips for _bfd_elf_link_output_relocs.  */
      rel_hdr = &elf_section_data (input_section)->rel_hdr;
      rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
      /* Magic return value telling the caller to emit these relocs.  */
      ret = 2;
    }

  return ret;
}
2950
/* Adjust _SPUEAR_ syms to point at their overlay stubs.
   Called per output symbol; when overlays are in use and the symbol
   is a regularly defined _SPUEAR_* entry with a stub, rewrite its
   section index and value to address the stub.  */

static bfd_boolean
spu_elf_output_symbol_hook (struct bfd_link_info *info,
			    const char *sym_name ATTRIBUTE_UNUSED,
			    Elf_Internal_Sym *sym,
			    asection *sym_sec ATTRIBUTE_UNUSED,
			    struct elf_link_hash_entry *h)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);

  if (!info->relocatable
      && htab->num_overlays != 0
      && h != NULL
      && (h->root.type == bfd_link_hash_defined
	  || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
    {
      /* Look up the stub using a zero addend, matching how the stub
	 name was formed.  */
      static Elf_Internal_Rela zero_rel;
      char *stub_name = spu_stub_name (h->root.u.def.section, h, &zero_rel);
      struct spu_stub_hash_entry *sh;

      if (stub_name == NULL)
	return FALSE;
      sh = (struct spu_stub_hash_entry *)
	bfd_hash_lookup (&htab->stub_hash_table, stub_name, FALSE, FALSE);
      free (stub_name);
      /* No stub for this symbol: leave it untouched.  */
      if (sh == NULL)
	return TRUE;
      /* Redirect the output symbol into the stub section.  */
      sym->st_shndx
	= _bfd_elf_section_from_bfd_section (htab->stub->output_section->owner,
					     htab->stub->output_section);
      sym->st_value = (htab->stub->output_section->vma
		       + htab->stub->output_offset
		       + sh->off);
    }

  return TRUE;
}
2991
/* Nonzero when the output is being built as an SPU "plugin"; set by
   the linker front end via spu_elf_plugin and consulted when writing
   the ELF header.  */
static int spu_plugin = 0;

/* Record whether a plugin is being produced (VAL nonzero).  */
void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}
2999
3000 /* Set ELF header e_type for plugins. */
3001
3002 static void
3003 spu_elf_post_process_headers (bfd *abfd,
3004 struct bfd_link_info *info ATTRIBUTE_UNUSED)
3005 {
3006 if (spu_plugin)
3007 {
3008 Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);
3009
3010 i_ehdrp->e_type = ET_DYN;
3011 }
3012 }
3013
3014 /* We may add an extra PT_LOAD segment for .toe. We also need extra
3015 segments for overlays. */
3016
3017 static int
3018 spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
3019 {
3020 struct spu_link_hash_table *htab = spu_hash_table (info);
3021 int extra = htab->num_overlays;
3022 asection *sec;
3023
3024 if (extra)
3025 ++extra;
3026
3027 sec = bfd_get_section_by_name (abfd, ".toe");
3028 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
3029 ++extra;
3030
3031 return extra;
3032 }
3033
/* Remove .toe section from other PT_LOAD segments and put it in
   a segment of its own.  Put overlays in separate segments too.
   Works by splicing new single- and multi-section elf_segment_map
   entries into the map chain whenever a .toe or overlay section is
   found inside a multi-section PT_LOAD.  */

static bfd_boolean
spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
{
  asection *toe, *s;
  struct elf_segment_map *m;
  unsigned int i;

  if (info == NULL)
    return TRUE;

  toe = bfd_get_section_by_name (abfd, ".toe");
  for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
    if (m->p_type == PT_LOAD && m->count > 1)
      for (i = 0; i < m->count; i++)
	if ((s = m->sections[i]) == toe
	    || spu_elf_section_data (s)->ovl_index != 0)
	  {
	    struct elf_segment_map *m2;
	    bfd_vma amt;

	    /* Split the sections after S into a new PT_LOAD map
	       chained directly after M.  The map struct already has
	       room for one section, hence count - (i + 2) extras.  */
	    if (i + 1 < m->count)
	      {
		amt = sizeof (struct elf_segment_map);
		amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->count = m->count - (i + 1);
		memcpy (m2->sections, m->sections + i + 1,
			m2->count * sizeof (m->sections[0]));
		m2->p_type = PT_LOAD;
		m2->next = m->next;
		m->next = m2;
	      }
	    m->count = 1;
	    /* If S was not first, truncate M to the sections before
	       S and give S a single-section map of its own.  */
	    if (i != 0)
	      {
		m->count = i;
		amt = sizeof (struct elf_segment_map);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->p_type = PT_LOAD;
		m2->count = 1;
		m2->sections[0] = s;
		m2->next = m->next;
		m->next = m2;
	      }
	    /* Any further special sections in the remainder are
	       handled when the outer loop reaches the maps spliced
	       in above.  */
	    break;
	  }

  return TRUE;
}
3090
3091 /* Check that all loadable section VMAs lie in the range
3092 LO .. HI inclusive. */
3093
3094 asection *
3095 spu_elf_check_vma (bfd *abfd, bfd_vma lo, bfd_vma hi)
3096 {
3097 struct elf_segment_map *m;
3098 unsigned int i;
3099
3100 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
3101 if (m->p_type == PT_LOAD)
3102 for (i = 0; i < m->count; i++)
3103 if (m->sections[i]->size != 0
3104 && (m->sections[i]->vma < lo
3105 || m->sections[i]->vma > hi
3106 || m->sections[i]->vma + m->sections[i]->size - 1 > hi))
3107 return m->sections[i];
3108
3109 return NULL;
3110 }
3111
3112 /* Tweak the section type of .note.spu_name. */
3113
3114 static bfd_boolean
3115 spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
3116 Elf_Internal_Shdr *hdr,
3117 asection *sec)
3118 {
3119 if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
3120 hdr->sh_type = SHT_NOTE;
3121 return TRUE;
3122 }
3123
/* Tweak phdrs before writing them out.
   Marks overlay segments with PF_OVERLAY (recording each one's file
   offset in _ovly_table), then rounds PT_LOAD p_filesz/p_memsz up to
   multiples of 16 when that cannot create overlapping segments.  */

static int
spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
{
  const struct elf_backend_data *bed;
  struct elf_obj_tdata *tdata;
  Elf_Internal_Phdr *phdr, *last;
  struct spu_link_hash_table *htab;
  unsigned int count;
  unsigned int i;

  if (info == NULL)
    return TRUE;

  bed = get_elf_backend_data (abfd);
  tdata = elf_tdata (abfd);
  phdr = tdata->phdr;
  count = tdata->program_header_size / bed->s->sizeof_phdr;
  htab = spu_hash_table (info);
  if (htab->num_overlays != 0)
    {
      struct elf_segment_map *m;
      unsigned int o;

      /* I indexes phdr[] in step with the segment_map walk.  */
      for (i = 0, m = elf_tdata (abfd)->segment_map; m; ++i, m = m->next)
	if (m->count != 0
	    && (o = spu_elf_section_data (m->sections[0])->ovl_index) != 0)
	  {
	    /* Mark this as an overlay header.  */
	    phdr[i].p_flags |= PF_OVERLAY;

	    if (htab->ovtab != NULL && htab->ovtab->size != 0)
	      {
		bfd_byte *p = htab->ovtab->contents;
		/* Each _ovly_table entry is 16 bytes; file_off lives
		   at offset 8 within entry o-1.  */
		unsigned int off = (o - 1) * 16 + 8;

		/* Write file_off into _ovly_table.  */
		bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
	      }
	  }
    }

  /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
     of 16.  This should always be possible when using the standard
     linker scripts, but don't create overlapping segments if
     someone is playing games with linker scripts.  */
  last = NULL;
  for (i = count; i-- != 0; )
    if (phdr[i].p_type == PT_LOAD)
      {
	unsigned adjust;

	/* Bail out of the scan if padding this segment would run
	   into the next (LAST) segment's file or memory image.  */
	adjust = -phdr[i].p_filesz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
	  break;

	adjust = -phdr[i].p_memsz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_filesz != 0
	    && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
	    && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
	  break;

	if (phdr[i].p_filesz != 0)
	  last = &phdr[i];
      }

  /* Only apply the rounding if the scan above completed, ie. I
     wrapped past zero without hitting an overlap.  */
  if (i == (unsigned int) -1)
    for (i = count; i-- != 0; )
      if (phdr[i].p_type == PT_LOAD)
	{
	  unsigned adjust;

	  adjust = -phdr[i].p_filesz & 15;
	  phdr[i].p_filesz += adjust;

	  adjust = -phdr[i].p_memsz & 15;
	  phdr[i].p_memsz += adjust;
	}

  return TRUE;
}
3210
/* Target identification for the generic elf32-target.h template.  */
#define TARGET_BIG_SYM bfd_elf32_spu_vec
#define TARGET_BIG_NAME "elf32-spu"
#define ELF_ARCH bfd_arch_spu
#define ELF_MACHINE_CODE EM_SPU
/* This matches the alignment need for DMA. */
#define ELF_MAXPAGESIZE 0x80
#define elf_backend_rela_normal 1
#define elf_backend_can_gc_sections 1

/* Relocation and symbol handling hooks.  */
#define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
#define elf_info_to_howto spu_elf_info_to_howto
#define elf_backend_count_relocs spu_elf_count_relocs
#define elf_backend_relocate_section spu_elf_relocate_section
#define elf_backend_symbol_processing spu_elf_backend_symbol_processing
#define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
#define bfd_elf32_new_section_hook spu_elf_new_section_hook
#define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_free spu_elf_link_hash_table_free

/* Output shaping: program headers, segment layout, final link.  */
#define elf_backend_additional_program_headers spu_elf_additional_program_headers
#define elf_backend_modify_segment_map spu_elf_modify_segment_map
#define elf_backend_modify_program_headers spu_elf_modify_program_headers
#define elf_backend_post_process_headers spu_elf_post_process_headers
#define elf_backend_fake_sections spu_elf_fake_sections
#define elf_backend_special_sections spu_elf_special_sections
#define bfd_elf32_bfd_final_link spu_elf_final_link

#include "elf32-target.h"