e11e1d7663f0412d11afba72b5d18ee994366e1c
[deliverable/binutils-gdb.git] / bfd / elf32-spu.c
1 /* SPU specific support for 32-bit ELF
2
3 Copyright 2006, 2007, 2008 Free Software Foundation, Inc.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
20
21 #include "sysdep.h"
22 #include "libiberty.h"
23 #include "bfd.h"
24 #include "bfdlink.h"
25 #include "libbfd.h"
26 #include "elf-bfd.h"
27 #include "elf/spu.h"
28 #include "elf32-spu.h"
29
30 /* We use RELA style relocs. Don't define USE_REL. */
31
32 static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
33 void *, asection *,
34 bfd *, char **);
35
36 /* Values of type 'enum elf_spu_reloc_type' are used to index this
37 array, so it must be declared in the order of that type. */
38
static reloc_howto_type elf_howto_table[] = {
  /* HOWTO argument order: type, rightshift, size (power-of-two byte
     count code), bitsize, pc_relative, bitpos, complain_on_overflow,
     special_function, name, partial_inplace, src_mask, dst_mask,
     pcrel_offset.  The dst_mask shows where the field sits in the
     32-bit instruction word.  */
  HOWTO (R_SPU_NONE, 0, 0, 0, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_NONE",
	 FALSE, 0, 0x00000000, FALSE),
  /* rightshift 4: the 10-bit field holds the value scaled down by 16.  */
  HOWTO (R_SPU_ADDR10, 4, 2, 10, FALSE, 14, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR10",
	 FALSE, 0, 0x00ffc000, FALSE),
  /* rightshift 2: the 16-bit field holds a word (4-byte) quantity.  */
  HOWTO (R_SPU_ADDR16, 2, 2, 16, FALSE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16_HI",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_LO, 0, 2, 16, FALSE, 7, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR16_LO",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR18, 0, 2, 18, FALSE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR18",
	 FALSE, 0, 0x01ffff80, FALSE),
  HOWTO (R_SPU_ADDR32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR32",
	 FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_REL16, 2, 2, 16, TRUE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_REL16",
	 FALSE, 0, 0x007fff80, TRUE),
  HOWTO (R_SPU_ADDR7, 0, 2, 7, FALSE, 14, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR7",
	 FALSE, 0, 0x001fc000, FALSE),
  /* The two REL9 forms split their 9-bit value across discontiguous
     instruction fields (note the dst_mask), so they need a special
     function rather than the generic reloc handler.  */
  HOWTO (R_SPU_REL9, 2, 2, 9, TRUE, 0, complain_overflow_signed,
	 spu_elf_rel9, "SPU_REL9",
	 FALSE, 0, 0x0180007f, TRUE),
  HOWTO (R_SPU_REL9I, 2, 2, 9, TRUE, 0, complain_overflow_signed,
	 spu_elf_rel9, "SPU_REL9I",
	 FALSE, 0, 0x0000c07f, TRUE),
  HOWTO (R_SPU_ADDR10I, 0, 2, 10, FALSE, 14, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR10I",
	 FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16I, 0, 2, 16, FALSE, 7, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR16I",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_REL32, 0, 2, 32, TRUE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_REL32",
	 FALSE, 0, 0xffffffff, TRUE),
  HOWTO (R_SPU_ADDR16X, 0, 2, 16, FALSE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16X",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_PPU32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_PPU32",
	 FALSE, 0, 0xffffffff, FALSE),
  /* 64-bit datum; size code 4 and an all-ones dst_mask.  */
  HOWTO (R_SPU_PPU64, 0, 4, 64, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_PPU64",
	 FALSE, 0, -1, FALSE),
};
92
/* SPU-specific special sections, giving default section type and
   flags.  Fields: name, prefix length matched, suffix length,
   sh_type, sh_flags.  */
static struct bfd_elf_special_section const spu_elf_special_sections[] = {
  { "._ea", 4, 0, SHT_PROGBITS, SHF_WRITE },
  { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
  { NULL, 0, 0, 0, 0 }
};
98
99 static enum elf_spu_reloc_type
100 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
101 {
102 switch (code)
103 {
104 default:
105 return R_SPU_NONE;
106 case BFD_RELOC_SPU_IMM10W:
107 return R_SPU_ADDR10;
108 case BFD_RELOC_SPU_IMM16W:
109 return R_SPU_ADDR16;
110 case BFD_RELOC_SPU_LO16:
111 return R_SPU_ADDR16_LO;
112 case BFD_RELOC_SPU_HI16:
113 return R_SPU_ADDR16_HI;
114 case BFD_RELOC_SPU_IMM18:
115 return R_SPU_ADDR18;
116 case BFD_RELOC_SPU_PCREL16:
117 return R_SPU_REL16;
118 case BFD_RELOC_SPU_IMM7:
119 return R_SPU_ADDR7;
120 case BFD_RELOC_SPU_IMM8:
121 return R_SPU_NONE;
122 case BFD_RELOC_SPU_PCREL9a:
123 return R_SPU_REL9;
124 case BFD_RELOC_SPU_PCREL9b:
125 return R_SPU_REL9I;
126 case BFD_RELOC_SPU_IMM10:
127 return R_SPU_ADDR10I;
128 case BFD_RELOC_SPU_IMM16:
129 return R_SPU_ADDR16I;
130 case BFD_RELOC_32:
131 return R_SPU_ADDR32;
132 case BFD_RELOC_32_PCREL:
133 return R_SPU_REL32;
134 case BFD_RELOC_SPU_PPU32:
135 return R_SPU_PPU32;
136 case BFD_RELOC_SPU_PPU64:
137 return R_SPU_PPU64;
138 }
139 }
140
141 static void
142 spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
143 arelent *cache_ptr,
144 Elf_Internal_Rela *dst)
145 {
146 enum elf_spu_reloc_type r_type;
147
148 r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
149 BFD_ASSERT (r_type < R_SPU_max);
150 cache_ptr->howto = &elf_howto_table[(int) r_type];
151 }
152
153 static reloc_howto_type *
154 spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
155 bfd_reloc_code_real_type code)
156 {
157 enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);
158
159 if (r_type == R_SPU_NONE)
160 return NULL;
161
162 return elf_howto_table + r_type;
163 }
164
165 static reloc_howto_type *
166 spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
167 const char *r_name)
168 {
169 unsigned int i;
170
171 for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
172 if (elf_howto_table[i].name != NULL
173 && strcasecmp (elf_howto_table[i].name, r_name) == 0)
174 return &elf_howto_table[i];
175
176 return NULL;
177 }
178
/* Apply R_SPU_REL9 and R_SPU_REL9I relocs.  Both place a 9-bit
   pc-relative word offset into two discontiguous instruction fields
   (see the 0x0180007f and 0x0000c07f dst_masks in elf_howto_table),
   which the generic reloc machinery cannot express.  */

static bfd_reloc_status_type
spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
	      void *data, asection *input_section,
	      bfd *output_bfd, char **error_message)
{
  bfd_size_type octets;
  bfd_vma val;
  long insn;

  /* If this is a relocatable link (output_bfd test tells us), just
     call the generic function.  Any adjustment will be done at final
     link time.  */
  if (output_bfd != NULL)
    return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
				  input_section, output_bfd, error_message);

  if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
    return bfd_reloc_outofrange;
  octets = reloc_entry->address * bfd_octets_per_byte (abfd);

  /* Get symbol value.  */
  val = 0;
  if (!bfd_is_com_section (symbol->section))
    val = symbol->value;
  if (symbol->section->output_section)
    val += symbol->section->output_section->vma;

  val += reloc_entry->addend;

  /* Make it pc-relative.  */
  val -= input_section->output_section->vma + input_section->output_offset;

  /* The offset counts in 4-byte words; check the signed 9-bit range
     [-256, 255] with a single unsigned comparison.  */
  val >>= 2;
  if (val + 256 >= 512)
    return bfd_reloc_overflow;

  insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);

  /* Move two high bits of value to REL9I and REL9 position.
     The mask will take care of selecting the right field.  */
  val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
  insn &= ~reloc_entry->howto->dst_mask;
  insn |= val & reloc_entry->howto->dst_mask;
  bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
  return bfd_reloc_ok;
}
227
228 static bfd_boolean
229 spu_elf_new_section_hook (bfd *abfd, asection *sec)
230 {
231 if (!sec->used_by_bfd)
232 {
233 struct _spu_elf_section_data *sdata;
234
235 sdata = bfd_zalloc (abfd, sizeof (*sdata));
236 if (sdata == NULL)
237 return FALSE;
238 sec->used_by_bfd = sdata;
239 }
240
241 return _bfd_elf_new_section_hook (abfd, sec);
242 }
243
/* Set up overlay info for executables.  Walks the program headers of
   an executable or dynamic object, numbering each PT_LOAD segment
   flagged PF_OVERLAY and recording that number on every contained
   section.  */

static bfd_boolean
spu_elf_object_p (bfd *abfd)
{
  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
    {
      unsigned int i, num_ovl, num_buf;
      Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
      Elf_Internal_Ehdr *ehdr = elf_elfheader (abfd);
      Elf_Internal_Phdr *last_phdr = NULL;

      for (num_buf = 0, num_ovl = 0, i = 0; i < ehdr->e_phnum; i++, phdr++)
	if (phdr->p_type == PT_LOAD && (phdr->p_flags & PF_OVERLAY) != 0)
	  {
	    unsigned int j;

	    ++num_ovl;
	    /* Segments whose vaddrs agree in the low 18 bits share an
	       overlay buffer; start a new buffer otherwise.  */
	    if (last_phdr == NULL
		|| ((last_phdr->p_vaddr ^ phdr->p_vaddr) & 0x3ffff) != 0)
	      ++num_buf;
	    last_phdr = phdr;
	    /* Tag every section inside this segment with its overlay
	       and buffer numbers (section 0 is the null section, so
	       start at 1).  */
	    for (j = 1; j < elf_numsections (abfd); j++)
	      {
		Elf_Internal_Shdr *shdr = elf_elfsections (abfd)[j];

		if (ELF_IS_SECTION_IN_SEGMENT_MEMORY (shdr, phdr))
		  {
		    asection *sec = shdr->bfd_section;
		    spu_elf_section_data (sec)->u.o.ovl_index = num_ovl;
		    spu_elf_section_data (sec)->u.o.ovl_buf = num_buf;
		  }
	      }
	  }
    }
  return TRUE;
}
281
282 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
283 strip --strip-unneeded will not remove them. */
284
285 static void
286 spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
287 {
288 if (sym->name != NULL
289 && sym->section != bfd_abs_section_ptr
290 && strncmp (sym->name, "_EAR_", 5) == 0)
291 sym->flags |= BSF_KEEP;
292 }
293
/* SPU ELF linker hash table.  Extends the generic ELF hash table with
   overlay bookkeeping and --auto-overlay state.  */

struct spu_link_hash_table
{
  struct elf_link_hash_table elf;

  /* Shortcuts to overlay sections.  OVL_SEC holds the overlay
     sections sorted by vma; a section with ovl_index K is at
     OVL_SEC[K - 1] (see spu_elf_find_overlays).  */
  asection *ovtab;
  asection *toe;
  asection **ovl_sec;

  /* Count of stubs in each overlay section.  Index 0 counts stubs in
     the non-overlay area.  */
  unsigned int *stub_count;

  /* The stub section for each overlay section.  */
  asection **stub_sec;

  /* Overlay manager entry points, looked up (not created) by
     spu_elf_find_overlays.  */
  struct elf_link_hash_entry *ovly_load;
  struct elf_link_hash_entry *ovly_return;
  unsigned long ovly_load_r_symndx;

  /* Number of overlay buffers.  */
  unsigned int num_buf;

  /* Total number of overlays.  */
  unsigned int num_overlays;

  /* How much memory we have.  */
  unsigned int local_store;
  /* Local store --auto-overlay should reserve for non-overlay
     functions and data.  */
  unsigned int overlay_fixed;
  /* Local store --auto-overlay should reserve for stack and heap.  */
  unsigned int reserved;
  /* Count of overlay stubs needed in non-overlay area.  */
  unsigned int non_ovly_stub;

  /* Stash various callbacks for --auto-overlay.  */
  void (*spu_elf_load_ovl_mgr) (void);
  FILE *(*spu_elf_open_overlay_script) (void);
  void (*spu_elf_relink) (void);

  /* Bit 0 set if --auto-overlay.
     Bit 1 set if --auto-relink.
     Bit 2 set if --overlay-rodata.  */
  unsigned int auto_overlay : 3;
#define AUTO_OVERLAY 1
#define AUTO_RELINK 2
#define OVERLAY_RODATA 4

  /* Set if we should emit symbols for stubs.  */
  unsigned int emit_stub_syms:1;

  /* Set if we want stubs on calls out of overlay regions to
     non-overlay regions.  */
  unsigned int non_overlay_stubs : 1;

  /* Set on error.  */
  unsigned int stub_err : 1;

  /* Set if stack size analysis should be done.  */
  unsigned int stack_analysis : 1;

  /* Set if __stack_* syms will be emitted.  */
  unsigned int emit_stack_syms : 1;
};
360
/* Hijack the generic got fields for overlay stub accounting.  One
   entry per (symbol, addend, overlay) needing a stub.  */

struct got_entry
{
  struct got_entry *next;
  /* Overlay the stub lives in; 0 means the non-overlay area.  */
  unsigned int ovl;
  /* Reloc addend, distinguishing stubs for offset references.  */
  bfd_vma addend;
  /* Address of the built stub, or (bfd_vma) -1 until built.  */
  bfd_vma stub_addr;
};

/* Retrieve the SPU-flavoured hash table from generic link info.  */
#define spu_hash_table(p) \
  ((struct spu_link_hash_table *) ((p)->hash))
373
/* Create a spu ELF linker hash table.  */

static struct bfd_link_hash_table *
spu_elf_link_hash_table_create (bfd *abfd)
{
  struct spu_link_hash_table *htab;

  htab = bfd_malloc (sizeof (*htab));
  if (htab == NULL)
    return NULL;

  if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
				      _bfd_elf_link_hash_newfunc,
				      sizeof (struct elf_link_hash_entry)))
    {
      free (htab);
      return NULL;
    }

  /* Zero all the SPU-specific fields.  NOTE: this relies on "ovtab"
     being the first member after "elf" in the struct declaration.  */
  memset (&htab->ovtab, 0,
	  sizeof (*htab) - offsetof (struct spu_link_hash_table, ovtab));

  /* The got fields are hijacked for stub lists (struct got_entry
     above), so initialise them as glist pointers, not refcounts.  */
  htab->elf.init_got_refcount.refcount = 0;
  htab->elf.init_got_refcount.glist = NULL;
  htab->elf.init_got_offset.offset = 0;
  htab->elf.init_got_offset.glist = NULL;
  return &htab->elf.root;
}
402
/* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
   to (hash, NULL) for global symbols, and (NULL, sym) for locals.  Set
   *SYMSECP to the symbol's section.  *LOCSYMSP caches local syms.
   Any of HP, SYMP, SYMSECP may be NULL if the caller doesn't need
   that result.  Returns FALSE only if local syms can't be read.  */

static bfd_boolean
get_sym_h (struct elf_link_hash_entry **hp,
	   Elf_Internal_Sym **symp,
	   asection **symsecp,
	   Elf_Internal_Sym **locsymsp,
	   unsigned long r_symndx,
	   bfd *ibfd)
{
  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;

  if (r_symndx >= symtab_hdr->sh_info)
    {
      /* Global symbol: indices at or beyond sh_info map into the
	 hash entry array.  */
      struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
      struct elf_link_hash_entry *h;

      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
      /* Follow indirect and warning links to the real symbol.  */
      while (h->root.type == bfd_link_hash_indirect
	     || h->root.type == bfd_link_hash_warning)
	h = (struct elf_link_hash_entry *) h->root.u.i.link;

      if (hp != NULL)
	*hp = h;

      if (symp != NULL)
	*symp = NULL;

      if (symsecp != NULL)
	{
	  asection *symsec = NULL;
	  if (h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	    symsec = h->root.u.def.section;
	  *symsecp = symsec;
	}
    }
  else
    {
      Elf_Internal_Sym *sym;
      Elf_Internal_Sym *locsyms = *locsymsp;

      if (locsyms == NULL)
	{
	  /* Read the local symbols on first use, preferring any copy
	     already cached on the symtab header, and remember them in
	     *LOCSYMSP for subsequent calls.  */
	  locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
	  if (locsyms == NULL)
	    locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr,
					    symtab_hdr->sh_info,
					    0, NULL, NULL, NULL);
	  if (locsyms == NULL)
	    return FALSE;
	  *locsymsp = locsyms;
	}
      sym = locsyms + r_symndx;

      if (hp != NULL)
	*hp = NULL;

      if (symp != NULL)
	*symp = sym;

      if (symsecp != NULL)
	*symsecp = bfd_section_from_elf_index (ibfd, sym->st_shndx);
    }

  return TRUE;
}
472
/* Create the note section if not already present.  This is done early so
   that the linker maps the sections to the right place in the output.  */

bfd_boolean
spu_elf_create_sections (struct bfd_link_info *info,
			 int stack_analysis,
			 int emit_stack_syms)
{
  bfd *ibfd;
  struct spu_link_hash_table *htab = spu_hash_table (info);

  /* Stash some options away where we can get at them later.  */
  htab->stack_analysis = stack_analysis;
  htab->emit_stack_syms = emit_stack_syms;

  /* If any input already supplies the note section, don't make one.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
      break;

  if (ibfd == NULL)
    {
      /* Make SPU_PTNOTE_SPUNAME section.  */
      asection *s;
      size_t name_len;
      size_t size;
      bfd_byte *data;
      flagword flags;

      /* Attach the new section to the first input bfd.  */
      ibfd = info->input_bfds;
      flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
      if (s == NULL
	  || !bfd_set_section_alignment (ibfd, s, 4))
	return FALSE;

      /* Note layout: 12-byte header (namesz, descsz, type) followed
	 by the name and descriptor, each padded to a multiple of 4
	 bytes ((x + 3) & -4 rounds up).  */
      name_len = strlen (bfd_get_filename (info->output_bfd)) + 1;
      size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
      size += (name_len + 3) & -4;

      if (!bfd_set_section_size (ibfd, s, size))
	return FALSE;

      data = bfd_zalloc (ibfd, size);
      if (data == NULL)
	return FALSE;

      bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
      bfd_put_32 (ibfd, name_len, data + 4);
      bfd_put_32 (ibfd, 1, data + 8);
      memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
      memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
	      bfd_get_filename (info->output_bfd), name_len);
      s->contents = data;
    }

  return TRUE;
}
530
531 /* qsort predicate to sort sections by vma. */
532
533 static int
534 sort_sections (const void *a, const void *b)
535 {
536 const asection *const *s1 = a;
537 const asection *const *s2 = b;
538 bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;
539
540 if (delta != 0)
541 return delta < 0 ? -1 : 1;
542
543 return (*s1)->index - (*s2)->index;
544 }
545
546 /* Identify overlays in the output bfd, and number them. */
547
548 bfd_boolean
549 spu_elf_find_overlays (struct bfd_link_info *info)
550 {
551 struct spu_link_hash_table *htab = spu_hash_table (info);
552 asection **alloc_sec;
553 unsigned int i, n, ovl_index, num_buf;
554 asection *s;
555 bfd_vma ovl_end;
556
557 if (info->output_bfd->section_count < 2)
558 return FALSE;
559
560 alloc_sec
561 = bfd_malloc (info->output_bfd->section_count * sizeof (*alloc_sec));
562 if (alloc_sec == NULL)
563 return FALSE;
564
565 /* Pick out all the alloced sections. */
566 for (n = 0, s = info->output_bfd->sections; s != NULL; s = s->next)
567 if ((s->flags & SEC_ALLOC) != 0
568 && (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
569 && s->size != 0)
570 alloc_sec[n++] = s;
571
572 if (n == 0)
573 {
574 free (alloc_sec);
575 return FALSE;
576 }
577
578 /* Sort them by vma. */
579 qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);
580
581 /* Look for overlapping vmas. Any with overlap must be overlays.
582 Count them. Also count the number of overlay regions. */
583 ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
584 for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
585 {
586 s = alloc_sec[i];
587 if (s->vma < ovl_end)
588 {
589 asection *s0 = alloc_sec[i - 1];
590
591 if (spu_elf_section_data (s0)->u.o.ovl_index == 0)
592 {
593 alloc_sec[ovl_index] = s0;
594 spu_elf_section_data (s0)->u.o.ovl_index = ++ovl_index;
595 spu_elf_section_data (s0)->u.o.ovl_buf = ++num_buf;
596 }
597 alloc_sec[ovl_index] = s;
598 spu_elf_section_data (s)->u.o.ovl_index = ++ovl_index;
599 spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
600 if (s0->vma != s->vma)
601 {
602 info->callbacks->einfo (_("%X%P: overlay sections %A and %A "
603 "do not start at the same address.\n"),
604 s0, s);
605 return FALSE;
606 }
607 if (ovl_end < s->vma + s->size)
608 ovl_end = s->vma + s->size;
609 }
610 else
611 ovl_end = s->vma + s->size;
612 }
613
614 htab->num_overlays = ovl_index;
615 htab->num_buf = num_buf;
616 htab->ovl_sec = alloc_sec;
617 htab->ovly_load = elf_link_hash_lookup (&htab->elf, "__ovly_load",
618 FALSE, FALSE, FALSE);
619 htab->ovly_return = elf_link_hash_lookup (&htab->elf, "__ovly_return",
620 FALSE, FALSE, FALSE);
621 return ovl_index != 0;
622 }
623
/* Support two sizes of overlay stubs, a slower more compact stub of two
   instructions, and a faster stub of four instructions.  */
#ifndef OVL_STUB_SIZE
/* Default to faster.  */
#define OVL_STUB_SIZE 16
/* #define OVL_STUB_SIZE 8 */
#endif
/* SPU instruction opcodes used when emitting stubs (see build_stub);
   register and immediate fields are OR'd in at the low bits.  */
#define BRSL 0x33000000
#define BR 0x32000000
#define NOP 0x40200000
#define LNOP 0x00200000
#define ILA 0x42000000
636
637 /* Return true for all relative and absolute branch instructions.
638 bra 00110000 0..
639 brasl 00110001 0..
640 br 00110010 0..
641 brsl 00110011 0..
642 brz 00100000 0..
643 brnz 00100001 0..
644 brhz 00100010 0..
645 brhnz 00100011 0.. */
646
647 static bfd_boolean
648 is_branch (const unsigned char *insn)
649 {
650 return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
651 }
652
653 /* Return true for all indirect branch instructions.
654 bi 00110101 000
655 bisl 00110101 001
656 iret 00110101 010
657 bisled 00110101 011
658 biz 00100101 000
659 binz 00100101 001
660 bihz 00100101 010
661 bihnz 00100101 011 */
662
663 static bfd_boolean
664 is_indirect_branch (const unsigned char *insn)
665 {
666 return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
667 }
668
669 /* Return true for branch hint instructions.
670 hbra 0001000..
671 hbrr 0001001.. */
672
673 static bfd_boolean
674 is_hint (const unsigned char *insn)
675 {
676 return (insn[0] & 0xfc) == 0x10;
677 }
678
679 /* True if INPUT_SECTION might need overlay stubs. */
680
681 static bfd_boolean
682 maybe_needs_stubs (asection *input_section, bfd *output_bfd)
683 {
684 /* No stubs for debug sections and suchlike. */
685 if ((input_section->flags & SEC_ALLOC) == 0)
686 return FALSE;
687
688 /* No stubs for link-once sections that will be discarded. */
689 if (input_section->output_section == NULL
690 || input_section->output_section->owner != output_bfd)
691 return FALSE;
692
693 /* Don't create stubs for .eh_frame references. */
694 if (strcmp (input_section->name, ".eh_frame") == 0)
695 return FALSE;
696
697 return TRUE;
698 }
699
/* Classification returned by needs_ovl_stub.  */
enum _stub_type
{
  no_stub,	/* No stub needed for this reference.  */
  ovl_stub,	/* Call must go via an overlay stub.  */
  nonovl_stub,	/* Stub must be placed in the non-overlay area.  */
  stub_error	/* Couldn't read the instruction contents.  */
};
707
/* Return non-zero if this reloc symbol should go via an overlay stub.
   Return 2 if the stub must be in non-overlay area.  H/SYM identify
   the referenced symbol (exactly one is non-NULL), SYM_SEC its
   section; IRELA is the reloc in INPUT_SECTION, and CONTENTS, if
   non-NULL, holds INPUT_SECTION's data.  */

static enum _stub_type
needs_ovl_stub (struct elf_link_hash_entry *h,
		Elf_Internal_Sym *sym,
		asection *sym_sec,
		asection *input_section,
		Elf_Internal_Rela *irela,
		bfd_byte *contents,
		struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  enum elf_spu_reloc_type r_type;
  unsigned int sym_type;
  bfd_boolean branch;
  enum _stub_type ret = no_stub;

  /* Undefined symbols and those outside the output bfd never get
     stubs.  */
  if (sym_sec == NULL
      || sym_sec->output_section == NULL
      || sym_sec->output_section->owner != info->output_bfd
      || spu_elf_section_data (sym_sec->output_section) == NULL)
    return ret;

  if (h != NULL)
    {
      /* Ensure no stubs for user supplied overlay manager syms.  */
      if (h == htab->ovly_load || h == htab->ovly_return)
	return ret;

      /* setjmp always goes via an overlay stub, because then the return
	 and hence the longjmp goes via __ovly_return.  That magically
	 makes setjmp/longjmp between overlays work.  */
      if (strncmp (h->root.root.string, "setjmp", 6) == 0
	  && (h->root.root.string[6] == '\0' || h->root.root.string[6] == '@'))
	ret = ovl_stub;
    }

  /* Usually, symbols in non-overlay sections don't need stubs.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index == 0
      && !htab->non_overlay_stubs)
    return ret;

  if (h != NULL)
    sym_type = h->type;
  else
    sym_type = ELF_ST_TYPE (sym->st_info);

  r_type = ELF32_R_TYPE (irela->r_info);
  branch = FALSE;
  if (r_type == R_SPU_REL16 || r_type == R_SPU_ADDR16)
    {
      bfd_byte insn[4];

      /* Fetch the instruction being relocated so we can tell a
	 branch from a pointer initialisation.  */
      if (contents == NULL)
	{
	  contents = insn;
	  if (!bfd_get_section_contents (input_section->owner,
					 input_section,
					 contents,
					 irela->r_offset, 4))
	    return stub_error;
	}
      else
	contents += irela->r_offset;

      if (is_branch (contents) || is_hint (contents))
	{
	  branch = TRUE;
	  /* 0x31 with bit 1 masked matches brasl/brsl: a call.  The
	     "contents != insn" test skips the warning when we only
	     read 4 bytes and cannot be sure of wider context.  */
	  if ((contents[0] & 0xfd) == 0x31
	      && sym_type != STT_FUNC
	      && contents != insn)
	    {
	      /* It's common for people to write assembly and forget
		 to give function symbols the right type.  Handle
		 calls to such symbols, but warn so that (hopefully)
		 people will fix their code.  We need the symbol
		 type to be correct to distinguish function pointer
		 initialisation from other pointer initialisations.  */
	      const char *sym_name;

	      if (h != NULL)
		sym_name = h->root.root.string;
	      else
		{
		  Elf_Internal_Shdr *symtab_hdr;
		  symtab_hdr = &elf_tdata (input_section->owner)->symtab_hdr;
		  sym_name = bfd_elf_sym_name (input_section->owner,
					       symtab_hdr,
					       sym,
					       sym_sec);
		}
	      /* NOTE(review): the %B bfd argument is passed before
		 the %s string; this appears to rely on bfd's error
		 handler consuming %A/%B args first — confirm against
		 _bfd_error_handler.  */
	      (*_bfd_error_handler) (_("warning: call to non-function"
				       " symbol %s defined in %B"),
				     sym_sec->owner, sym_name);

	    }
	}
    }

  /* Non-branch references to data never need stubs.  */
  if (sym_type != STT_FUNC
      && !branch
      && (sym_sec->flags & SEC_CODE) == 0)
    return ret;

  /* A reference from some other section to a symbol in an overlay
     section needs a stub.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index
      != spu_elf_section_data (input_section->output_section)->u.o.ovl_index)
    return ovl_stub;

  /* If this insn isn't a branch then we are possibly taking the
     address of a function and passing it out somehow.  */
  return !branch && sym_type == STT_FUNC ? nonovl_stub : ret;
}
823
824 static bfd_boolean
825 count_stub (struct spu_link_hash_table *htab,
826 bfd *ibfd,
827 asection *isec,
828 enum _stub_type stub_type,
829 struct elf_link_hash_entry *h,
830 const Elf_Internal_Rela *irela)
831 {
832 unsigned int ovl = 0;
833 struct got_entry *g, **head;
834 bfd_vma addend;
835
836 /* If this instruction is a branch or call, we need a stub
837 for it. One stub per function per overlay.
838 If it isn't a branch, then we are taking the address of
839 this function so need a stub in the non-overlay area
840 for it. One stub per function. */
841 if (stub_type != nonovl_stub)
842 ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;
843
844 if (h != NULL)
845 head = &h->got.glist;
846 else
847 {
848 if (elf_local_got_ents (ibfd) == NULL)
849 {
850 bfd_size_type amt = (elf_tdata (ibfd)->symtab_hdr.sh_info
851 * sizeof (*elf_local_got_ents (ibfd)));
852 elf_local_got_ents (ibfd) = bfd_zmalloc (amt);
853 if (elf_local_got_ents (ibfd) == NULL)
854 return FALSE;
855 }
856 head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
857 }
858
859 addend = 0;
860 if (irela != NULL)
861 addend = irela->r_addend;
862
863 if (ovl == 0)
864 {
865 struct got_entry *gnext;
866
867 for (g = *head; g != NULL; g = g->next)
868 if (g->addend == addend && g->ovl == 0)
869 break;
870
871 if (g == NULL)
872 {
873 /* Need a new non-overlay area stub. Zap other stubs. */
874 for (g = *head; g != NULL; g = gnext)
875 {
876 gnext = g->next;
877 if (g->addend == addend)
878 {
879 htab->stub_count[g->ovl] -= 1;
880 free (g);
881 }
882 }
883 }
884 }
885 else
886 {
887 for (g = *head; g != NULL; g = g->next)
888 if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
889 break;
890 }
891
892 if (g == NULL)
893 {
894 g = bfd_malloc (sizeof *g);
895 if (g == NULL)
896 return FALSE;
897 g->ovl = ovl;
898 g->addend = addend;
899 g->stub_addr = (bfd_vma) -1;
900 g->next = *head;
901 *head = g;
902
903 htab->stub_count[ovl] += 1;
904 }
905
906 return TRUE;
907 }
908
/* Two instruction overlay stubs look like:

   brsl $75,__ovly_load
   .word target_ovl_and_address

   ovl_and_address is a word with the overlay number in the top 14 bits
   and local store address in the bottom 18 bits.

   Four instruction overlay stubs look like:

   ila $78,ovl_number
   lnop
   ila $79,target_address
   br __ovly_load  */

static bfd_boolean
build_stub (struct spu_link_hash_table *htab,
	    bfd *ibfd,
	    asection *isec,
	    enum _stub_type stub_type,
	    struct elf_link_hash_entry *h,
	    const Elf_Internal_Rela *irela,
	    bfd_vma dest,
	    asection *dest_sec)
{
  unsigned int ovl;
  struct got_entry *g, **head;
  asection *sec;
  bfd_vma addend, val, from, to;

  ovl = 0;
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

  /* Locate the got_entry that count_stub created for this
     (symbol, addend, overlay) combination.  */
  if (h != NULL)
    head = &h->got.glist;
  else
    head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);

  addend = 0;
  if (irela != NULL)
    addend = irela->r_addend;

  for (g = *head; g != NULL; g = g->next)
    if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
      break;
  if (g == NULL)
    abort ();

  /* A non-overlay stub satisfies overlay references too; nothing to
     build for this overlay.  */
  if (g->ovl == 0 && ovl != 0)
    return TRUE;

  /* Already built on a previous call.  */
  if (g->stub_addr != (bfd_vma) -1)
    return TRUE;

  sec = htab->stub_sec[ovl];
  dest += dest_sec->output_offset + dest_sec->output_section->vma;
  from = sec->size + sec->output_offset + sec->output_section->vma;
  g->stub_addr = from;
  to = (htab->ovly_load->root.u.def.value
	+ htab->ovly_load->root.u.def.section->output_offset
	+ htab->ovly_load->root.u.def.section->output_section->vma);
  val = to - from;
  /* For the 4-insn stub the branch to __ovly_load is the fourth
     word, 12 bytes past FROM.  */
  if (OVL_STUB_SIZE == 16)
    val -= 12;
  /* Addresses must be word aligned and the branch displacement must
     fit in the signed 16-bit word-offset field.  */
  if (((dest | to | from) & 3) != 0
      || val + 0x20000 >= 0x40000)
    {
      htab->stub_err = 1;
      return FALSE;
    }
  ovl = spu_elf_section_data (dest_sec->output_section)->u.o.ovl_index;

  if (OVL_STUB_SIZE == 16)
    {
      bfd_put_32 (sec->owner, ILA + ((ovl << 7) & 0x01ffff80) + 78,
		  sec->contents + sec->size);
      bfd_put_32 (sec->owner, LNOP,
		  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, ILA + ((dest << 7) & 0x01ffff80) + 79,
		  sec->contents + sec->size + 8);
      bfd_put_32 (sec->owner, BR + ((val << 5) & 0x007fff80),
		  sec->contents + sec->size + 12);
    }
  else if (OVL_STUB_SIZE == 8)
    {
      bfd_put_32 (sec->owner, BRSL + ((val << 5) & 0x007fff80) + 75,
		  sec->contents + sec->size);

      /* Overlay number in the top 14 bits, target local-store
	 address in the bottom 18.  */
      val = (dest & 0x3ffff) | (ovl << 18);
      bfd_put_32 (sec->owner, val,
		  sec->contents + sec->size + 4);
    }
  else
    abort ();
  sec->size += OVL_STUB_SIZE;

  if (htab->emit_stub_syms)
    {
      size_t len;
      char *name;
      int add;

      /* Stub symbols are named "<ovl:%08x>.ovl_call.<symbol>" with an
	 optional "+<addend>" suffix; local symbols use
	 "<secid>:<symndx>" in place of a name.  */
      len = 8 + sizeof (".ovl_call.") - 1;
      if (h != NULL)
	len += strlen (h->root.root.string);
      else
	len += 8 + 1 + 8;
      add = 0;
      if (irela != NULL)
	add = (int) irela->r_addend & 0xffffffff;
      if (add != 0)
	len += 1 + 8;
      name = bfd_malloc (len);
      if (name == NULL)
	return FALSE;

      sprintf (name, "%08x.ovl_call.", g->ovl);
      if (h != NULL)
	strcpy (name + 8 + sizeof (".ovl_call.") - 1, h->root.root.string);
      else
	sprintf (name + 8 + sizeof (".ovl_call.") - 1, "%x:%x",
		 dest_sec->id & 0xffffffff,
		 (int) ELF32_R_SYM (irela->r_info) & 0xffffffff);
      if (add != 0)
	sprintf (name + len - 9, "+%x", add);

      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
      free (name);
      if (h == NULL)
	return FALSE;
      if (h->root.type == bfd_link_hash_new)
	{
	  h->root.type = bfd_link_hash_defined;
	  h->root.u.def.section = sec;
	  h->root.u.def.value = sec->size - OVL_STUB_SIZE;
	  h->size = OVL_STUB_SIZE;
	  h->type = STT_FUNC;
	  h->ref_regular = 1;
	  h->def_regular = 1;
	  h->ref_regular_nonweak = 1;
	  h->forced_local = 1;
	  h->non_elf = 0;
	}
    }

  return TRUE;
}
1057
1058 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
1059 symbols. */
1060
1061 static bfd_boolean
1062 allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1063 {
1064 /* Symbols starting with _SPUEAR_ need a stub because they may be
1065 invoked by the PPU. */
1066 struct bfd_link_info *info = inf;
1067 struct spu_link_hash_table *htab = spu_hash_table (info);
1068 asection *sym_sec;
1069
1070 if ((h->root.type == bfd_link_hash_defined
1071 || h->root.type == bfd_link_hash_defweak)
1072 && h->def_regular
1073 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
1074 && (sym_sec = h->root.u.def.section) != NULL
1075 && sym_sec->output_section != NULL
1076 && sym_sec->output_section->owner == info->output_bfd
1077 && spu_elf_section_data (sym_sec->output_section) != NULL
1078 && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
1079 || htab->non_overlay_stubs))
1080 {
1081 count_stub (htab, NULL, NULL, nonovl_stub, h, NULL);
1082 }
1083
1084 return TRUE;
1085 }
1086
1087 static bfd_boolean
1088 build_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1089 {
1090 /* Symbols starting with _SPUEAR_ need a stub because they may be
1091 invoked by the PPU. */
1092 struct bfd_link_info *info = inf;
1093 struct spu_link_hash_table *htab = spu_hash_table (info);
1094 asection *sym_sec;
1095
1096 if ((h->root.type == bfd_link_hash_defined
1097 || h->root.type == bfd_link_hash_defweak)
1098 && h->def_regular
1099 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
1100 && (sym_sec = h->root.u.def.section) != NULL
1101 && sym_sec->output_section != NULL
1102 && sym_sec->output_section->owner == info->output_bfd
1103 && spu_elf_section_data (sym_sec->output_section) != NULL
1104 && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
1105 || htab->non_overlay_stubs))
1106 {
1107 build_stub (htab, NULL, NULL, nonovl_stub, h, NULL,
1108 h->root.u.def.value, sym_sec);
1109 }
1110
1111 return TRUE;
1112 }
1113
/* Size or build stubs.  Walk every reloc of every code section in
   every SPU input bfd; for each reloc that needs an overlay stub,
   either count it (BUILD is FALSE, first pass) or emit it into the
   stub sections sized by the first pass (BUILD is TRUE).  Returns
   FALSE on error.  */

static bfd_boolean
process_stubs (struct bfd_link_info *info, bfd_boolean build)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      Elf_Internal_Shdr *symtab_hdr;
      asection *isec;
      Elf_Internal_Sym *local_syms = NULL;

      /* Skip input files that are not SPU ELF objects.  */
      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      /* We'll need the symbol table in a second.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      if (symtab_hdr->sh_info == 0)
	continue;

      /* Walk over each section attached to the input bfd.  */
      for (isec = ibfd->sections; isec != NULL; isec = isec->next)
	{
	  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;

	  /* If there aren't any relocs, then there's nothing more to do.  */
	  if ((isec->flags & SEC_RELOC) == 0
	      || isec->reloc_count == 0)
	    continue;

	  if (!maybe_needs_stubs (isec, info->output_bfd))
	    continue;

	  /* Get the relocs.  */
	  internal_relocs = _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
						       info->keep_memory);
	  if (internal_relocs == NULL)
	    goto error_ret_free_local;

	  /* Now examine each relocation.  */
	  irela = internal_relocs;
	  irelaend = irela + isec->reloc_count;
	  for (; irela < irelaend; irela++)
	    {
	      enum elf_spu_reloc_type r_type;
	      unsigned int r_indx;
	      asection *sym_sec;
	      Elf_Internal_Sym *sym;
	      struct elf_link_hash_entry *h;
	      enum _stub_type stub_type;

	      r_type = ELF32_R_TYPE (irela->r_info);
	      r_indx = ELF32_R_SYM (irela->r_info);

	      if (r_type >= R_SPU_max)
		{
		  bfd_set_error (bfd_error_bad_value);
		  /* Error exits.  The labels sit in the middle of the
		     loop so that the cleanup code can free whatever
		     has been read at the point of failure: first the
		     relocs (unless they are cached on the section),
		     then the local symbols (unless cached on the
		     symtab header).  */
		error_ret_free_internal:
		  if (elf_section_data (isec)->relocs != internal_relocs)
		    free (internal_relocs);
		error_ret_free_local:
		  if (local_syms != NULL
		      && (symtab_hdr->contents
			  != (unsigned char *) local_syms))
		    free (local_syms);
		  return FALSE;
		}

	      /* Determine the reloc target section.  */
	      if (!get_sym_h (&h, &sym, &sym_sec, &local_syms, r_indx, ibfd))
		goto error_ret_free_internal;

	      stub_type = needs_ovl_stub (h, sym, sym_sec, isec, irela,
					  NULL, info);
	      if (stub_type == no_stub)
		continue;
	      else if (stub_type == stub_error)
		goto error_ret_free_internal;

	      /* Lazily allocate the per-overlay stub counters on the
		 first stub found (index 0 is for non-overlay code).  */
	      if (htab->stub_count == NULL)
		{
		  bfd_size_type amt;
		  amt = (htab->num_overlays + 1) * sizeof (*htab->stub_count);
		  htab->stub_count = bfd_zmalloc (amt);
		  if (htab->stub_count == NULL)
		    goto error_ret_free_internal;
		}

	      if (!build)
		{
		  if (!count_stub (htab, ibfd, isec, stub_type, h, irela))
		    goto error_ret_free_internal;
		}
	      else
		{
		  bfd_vma dest;

		  /* Stub destination: symbol value plus reloc addend.  */
		  if (h != NULL)
		    dest = h->root.u.def.value;
		  else
		    dest = sym->st_value;
		  dest += irela->r_addend;
		  if (!build_stub (htab, ibfd, isec, stub_type, h, irela,
				   dest, sym_sec))
		    goto error_ret_free_internal;
		}
	    }

	  /* We're done with the internal relocs, free them.  */
	  if (elf_section_data (isec)->relocs != internal_relocs)
	    free (internal_relocs);
	}

      /* Free the local symbols read by get_sym_h, or cache them on the
	 symtab header when the linker wants memory kept.  */
      if (local_syms != NULL
	  && symtab_hdr->contents != (unsigned char *) local_syms)
	{
	  if (!info->keep_memory)
	    free (local_syms);
	  else
	    symtab_hdr->contents = (unsigned char *) local_syms;
	}
    }

  return TRUE;
}
1242
/* Allocate space for overlay call and return stubs.
   PLACE_SPU_SECTION is a linker callback used to position each new
   section; NON_OVERLAY_STUBS requests stubs even for calls from
   non-overlay code.  Returns 0 on error, 1 if no stubs are needed,
   and 2 after the stub, .ovtab and .toe sections have been created
   and sized.  */

int
spu_elf_size_stubs (struct bfd_link_info *info,
		    void (*place_spu_section) (asection *, asection *,
					       const char *),
		    int non_overlay_stubs)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;
  bfd_size_type amt;
  flagword flags;
  unsigned int i;
  asection *stub;

  /* First pass: count stubs needed per overlay.  */
  htab->non_overlay_stubs = non_overlay_stubs;
  if (!process_stubs (info, FALSE))
    return 0;

  /* _SPUEAR_ entry symbols also need stubs.  */
  elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, info);
  if (htab->stub_err)
    return 0;

  /* stub_count stays NULL when process_stubs found nothing.  */
  if (htab->stub_count == NULL)
    return 1;

  /* One stub section per overlay, plus index 0 for non-overlay code.  */
  ibfd = info->input_bfds;
  amt = (htab->num_overlays + 1) * sizeof (*htab->stub_sec);
  htab->stub_sec = bfd_zmalloc (amt);
  if (htab->stub_sec == NULL)
    return 0;

  flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
	   | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
  stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
  htab->stub_sec[0] = stub;
  if (stub == NULL
      || !bfd_set_section_alignment (ibfd, stub, 3 + (OVL_STUB_SIZE > 8)))
    return 0;
  stub->size = htab->stub_count[0] * OVL_STUB_SIZE;
  (*place_spu_section) (stub, NULL, ".text");

  for (i = 0; i < htab->num_overlays; ++i)
    {
      asection *osec = htab->ovl_sec[i];
      unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
      stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
      htab->stub_sec[ovl] = stub;
      if (stub == NULL
	  || !bfd_set_section_alignment (ibfd, stub, 3 + (OVL_STUB_SIZE > 8)))
	return 0;
      stub->size = htab->stub_count[ovl] * OVL_STUB_SIZE;
      (*place_spu_section) (stub, osec, NULL);
    }

  /* htab->ovtab consists of two arrays.
     .	struct {
     .	  u32 vma;
     .	  u32 size;
     .	  u32 file_off;
     .	  u32 buf;
     .	} _ovly_table[];
     .
     .	struct {
     .	  u32 mapped;
     .	} _ovly_buf_table[];
     .	*/

  flags = (SEC_ALLOC | SEC_LOAD
	   | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
  htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
  if (htab->ovtab == NULL
      || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
    return 0;

  /* 16 bytes per overlay entry, one spare leading entry, plus one
     word per overlay buffer.  */
  htab->ovtab->size = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
  (*place_spu_section) (htab->ovtab, NULL, ".data");

  /* The table of effective addresses (see _EAR_ below) occupies 16
     bytes in .toe.  */
  htab->toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
  if (htab->toe == NULL
      || !bfd_set_section_alignment (ibfd, htab->toe, 4))
    return 0;
  htab->toe->size = 16;
  (*place_spu_section) (htab->toe, NULL, ".toe");

  return 2;
}
1330
1331 /* Functions to handle embedded spu_ovl.o object. */
1332
1333 static void *
1334 ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
1335 {
1336 return stream;
1337 }
1338
1339 static file_ptr
1340 ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
1341 void *stream,
1342 void *buf,
1343 file_ptr nbytes,
1344 file_ptr offset)
1345 {
1346 struct _ovl_stream *os;
1347 size_t count;
1348 size_t max;
1349
1350 os = (struct _ovl_stream *) stream;
1351 max = (const char *) os->end - (const char *) os->start;
1352
1353 if ((ufile_ptr) offset >= max)
1354 return 0;
1355
1356 count = nbytes;
1357 if (count > max - offset)
1358 count = max - offset;
1359
1360 memcpy (buf, (const char *) os->start + offset, count);
1361 return count;
1362 }
1363
1364 bfd_boolean
1365 spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
1366 {
1367 *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
1368 "elf32-spu",
1369 ovl_mgr_open,
1370 (void *) stream,
1371 ovl_mgr_pread,
1372 NULL,
1373 NULL);
1374 return *ovl_bfd != NULL;
1375 }
1376
/* Define an STT_OBJECT symbol.  Look up NAME in the linker hash
   table (creating the entry if needed) and define it in htab->ovtab.
   The caller fills in the value and size.  Returns the hash entry,
   or NULL (with bfd_error set) if some input object already defined
   the symbol, which is not allowed for these linker-generated
   names.  */

static struct elf_link_hash_entry *
define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
{
  struct elf_link_hash_entry *h;

  h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
  if (h == NULL)
    return NULL;

  if (h->root.type != bfd_link_hash_defined
      || !h->def_regular)
    {
      /* Undefined (or merely referenced) so far: define it here.  */
      h->root.type = bfd_link_hash_defined;
      h->root.u.def.section = htab->ovtab;
      h->type = STT_OBJECT;
      h->ref_regular = 1;
      h->def_regular = 1;
      h->ref_regular_nonweak = 1;
      h->non_elf = 0;
    }
  else
    {
      /* A regular input file already defined this reserved name.  */
      (*_bfd_error_handler) (_("%B is not allowed to define %s"),
			     h->root.u.def.section->owner,
			     h->root.root.string);
      bfd_set_error (bfd_error_bad_value);
      return NULL;
    }

  return h;
}
1410
/* Fill in all stubs and the overlay tables.  Run after
   spu_elf_size_stubs once final section layout is known.  EMIT_SYMS
   asks for a symbol to be emitted per stub.  Returns FALSE on any
   error.  */

bfd_boolean
spu_elf_build_stubs (struct bfd_link_info *info, int emit_syms)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  struct elf_link_hash_entry *h;
  bfd_byte *p;
  asection *s;
  bfd *obfd;
  unsigned int i;

  htab->emit_stub_syms = emit_syms;
  if (htab->stub_count == NULL)
    return TRUE;

  /* Allocate contents for each stub section, then move the computed
     size aside into rawsize and zero size; build_stub advances size
     as it emits, letting us cross-check the totals below.  */
  for (i = 0; i <= htab->num_overlays; i++)
    if (htab->stub_sec[i]->size != 0)
      {
	htab->stub_sec[i]->contents = bfd_zalloc (htab->stub_sec[i]->owner,
						  htab->stub_sec[i]->size);
	if (htab->stub_sec[i]->contents == NULL)
	  return FALSE;
	htab->stub_sec[i]->rawsize = htab->stub_sec[i]->size;
	htab->stub_sec[i]->size = 0;
      }

  /* The overlay manager entry points must exist (supplied by the
     built-in spu_ovl.o or by the user).  */
  h = elf_link_hash_lookup (&htab->elf, "__ovly_load", FALSE, FALSE, FALSE);
  htab->ovly_load = h;
  BFD_ASSERT (h != NULL
	      && (h->root.type == bfd_link_hash_defined
		  || h->root.type == bfd_link_hash_defweak)
	      && h->def_regular);

  /* __ovly_load itself must not live in an overlay.  */
  s = h->root.u.def.section->output_section;
  if (spu_elf_section_data (s)->u.o.ovl_index)
    {
      (*_bfd_error_handler) (_("%s in overlay section"),
			     h->root.root.string);
      bfd_set_error (bfd_error_bad_value);
      return FALSE;
    }

  h = elf_link_hash_lookup (&htab->elf, "__ovly_return", FALSE, FALSE, FALSE);
  htab->ovly_return = h;

  /* Fill in all the stubs.  */
  process_stubs (info, TRUE);

  elf_link_hash_traverse (&htab->elf, build_spuear_stubs, info);
  if (htab->stub_err)
    return FALSE;

  /* Each stub section's emitted size must equal the size computed in
     the counting pass.  */
  for (i = 0; i <= htab->num_overlays; i++)
    {
      if (htab->stub_sec[i]->size != htab->stub_sec[i]->rawsize)
	{
	  (*_bfd_error_handler)  (_("stubs don't match calculated size"));
	  bfd_set_error (bfd_error_bad_value);
	  return FALSE;
	}
      htab->stub_sec[i]->rawsize = 0;
    }

  if (htab->stub_err)
    {
      (*_bfd_error_handler) (_("overlay stub relocation overflow"));
      bfd_set_error (bfd_error_bad_value);
      return FALSE;
    }

  htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
  if (htab->ovtab->contents == NULL)
    return FALSE;

  /* Write out _ovly_table.  */
  p = htab->ovtab->contents;
  /* set low bit of .size to mark non-overlay area as present.  */
  p[7] = 1;
  obfd = htab->ovtab->output_section->owner;
  for (s = obfd->sections; s != NULL; s = s->next)
    {
      unsigned int ovl_index = spu_elf_section_data (s)->u.o.ovl_index;

      if (ovl_index != 0)
	{
	  /* 16-byte table entry: vma, size rounded to a quadword,
	     file_off (filled in later), buffer number.  */
	  unsigned long off = ovl_index * 16;
	  unsigned int ovl_buf = spu_elf_section_data (s)->u.o.ovl_buf;

	  bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
	  bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16, p + off + 4);
	  /* file_off written later in spu_elf_modify_program_headers.  */
	  bfd_put_32 (htab->ovtab->owner, ovl_buf, p + off + 12);
	}
    }

  /* Define the symbols delimiting the tables laid out above.  */
  h = define_ovtab_symbol (htab, "_ovly_table");
  if (h == NULL)
    return FALSE;
  h->root.u.def.value = 16;
  h->size = htab->num_overlays * 16;

  h = define_ovtab_symbol (htab, "_ovly_table_end");
  if (h == NULL)
    return FALSE;
  h->root.u.def.value = htab->num_overlays * 16 + 16;
  h->size = 0;

  h = define_ovtab_symbol (htab, "_ovly_buf_table");
  if (h == NULL)
    return FALSE;
  h->root.u.def.value = htab->num_overlays * 16 + 16;
  h->size = htab->num_buf * 4;

  h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
  if (h == NULL)
    return FALSE;
  h->root.u.def.value = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
  h->size = 0;

  /* _EAR_ marks the table of effective addresses in .toe.  */
  h = define_ovtab_symbol (htab, "_EAR_");
  if (h == NULL)
    return FALSE;
  h->root.u.def.section = htab->toe;
  h->root.u.def.value = 0;
  h->size = 16;

  return TRUE;
}
1540
/* Check that all loadable section VMAs lie in the range
   LO .. HI inclusive, and stash some parameters for --auto-overlay.
   Returns the first offending section, or NULL if everything fits
   (in which case auto-overlay is switched off as unnecessary).  */

asection *
spu_elf_check_vma (struct bfd_link_info *info,
		   int auto_overlay,
		   unsigned int lo,
		   unsigned int hi,
		   unsigned int overlay_fixed,
		   unsigned int reserved,
		   void (*spu_elf_load_ovl_mgr) (void),
		   FILE *(*spu_elf_open_overlay_script) (void),
		   void (*spu_elf_relink) (void))
{
  struct elf_segment_map *m;
  unsigned int i;
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *abfd = info->output_bfd;

  /* Record the auto-overlay parameters and callbacks for later use.  */
  if (auto_overlay & AUTO_OVERLAY)
    htab->auto_overlay = auto_overlay;
  htab->local_store = hi + 1 - lo;
  htab->overlay_fixed = overlay_fixed;
  htab->reserved = reserved;
  htab->spu_elf_load_ovl_mgr = spu_elf_load_ovl_mgr;
  htab->spu_elf_open_overlay_script = spu_elf_open_overlay_script;
  htab->spu_elf_relink = spu_elf_relink;

  /* Scan every section of every PT_LOAD segment; both the start and
     the last byte of each non-empty section must lie within LO..HI.  */
  for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
    if (m->p_type == PT_LOAD)
      for (i = 0; i < m->count; i++)
	if (m->sections[i]->size != 0
	    && (m->sections[i]->vma < lo
		|| m->sections[i]->vma > hi
		|| m->sections[i]->vma + m->sections[i]->size - 1 > hi))
	  return m->sections[i];

  /* No need for overlays if it all fits.  */
  htab->auto_overlay = 0;
  return NULL;
}
1582
/* OFFSET in SEC (presumably) is the beginning of a function prologue.
   Search for stack adjusting insns, and return the sp delta.
   Simulates the first few instructions of the prologue in a 128-entry
   register file, tracking immediate loads and additions until the
   stack pointer (register 1) is updated.  Gives up (returning 0)
   after 32 unrecognized instructions or at the first branch.  */

static int
find_function_stack_adjust (asection *sec, bfd_vma offset)
{
  int unrecog;
  int reg[128];

  memset (reg, 0, sizeof (reg));
  for (unrecog = 0; offset + 4 <= sec->size && unrecog < 32; offset += 4)
    {
      unsigned char buf[4];
      int rt, ra;
      int imm;

      /* Assume no relocs on stack adjusting insns.  */
      if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
	break;

      /* Stores of incoming regs to the frame don't affect sp.  */
      if (buf[0] == 0x24 /* stqd */)
	continue;

      /* Common field extraction: target reg, source reg A.  */
      rt = buf[3] & 0x7f;
      ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);
      /* Partly decoded immediate field.  */
      imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);

      if (buf[0] == 0x1c /* ai */)
	{
	  /* Sign-extend the 10-bit immediate.  */
	  imm >>= 7;
	  imm = (imm ^ 0x200) - 0x200;
	  reg[rt] = reg[ra] + imm;

	  if (rt == 1 /* sp */)
	    {
	      /* A positive adjust would be the epilogue; stop.  */
	      if (imm > 0)
		break;
	      return reg[rt];
	    }
	}
      else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
	{
	  int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);

	  /* Register-register add, used for large frames.  */
	  reg[rt] = reg[ra] + reg[rb];
	  if (rt == 1)
	    return reg[rt];
	}
      else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
	{
	  /* Immediate loads; track the value so a later add of this
	     register to sp gives the right delta.  */
	  if (buf[0] >= 0x42 /* ila */)
	    imm |= (buf[0] & 1) << 17;
	  else
	    {
	      imm &= 0xffff;

	      if (buf[0] == 0x40 /* il */)
		{
		  if ((buf[1] & 0x80) == 0)
		    goto unknown_insn;
		  imm = (imm ^ 0x8000) - 0x8000;
		}
	      else if ((buf[1] & 0x80) == 0 /* ilhu */)
		imm <<= 16;
	    }
	  reg[rt] = imm;
	  continue;
	}
      else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
	{
	  /* OR halfword into low 16 bits of a previous ilhu.  */
	  reg[rt] |= imm & 0xffff;
	  continue;
	}
      else if (buf[0] == 0x04 /* ori */)
	{
	  imm >>= 7;
	  imm = (imm ^ 0x200) - 0x200;
	  reg[rt] = reg[ra] | imm;
	  continue;
	}
      else if ((buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
	       || (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */))
	{
	  /* Used in pic reg load.  Say rt is trashed.  */
	  reg[rt] = 0;
	  continue;
	}
      else if (is_branch (buf) || is_indirect_branch (buf))
	/* If we hit a branch then we must be out of the prologue.  */
	break;
    unknown_insn:
      ++unrecog;
    }

  return 0;
}
1680
/* qsort predicate to sort symbols by section and value.  */

/* qsort comparators take no context argument, so the symbol array and
   the parallel per-symbol section array are passed via these file
   statics (set up by the caller before sorting).  */
static Elf_Internal_Sym *sort_syms_syms;
static asection **sort_syms_psecs;

/* Order: by section index, then by value (address), then larger size
   first, finally by original array position for a stable result.  */
static int
sort_syms (const void *a, const void *b)
{
  Elf_Internal_Sym *const *s1 = a;
  Elf_Internal_Sym *const *s2 = b;
  asection *sec1,*sec2;
  bfd_signed_vma delta;

  /* *s1 - sort_syms_syms is the symbol's index, used to find its
     section in the parallel array.  */
  sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
  sec2 = sort_syms_psecs[*s2 - sort_syms_syms];

  if (sec1 != sec2)
    return sec1->index - sec2->index;

  delta = (*s1)->st_value - (*s2)->st_value;
  if (delta != 0)
    return delta < 0 ? -1 : 1;

  /* Note the operands are swapped: bigger symbols sort earlier.  */
  delta = (*s2)->st_size - (*s1)->st_size;
  if (delta != 0)
    return delta < 0 ? -1 : 1;

  return *s1 < *s2 ? -1 : 1;
}
1710
/* One edge in the call graph: a call or branch from some function to
   FUN.  Entries are chained off the caller's function_info.call_list.  */

struct call_info
{
  /* The function called or branched to.  */
  struct function_info *fun;
  /* Next entry on the caller's call list.  */
  struct call_info *next;
  /* Number of calls recorded for this edge (see insert_callee).  */
  unsigned int count;
  /* NOTE(review): presumably the max call-tree depth reached through
     this edge; it is not written in this part of the file — confirm
     against the stack analysis code.  */
  unsigned int max_depth;
  /* Set when the transfer is a tail call or plain branch rather than
     a real call.  */
  unsigned int is_tail : 1;
  /* Set for artificial edges joining pasted sections (.init/.fini
     pieces); see pasted_function.  */
  unsigned int is_pasted : 1;
};
1720
/* Per-function (or per function fragment) info used for overlay and
   stack analysis.  One entry per contiguous address range; a function
   split into hot/cold parts has several entries linked via START.  */

struct function_info
{
  /* List of functions called.  Also branches to hot/cold part of
     function.  */
  struct call_info *call_list;
  /* For hot/cold part of function, point to owner.  */
  struct function_info *start;
  /* Symbol at start of function.  Discriminated by the GLOBAL flag
     below: u.h when global, u.sym when local.  */
  union {
    Elf_Internal_Sym *sym;
    struct elf_link_hash_entry *h;
  } u;
  /* Function section.  */
  asection *sec;
  /* NOTE(review): not written in this chunk — presumably associated
     read-only data for auto-overlay grouping; confirm.  */
  asection *rodata;
  /* Where last called from, and number of sections called from.  */
  asection *last_caller;
  unsigned int call_count;
  /* Address range of (this part of) function.  */
  bfd_vma lo, hi;
  /* Stack usage.  */
  int stack;
  /* Distance from root of call tree.  Tail and hot/cold branches
     count as one deeper.  We aren't counting stack frames here.  */
  unsigned int depth;
  /* Set if global symbol.  */
  unsigned int global : 1;
  /* Set if known to be start of function (as distinct from a hunk
     in hot/cold section.  */
  unsigned int is_func : 1;
  /* Set if not a root node.  */
  unsigned int non_root : 1;
  /* Flags used during call tree traversal.  It's cheaper to replicate
     the visit flags than have one which needs clearing after a traversal.  */
  unsigned int visit1 : 1;
  unsigned int visit2 : 1;
  unsigned int marking : 1;
  unsigned int visit3 : 1;
  unsigned int visit4 : 1;
  unsigned int visit5 : 1;
  unsigned int visit6 : 1;
  unsigned int visit7 : 1;
};
1764
/* Per-section table of function_info entries, kept sorted by address
   (see maybe_insert_function).  Attached to the section's
   _spu_elf_section_data.  */

struct spu_elf_stack_info
{
  /* Number of entries in use.  */
  int num_fun;
  /* Number of entries allocated.  */
  int max_fun;
  /* Variable size array describing functions, one per contiguous
     address range belonging to a function.  */
  struct function_info fun[1];
};
1773
1774 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
1775 entries for section SEC. */
1776
1777 static struct spu_elf_stack_info *
1778 alloc_stack_info (asection *sec, int max_fun)
1779 {
1780 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1781 bfd_size_type amt;
1782
1783 amt = sizeof (struct spu_elf_stack_info);
1784 amt += (max_fun - 1) * sizeof (struct function_info);
1785 sec_data->u.i.stack_info = bfd_zmalloc (amt);
1786 if (sec_data->u.i.stack_info != NULL)
1787 sec_data->u.i.stack_info->max_fun = max_fun;
1788 return sec_data->u.i.stack_info;
1789 }
1790
/* Add a new struct function_info describing a (part of a) function
   starting at SYM_H.  Keep the array sorted by address.
   SYM_H is an Elf_Internal_Sym * when GLOBAL is false, else a
   struct elf_link_hash_entry *.  IS_FUNC marks a known function start
   (rather than a hot/cold fragment).  Returns the (possibly existing)
   entry, or NULL on allocation failure.  */

static struct function_info *
maybe_insert_function (asection *sec,
		       void *sym_h,
		       bfd_boolean global,
		       bfd_boolean is_func)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
  int i;
  bfd_vma off, size;

  /* Lazily create the per-section table.  */
  if (sinfo == NULL)
    {
      sinfo = alloc_stack_info (sec, 20);
      if (sinfo == NULL)
	return NULL;
    }

  /* Extract start offset and size from whichever symbol form we got.  */
  if (!global)
    {
      Elf_Internal_Sym *sym = sym_h;
      off = sym->st_value;
      size = sym->st_size;
    }
  else
    {
      struct elf_link_hash_entry *h = sym_h;
      off = h->root.u.def.value;
      size = h->size;
    }

  /* Find the last entry starting at or before OFF.  */
  for (i = sinfo->num_fun; --i >= 0; )
    if (sinfo->fun[i].lo <= off)
      break;

  if (i >= 0)
    {
      /* Don't add another entry for an alias, but do update some
	 info.  */
      if (sinfo->fun[i].lo == off)
	{
	  /* Prefer globals over local syms.  */
	  if (global && !sinfo->fun[i].global)
	    {
	      sinfo->fun[i].global = TRUE;
	      sinfo->fun[i].u.h = sym_h;
	    }
	  if (is_func)
	    sinfo->fun[i].is_func = TRUE;
	  return &sinfo->fun[i];
	}
      /* Ignore a zero-size symbol inside an existing function.  */
      else if (sinfo->fun[i].hi > off && size == 0)
	return &sinfo->fun[i];
    }

  /* Grow the array when full; realloc and zero the fresh tail.  */
  if (sinfo->num_fun >= sinfo->max_fun)
    {
      bfd_size_type amt = sizeof (struct spu_elf_stack_info);
      bfd_size_type old = amt;

      old += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
      amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo = bfd_realloc (sinfo, amt);
      if (sinfo == NULL)
	return NULL;
      memset ((char *) sinfo + old, 0, amt - old);
      sec_data->u.i.stack_info = sinfo;
    }

  /* Shift later entries up and fill in the new one at position i+1.  */
  if (++i < sinfo->num_fun)
    memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
	     (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
  sinfo->fun[i].is_func = is_func;
  sinfo->fun[i].global = global;
  sinfo->fun[i].sec = sec;
  if (global)
    sinfo->fun[i].u.h = sym_h;
  else
    sinfo->fun[i].u.sym = sym_h;
  sinfo->fun[i].lo = off;
  sinfo->fun[i].hi = off + size;
  /* Stack usage is negative of the prologue's sp adjustment.  */
  sinfo->fun[i].stack = -find_function_stack_adjust (sec, off);
  sinfo->num_fun += 1;
  return &sinfo->fun[i];
}
1881
/* Return the name of FUN, for use in diagnostics.  For a nameless
   local symbol a "section+offset" string is malloc'd (and, note,
   never freed — callers use it only for messages; "(null)" is
   returned if that allocation fails).  */

static const char *
func_name (struct function_info *fun)
{
  asection *sec;
  bfd *ibfd;
  Elf_Internal_Shdr *symtab_hdr;

  /* For a hot/cold fragment, name it after the owning function.  */
  while (fun->start != NULL)
    fun = fun->start;

  if (fun->global)
    return fun->u.h->root.root.string;

  sec = fun->sec;
  if (fun->u.sym->st_name == 0)
    {
      /* Unnamed local: synthesize "section+hexoffset".  The extra 10
	 bytes cover '+', up to 8 hex digits, and the NUL.  */
      size_t len = strlen (sec->name);
      char *name = bfd_malloc (len + 10);
      if (name == NULL)
	return "(null)";
      sprintf (name, "%s+%lx", sec->name,
	       (unsigned long) fun->u.sym->st_value & 0xffffffff);
      return name;
    }
  ibfd = sec->owner;
  symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
  return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
}
1912
1913 /* Read the instruction at OFF in SEC. Return true iff the instruction
1914 is a nop, lnop, or stop 0 (all zero insn). */
1915
1916 static bfd_boolean
1917 is_nop (asection *sec, bfd_vma off)
1918 {
1919 unsigned char insn[4];
1920
1921 if (off + 4 > sec->size
1922 || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
1923 return FALSE;
1924 if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
1925 return TRUE;
1926 if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
1927 return TRUE;
1928 return FALSE;
1929 }
1930
1931 /* Extend the range of FUN to cover nop padding up to LIMIT.
1932 Return TRUE iff some instruction other than a NOP was found. */
1933
1934 static bfd_boolean
1935 insns_at_end (struct function_info *fun, bfd_vma limit)
1936 {
1937 bfd_vma off = (fun->hi + 3) & -4;
1938
1939 while (off < limit && is_nop (fun->sec, off))
1940 off += 4;
1941 if (off < limit)
1942 {
1943 fun->hi = off;
1944 return TRUE;
1945 }
1946 fun->hi = limit;
1947 return FALSE;
1948 }
1949
/* Check and fix overlapping function ranges.  Return TRUE iff there
   are gaps in the current info we have about functions in SEC, i.e.
   addresses in SEC not covered by any function_info entry.  */

static bfd_boolean
check_function_ranges (asection *sec, struct bfd_link_info *info)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
  int i;
  bfd_boolean gaps = FALSE;

  if (sinfo == NULL)
    return FALSE;

  /* Entries are sorted by address, so comparing neighbours finds all
     overlaps and gaps.  */
  for (i = 1; i < sinfo->num_fun; i++)
    if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
      {
	/* Fix overlapping symbols.  */
	const char *f1 = func_name (&sinfo->fun[i - 1]);
	const char *f2 = func_name (&sinfo->fun[i]);

	info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
	sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
      }
    else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
      gaps = TRUE;

  /* Also check the section boundaries.  */
  if (sinfo->num_fun == 0)
    gaps = TRUE;
  else
    {
      if (sinfo->fun[0].lo != 0)
	gaps = TRUE;
      if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
	{
	  const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);

	  info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
	  sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
	}
      else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
	gaps = TRUE;
    }
  return gaps;
}
1995
1996 /* Search current function info for a function that contains address
1997 OFFSET in section SEC. */
1998
1999 static struct function_info *
2000 find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
2001 {
2002 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2003 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2004 int lo, hi, mid;
2005
2006 lo = 0;
2007 hi = sinfo->num_fun;
2008 while (lo < hi)
2009 {
2010 mid = (lo + hi) / 2;
2011 if (offset < sinfo->fun[mid].lo)
2012 hi = mid;
2013 else if (offset >= sinfo->fun[mid].hi)
2014 lo = mid + 1;
2015 else
2016 return &sinfo->fun[mid];
2017 }
2018 info->callbacks->einfo (_("%A:0x%v not found in function table\n"),
2019 sec, offset);
2020 return NULL;
2021 }
2022
/* Add CALLEE to CALLER call list if not already present.  Return TRUE
   if CALLEE was new.  If this function return FALSE, CALLEE should
   be freed.  */

static bfd_boolean
insert_callee (struct function_info *caller, struct call_info *callee)
{
  struct call_info **pp, *p;

  /* Search the existing list for an edge to the same function.  */
  for (pp = &caller->call_list; (p = *pp) != NULL; pp = &p->next)
    if (p->fun == callee->fun)
      {
	/* Tail calls use less stack than normal calls.  Retain entry
	   for normal call over one for tail call.  */
	p->is_tail &= callee->is_tail;
	if (!p->is_tail)
	  {
	    /* A real call means the target is a genuine function,
	       not a hot/cold fragment of the caller.  */
	    p->fun->start = NULL;
	    p->fun->is_func = TRUE;
	  }
	p->count += 1;
	/* Reorder list so most recent call is first.  */
	*pp = p->next;
	p->next = caller->call_list;
	caller->call_list = p;
	return FALSE;
      }
  /* New edge: link it at the head of the caller's list.  */
  callee->next = caller->call_list;
  callee->count += 1;
  caller->call_list = callee;
  return TRUE;
}
2055
2056 /* Copy CALL and insert the copy into CALLER. */
2057
2058 static bfd_boolean
2059 copy_callee (struct function_info *caller, const struct call_info *call)
2060 {
2061 struct call_info *callee;
2062 callee = bfd_malloc (sizeof (*callee));
2063 if (callee == NULL)
2064 return FALSE;
2065 *callee = *call;
2066 if (!insert_callee (caller, callee))
2067 free (callee);
2068 return TRUE;
2069 }
2070
2071 /* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2072 overlay stub sections. */
2073
2074 static bfd_boolean
2075 interesting_section (asection *s, bfd *obfd)
2076 {
2077 return (s->output_section != NULL
2078 && s->output_section->owner == obfd
2079 && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_IN_MEMORY))
2080 == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2081 && s->size != 0);
2082 }
2083
/* Rummage through the relocs for SEC, looking for function calls.
   If CALL_TREE is true, fill in call graph.  If CALL_TREE is false,
   mark destination symbols on calls as being functions.  Also
   look at branches, which may be tail calls or go to hot/cold
   section part of same function.  Returns FALSE on error.  */

static bfd_boolean
mark_functions_via_relocs (asection *sec,
			   struct bfd_link_info *info,
			   int call_tree)
{
  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
  Elf_Internal_Shdr *symtab_hdr;
  void *psyms;
  /* Function-static so the non-code-section warning is only given
     once per link, not once per offending reloc.  */
  static bfd_boolean warned;

  if (!interesting_section (sec, info->output_bfd)
      || sec->reloc_count == 0)
    return TRUE;

  internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
					       info->keep_memory);
  if (internal_relocs == NULL)
    return FALSE;

  symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
  psyms = &symtab_hdr->contents;
  irela = internal_relocs;
  irelaend = irela + sec->reloc_count;
  for (; irela < irelaend; irela++)
    {
      enum elf_spu_reloc_type r_type;
      unsigned int r_indx;
      asection *sym_sec;
      Elf_Internal_Sym *sym;
      struct elf_link_hash_entry *h;
      bfd_vma val;
      bfd_boolean reject, is_call;
      struct function_info *caller;
      struct call_info *callee;

      /* REJECT flags relocs that cannot be branches/calls; they are
	 still examined under --auto-overlay to count possible stubs
	 for function pointer references.  */
      reject = FALSE;
      r_type = ELF32_R_TYPE (irela->r_info);
      if (r_type != R_SPU_REL16
	  && r_type != R_SPU_ADDR16)
	{
	  reject = TRUE;
	  if (!(call_tree && spu_hash_table (info)->auto_overlay))
	    continue;
	}

      r_indx = ELF32_R_SYM (irela->r_info);
      if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
	return FALSE;

      /* Ignore targets outside the final output.  */
      if (sym_sec == NULL
	  || sym_sec->output_section == NULL
	  || sym_sec->output_section->owner != info->output_bfd)
	continue;

      is_call = FALSE;
      if (!reject)
	{
	  unsigned char insn[4];

	  /* Decode the instruction the reloc applies to: only real
	     branch instructions matter here.  */
	  if (!bfd_get_section_contents (sec->owner, sec, insn,
					 irela->r_offset, 4))
	    return FALSE;
	  if (is_branch (insn))
	    {
	      /* brsl/brasl (branch and set link) is a call.  */
	      is_call = (insn[0] & 0xfd) == 0x31;
	      if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
		  != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
		{
		  if (!warned)
		    info->callbacks->einfo
		      (_("%B(%A+0x%v): call to non-code section"
			 " %B(%A), analysis incomplete\n"),
		       sec->owner, sec, irela->r_offset,
		       sym_sec->owner, sym_sec);
		  warned = TRUE;
		  continue;
		}
	    }
	  else
	    {
	      reject = TRUE;
	      if (!(call_tree && spu_hash_table (info)->auto_overlay)
		  || is_hint (insn))
		continue;
	    }
	}

      if (reject)
	{
	  /* For --auto-overlay, count possible stubs we need for
	     function pointer references.  */
	  unsigned int sym_type;
	  if (h)
	    sym_type = h->type;
	  else
	    sym_type = ELF_ST_TYPE (sym->st_info);
	  if (sym_type == STT_FUNC)
	    spu_hash_table (info)->non_ovly_stub += 1;
	  continue;
	}

      /* Branch destination address.  */
      if (h)
	val = h->root.u.def.value;
      else
	val = sym->st_value;
      val += irela->r_addend;

      if (!call_tree)
	{
	  /* First pass: record the destination as a function start.  */
	  struct function_info *fun;

	  if (irela->r_addend != 0)
	    {
	      /* Destination isn't the symbol itself; fabricate a
		 local symbol at the actual target address.  */
	      Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
	      if (fake == NULL)
		return FALSE;
	      fake->st_value = val;
	      fake->st_shndx
		= _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
	      sym = fake;
	    }
	  if (sym)
	    fun = maybe_insert_function (sym_sec, sym, FALSE, is_call);
	  else
	    fun = maybe_insert_function (sym_sec, h, TRUE, is_call);
	  if (fun == NULL)
	    return FALSE;
	  /* Free the fake symbol unless maybe_insert_function kept it.  */
	  if (irela->r_addend != 0
	      && fun->u.sym != sym)
	    free (sym);
	  continue;
	}

      /* Second pass: add an edge from the calling function to the
	 called one.  */
      caller = find_function (sec, irela->r_offset, info);
      if (caller == NULL)
	return FALSE;
      callee = bfd_malloc (sizeof *callee);
      if (callee == NULL)
	return FALSE;

      callee->fun = find_function (sym_sec, val, info);
      if (callee->fun == NULL)
	return FALSE;
      callee->is_tail = !is_call;
      callee->is_pasted = FALSE;
      callee->count = 0;
      /* Track how many distinct sections call this function.  */
      if (callee->fun->last_caller != sec)
	{
	  callee->fun->last_caller = sec;
	  callee->fun->call_count += 1;
	}
      if (!insert_callee (caller, callee))
	free (callee);
      else if (!is_call
	       && !callee->fun->is_func
	       && callee->fun->stack == 0)
	{
	  /* This is either a tail call or a branch from one part of
	     the function to another, ie. hot/cold section.  If the
	     destination has been called by some other function then
	     it is a separate function.  We also assume that functions
	     are not split across input files.  */
	  if (sec->owner != sym_sec->owner)
	    {
	      callee->fun->start = NULL;
	      callee->fun->is_func = TRUE;
	    }
	  else if (callee->fun->start == NULL)
	    callee->fun->start = caller;
	  else
	    {
	      /* Both already belong to fragment chains; if they don't
		 share a root, the destination is a real function.  */
	      struct function_info *callee_start;
	      struct function_info *caller_start;
	      callee_start = callee->fun;
	      while (callee_start->start)
		callee_start = callee_start->start;
	      caller_start = caller;
	      while (caller_start->start)
		caller_start = caller_start->start;
	      if (caller_start != callee_start)
		{
		  callee->fun->start = NULL;
		  callee->fun->is_func = TRUE;
		}
	    }
	}
    }

  return TRUE;
}
2280
2281 /* Handle something like .init or .fini, which has a piece of a function.
2282 These sections are pasted together to form a single function. */
2283
static bfd_boolean
pasted_function (asection *sec, struct bfd_link_info *info)
{
  struct bfd_link_order *l;
  struct _spu_elf_section_data *sec_data;
  struct spu_elf_stack_info *sinfo;
  Elf_Internal_Sym *fake;
  struct function_info *fun, *fun_start;

  /* Manufacture a symbol spanning the whole section so this piece can
     be entered in the function info like a real function.  */
  fake = bfd_zmalloc (sizeof (*fake));
  if (fake == NULL)
    return FALSE;
  fake->st_value = 0;
  fake->st_size = sec->size;
  fake->st_shndx
    = _bfd_elf_section_from_bfd_section (sec->owner, sec);
  fun = maybe_insert_function (sec, fake, FALSE, FALSE);
  if (!fun)
    /* NOTE(review): FAKE appears to leak on this path -- confirm
       whether maybe_insert_function takes ownership on failure.  */
    return FALSE;

  /* Find a function immediately preceding this section.  */
  fun_start = NULL;
  for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
    {
      if (l->u.indirect.section == sec)
	{
	  if (fun_start != NULL)
	    {
	      /* Tie this piece to the previous one with a "pasted"
		 tail-call edge from FUN_START to FUN.  */
	      struct call_info *callee = bfd_malloc (sizeof *callee);
	      if (callee == NULL)
		return FALSE;

	      fun->start = fun_start;
	      callee->fun = fun;
	      callee->is_tail = TRUE;
	      callee->is_pasted = TRUE;
	      callee->count = 0;
	      if (!insert_callee (fun_start, callee))
		free (callee);
	      return TRUE;
	    }
	  break;
	}
      /* Remember the last function of each preceding input section so
	 we know what this piece is pasted onto.  */
      if (l->type == bfd_indirect_link_order
	  && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
	  && (sinfo = sec_data->u.i.stack_info) != NULL
	  && sinfo->num_fun != 0)
	fun_start = &sinfo->fun[sinfo->num_fun - 1];
    }

  /* Reached when SEC has no preceding function, or SEC is not in the
     output section's link order at all.  */
  info->callbacks->einfo (_("%A link_order not found\n"), sec);
  return FALSE;
}
2337
2338 /* Map address ranges in code sections to functions. */
2339
static bfd_boolean
discover_functions (struct bfd_link_info *info)
{
  bfd *ibfd;
  int bfd_idx;
  Elf_Internal_Sym ***psym_arr;	/* Per-bfd sorted function symbol lists.  */
  asection ***sec_arr;		/* Per-bfd symbol-index -> section maps.  */
  bfd_boolean gaps = FALSE;

  /* Count input bfds to size the per-bfd arrays.  */
  bfd_idx = 0;
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    bfd_idx++;

  psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
  if (psym_arr == NULL)
    return FALSE;
  sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
  if (sec_arr == NULL)
    return FALSE;


  /* Pass 1: read symbols of each SPU input bfd and install function
     info for properly typed function symbols.  */
  for (ibfd = info->input_bfds, bfd_idx = 0;
       ibfd != NULL;
       ibfd = ibfd->link_next, bfd_idx++)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      Elf_Internal_Shdr *symtab_hdr;
      asection *sec;
      size_t symcount;
      Elf_Internal_Sym *syms, *sy, **psyms, **psy;
      asection **psecs, **p;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      /* Read all the symbols.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
      if (symcount == 0)
	{
	  /* No symbols, but any interesting code section still means
	     there are address ranges not covered by functions.  */
	  if (!gaps)
	    for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	      if (interesting_section (sec, info->output_bfd))
		{
		  gaps = TRUE;
		  break;
		}
	  continue;
	}

      if (symtab_hdr->contents != NULL)
	{
	  /* Don't use cached symbols since the generic ELF linker
	     code only reads local symbols, and we need globals too.  */
	  free (symtab_hdr->contents);
	  symtab_hdr->contents = NULL;
	}
      syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
				   NULL, NULL, NULL);
      /* Cache SYMS for later passes; freed with the bfd.  */
      symtab_hdr->contents = (void *) syms;
      if (syms == NULL)
	return FALSE;

      /* Select defined function symbols that are going to be output.  */
      psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
      if (psyms == NULL)
	return FALSE;
      psym_arr[bfd_idx] = psyms;
      psecs = bfd_malloc (symcount * sizeof (*psecs));
      if (psecs == NULL)
	return FALSE;
      sec_arr[bfd_idx] = psecs;
      for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
	if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
	    || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
	  {
	    asection *s;

	    *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
	    if (s != NULL && interesting_section (s, info->output_bfd))
	      *psy++ = sy;
	  }
      symcount = psy - psyms;
      *psy = NULL;		/* NULL-terminate for the later global pass.  */

      /* Sort them by section and offset within section.  */
      sort_syms_syms = syms;
      sort_syms_psecs = psecs;
      qsort (psyms, symcount, sizeof (*psyms), sort_syms);

      /* Now inspect the function symbols.  Allocate stack_info arrays,
	 one slot per symbol, for each section's run of symbols.  */
      for (psy = psyms; psy < psyms + symcount; )
	{
	  asection *s = psecs[*psy - syms];
	  Elf_Internal_Sym **psy2;

	  /* Find the end of the run of symbols in section S.  */
	  for (psy2 = psy; ++psy2 < psyms + symcount; )
	    if (psecs[*psy2 - syms] != s)
	      break;

	  if (!alloc_stack_info (s, psy2 - psy))
	    return FALSE;
	  psy = psy2;
	}

      /* First install info about properly typed and sized functions.
	 In an ideal world this will cover all code sections, except
	 when partitioning functions into hot and cold sections,
	 and the horrible pasted together .init and .fini functions.  */
      for (psy = psyms; psy < psyms + symcount; ++psy)
	{
	  sy = *psy;
	  if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
	    {
	      asection *s = psecs[sy - syms];
	      if (!maybe_insert_function (s, sy, FALSE, TRUE))
		return FALSE;
	    }
	}

      for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	if (interesting_section (sec, info->output_bfd))
	  gaps |= check_function_ranges (sec, info);
    }

  if (gaps)
    {
      /* See if we can discover more function symbols by looking at
	 relocations.  */
      for (ibfd = info->input_bfds, bfd_idx = 0;
	   ibfd != NULL;
	   ibfd = ibfd->link_next, bfd_idx++)
	{
	  asection *sec;

	  if (psym_arr[bfd_idx] == NULL)
	    continue;

	  for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	    if (!mark_functions_via_relocs (sec, info, FALSE))
	      return FALSE;
	}

      /* Pass 2: for bfds that still have gaps, install improperly
	 typed global symbols as function starts.  */
      for (ibfd = info->input_bfds, bfd_idx = 0;
	   ibfd != NULL;
	   ibfd = ibfd->link_next, bfd_idx++)
	{
	  Elf_Internal_Shdr *symtab_hdr;
	  asection *sec;
	  Elf_Internal_Sym *syms, *sy, **psyms, **psy;
	  asection **psecs;

	  if ((psyms = psym_arr[bfd_idx]) == NULL)
	    continue;

	  psecs = sec_arr[bfd_idx];

	  symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
	  syms = (Elf_Internal_Sym *) symtab_hdr->contents;

	  gaps = FALSE;
	  for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	    if (interesting_section (sec, info->output_bfd))
	      gaps |= check_function_ranges (sec, info);
	  if (!gaps)
	    continue;

	  /* Finally, install all globals.  */
	  for (psy = psyms; (sy = *psy) != NULL; ++psy)
	    {
	      asection *s;

	      s = psecs[sy - syms];

	      /* Global syms might be improperly typed functions.  */
	      if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
		  && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
		{
		  if (!maybe_insert_function (s, sy, FALSE, FALSE))
		    return FALSE;
		}
	    }
	}

      for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
	{
	  extern const bfd_target bfd_elf32_spu_vec;
	  asection *sec;

	  if (ibfd->xvec != &bfd_elf32_spu_vec)
	    continue;

	  /* Some of the symbols we've installed as marking the
	     beginning of functions may have a size of zero.  Extend
	     the range of such functions to the beginning of the
	     next symbol of interest.  */
	  for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	    if (interesting_section (sec, info->output_bfd))
	      {
		struct _spu_elf_section_data *sec_data;
		struct spu_elf_stack_info *sinfo;

		sec_data = spu_elf_section_data (sec);
		sinfo = sec_data->u.i.stack_info;
		if (sinfo != NULL)
		  {
		    int fun_idx;
		    bfd_vma hi = sec->size;

		    /* Walk backwards so each function's hi bound is
		       the following function's lo bound.  */
		    for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
		      {
			sinfo->fun[fun_idx].hi = hi;
			hi = sinfo->fun[fun_idx].lo;
		      }
		  }
		/* No symbols in this section.  Must be .init or .fini
		   or something similar.  */
		else if (!pasted_function (sec, info))
		  return FALSE;
	      }
	}
    }

  /* Free the per-bfd scratch arrays.  */
  for (ibfd = info->input_bfds, bfd_idx = 0;
       ibfd != NULL;
       ibfd = ibfd->link_next, bfd_idx++)
    {
      if (psym_arr[bfd_idx] == NULL)
	continue;

      free (psym_arr[bfd_idx]);
      free (sec_arr[bfd_idx]);
    }

  free (psym_arr);
  free (sec_arr);

  return TRUE;
}
2579
2580 /* Iterate over all function_info we have collected, calling DOIT on
2581 each node if ROOT_ONLY is false. Only call DOIT on root nodes
2582 if ROOT_ONLY. */
2583
2584 static bfd_boolean
2585 for_each_node (bfd_boolean (*doit) (struct function_info *,
2586 struct bfd_link_info *,
2587 void *),
2588 struct bfd_link_info *info,
2589 void *param,
2590 int root_only)
2591 {
2592 bfd *ibfd;
2593
2594 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2595 {
2596 extern const bfd_target bfd_elf32_spu_vec;
2597 asection *sec;
2598
2599 if (ibfd->xvec != &bfd_elf32_spu_vec)
2600 continue;
2601
2602 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2603 {
2604 struct _spu_elf_section_data *sec_data;
2605 struct spu_elf_stack_info *sinfo;
2606
2607 if ((sec_data = spu_elf_section_data (sec)) != NULL
2608 && (sinfo = sec_data->u.i.stack_info) != NULL)
2609 {
2610 int i;
2611 for (i = 0; i < sinfo->num_fun; ++i)
2612 if (!root_only || !sinfo->fun[i].non_root)
2613 if (!doit (&sinfo->fun[i], info, param))
2614 return FALSE;
2615 }
2616 }
2617 }
2618 return TRUE;
2619 }
2620
2621 /* Transfer call info attached to struct function_info entries for
2622 all of a given function's sections to the first entry. */
2623
2624 static bfd_boolean
2625 transfer_calls (struct function_info *fun,
2626 struct bfd_link_info *info ATTRIBUTE_UNUSED,
2627 void *param ATTRIBUTE_UNUSED)
2628 {
2629 struct function_info *start = fun->start;
2630
2631 if (start != NULL)
2632 {
2633 struct call_info *call, *call_next;
2634
2635 while (start->start != NULL)
2636 start = start->start;
2637 for (call = fun->call_list; call != NULL; call = call_next)
2638 {
2639 call_next = call->next;
2640 if (!insert_callee (start, call))
2641 free (call);
2642 }
2643 fun->call_list = NULL;
2644 }
2645 return TRUE;
2646 }
2647
2648 /* Mark nodes in the call graph that are called by some other node. */
2649
2650 static bfd_boolean
2651 mark_non_root (struct function_info *fun,
2652 struct bfd_link_info *info ATTRIBUTE_UNUSED,
2653 void *param ATTRIBUTE_UNUSED)
2654 {
2655 struct call_info *call;
2656
2657 if (fun->visit1)
2658 return TRUE;
2659 fun->visit1 = TRUE;
2660 for (call = fun->call_list; call; call = call->next)
2661 {
2662 call->fun->non_root = TRUE;
2663 mark_non_root (call->fun, 0, 0);
2664 }
2665 return TRUE;
2666 }
2667
2668 /* Remove cycles from the call graph. Set depth of nodes. */
2669
static bfd_boolean
remove_cycles (struct function_info *fun,
	       struct bfd_link_info *info,
	       void *param)
{
  struct call_info **callp, *call;
  unsigned int depth = *(unsigned int *) param;
  unsigned int max_depth = depth;

  fun->depth = depth;
  fun->visit2 = TRUE;
  /* MARKING is set while FUN is on the current DFS path; a call to a
     marked node is a back edge, ie. a cycle.  */
  fun->marking = TRUE;

  callp = &fun->call_list;
  while ((call = *callp) != NULL)
    {
      if (!call->fun->visit2)
	{
	  /* Pasted pieces belong to the same function, so they don't
	     increase call depth.  */
	  call->max_depth = depth + !call->is_pasted;
	  if (!remove_cycles (call->fun, info, &call->max_depth))
	    return FALSE;
	  if (max_depth < call->max_depth)
	    max_depth = call->max_depth;
	}
      else if (call->fun->marking)
	{
	  /* Back edge: drop this call to break the cycle.  Only warn
	     for stack analysis; --auto-overlay copes silently.  */
	  if (!spu_hash_table (info)->auto_overlay)
	    {
	      const char *f1 = func_name (fun);
	      const char *f2 = func_name (call->fun);

	      info->callbacks->info (_("Stack analysis will ignore the call "
				       "from %s to %s\n"),
				     f1, f2);
	    }
	  /* Unlink and free the edge; CALLP stays put so the loop
	     examines the new *CALLP next.  */
	  *callp = call->next;
	  free (call);
	  continue;
	}
      callp = &call->next;
    }
  fun->marking = FALSE;
  /* Return the deepest call depth seen below FUN via PARAM.  */
  *(unsigned int *) param = max_depth;
  return TRUE;
}
2715
2716 /* Populate call_list for each function. */
2717
2718 static bfd_boolean
2719 build_call_tree (struct bfd_link_info *info)
2720 {
2721 bfd *ibfd;
2722 unsigned int depth;
2723
2724 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2725 {
2726 extern const bfd_target bfd_elf32_spu_vec;
2727 asection *sec;
2728
2729 if (ibfd->xvec != &bfd_elf32_spu_vec)
2730 continue;
2731
2732 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2733 if (!mark_functions_via_relocs (sec, info, TRUE))
2734 return FALSE;
2735 }
2736
2737 /* Transfer call info from hot/cold section part of function
2738 to main entry. */
2739 if (!spu_hash_table (info)->auto_overlay
2740 && !for_each_node (transfer_calls, info, 0, FALSE))
2741 return FALSE;
2742
2743 /* Find the call graph root(s). */
2744 if (!for_each_node (mark_non_root, info, 0, FALSE))
2745 return FALSE;
2746
2747 /* Remove cycles from the call graph. We start from the root node(s)
2748 so that we break cycles in a reasonable place. */
2749 depth = 0;
2750 return for_each_node (remove_cycles, info, &depth, TRUE);
2751 }
2752
2753 /* qsort predicate to sort calls by max_depth then count. */
2754
2755 static int
2756 sort_calls (const void *a, const void *b)
2757 {
2758 struct call_info *const *c1 = a;
2759 struct call_info *const *c2 = b;
2760 int delta;
2761
2762 delta = (*c2)->max_depth - (*c1)->max_depth;
2763 if (delta != 0)
2764 return delta;
2765
2766 delta = (*c2)->count - (*c1)->count;
2767 if (delta != 0)
2768 return delta;
2769
2770 return c1 - c2;
2771 }
2772
struct _mos_param {
  /* Running maximum, in bytes, of the text + rodata size of any
     single function marked as an overlay candidate.  */
  unsigned int max_overlay_size;
};
2776
2777 /* Set linker_mark and gc_mark on any sections that we will put in
2778 overlays. These flags are used by the generic ELF linker, but we
2779 won't be continuing on to bfd_elf_final_link so it is OK to use
2780 them. linker_mark is clear before we get here. Set segment_mark
2781 on sections that are part of a pasted function (excluding the last
2782 section).
2783
   Set up function rodata section if --overlay-rodata.  We don't
   currently include merged string constant rodata sections since
   they are shared between functions and cannot be attributed to a
   single one.
2786
2787 Sort the call graph so that the deepest nodes will be visited
2788 first. */
2789
static bfd_boolean
mark_overlay_section (struct function_info *fun,
		      struct bfd_link_info *info,
		      void *param)
{
  struct call_info *call;
  unsigned int count;
  struct _mos_param *mos_param = param;

  if (fun->visit4)
    return TRUE;

  fun->visit4 = TRUE;
  if (!fun->sec->linker_mark)
    {
      fun->sec->linker_mark = 1;
      fun->sec->gc_mark = 1;
      fun->sec->segment_mark = 0;
      /* Ensure SEC_CODE is set on this text section (it ought to
	 be!), and SEC_CODE is clear on rodata sections.  We use
	 this flag to differentiate the two overlay section types.  */
      fun->sec->flags |= SEC_CODE;
      if (spu_hash_table (info)->auto_overlay & OVERLAY_RODATA)
	{
	  char *name = NULL;
	  unsigned int size;

	  /* Find the rodata section corresponding to this function's
	     text section.  */
	  if (strcmp (fun->sec->name, ".text") == 0)
	    {
	      name = bfd_malloc (sizeof (".rodata"));
	      if (name == NULL)
		return FALSE;
	      memcpy (name, ".rodata", sizeof (".rodata"));
	    }
	  else if (strncmp (fun->sec->name, ".text.", 6) == 0)
	    {
	      /* Map ".text.foo" onto ".rodata.foo".  */
	      size_t len = strlen (fun->sec->name);
	      name = bfd_malloc (len + 3);
	      if (name == NULL)
		return FALSE;
	      memcpy (name, ".rodata", sizeof (".rodata"));
	      memcpy (name + 7, fun->sec->name + 5, len - 4);
	    }
	  else if (strncmp (fun->sec->name, ".gnu.linkonce.t.", 16) == 0)
	    {
	      /* Map ".gnu.linkonce.t.foo" onto ".gnu.linkonce.r.foo"
		 by flipping the 't' at index 14.  */
	      size_t len = strlen (fun->sec->name) + 1;
	      name = bfd_malloc (len);
	      if (name == NULL)
		return FALSE;
	      memcpy (name, fun->sec->name, len);
	      name[14] = 'r';
	    }

	  if (name != NULL)
	    {
	      asection *rodata = NULL;
	      asection *group_sec = elf_section_data (fun->sec)->next_in_group;
	      /* Prefer a rodata section in the same section group;
		 otherwise look one up by name in the owning bfd.  */
	      if (group_sec == NULL)
		rodata = bfd_get_section_by_name (fun->sec->owner, name);
	      else
		while (group_sec != NULL && group_sec != fun->sec)
		  {
		    if (strcmp (group_sec->name, name) == 0)
		      {
			rodata = group_sec;
			break;
		      }
		    group_sec = elf_section_data (group_sec)->next_in_group;
		  }
	      fun->rodata = rodata;
	      if (fun->rodata)
		{
		  fun->rodata->linker_mark = 1;
		  fun->rodata->gc_mark = 1;
		  fun->rodata->flags &= ~SEC_CODE;
		}
	      free (name);
	    }
	  /* Track the largest text + rodata overlay candidate seen.  */
	  size = fun->sec->size;
	  if (fun->rodata)
	    size += fun->rodata->size;
	  if (mos_param->max_overlay_size < size)
	    mos_param->max_overlay_size = size;
	}
    }

  /* Sort the call list by depth/count (see sort_calls) so deeper
     callees are visited first when building overlays.  */
  for (count = 0, call = fun->call_list; call != NULL; call = call->next)
    count += 1;

  if (count > 1)
    {
      struct call_info **calls = bfd_malloc (count * sizeof (*calls));
      if (calls == NULL)
	return FALSE;

      for (count = 0, call = fun->call_list; call != NULL; call = call->next)
	calls[count++] = call;

      qsort (calls, count, sizeof (*calls), sort_calls);

      /* Rebuild the singly linked list in sorted order.  */
      fun->call_list = NULL;
      while (count != 0)
	{
	  --count;
	  calls[count]->next = fun->call_list;
	  fun->call_list = calls[count];
	}
      free (calls);
    }

  for (call = fun->call_list; call != NULL; call = call->next)
    {
      if (call->is_pasted)
	{
	  /* There can only be one is_pasted call per function_info.  */
	  BFD_ASSERT (!fun->sec->segment_mark);
	  fun->sec->segment_mark = 1;
	}
      if (!mark_overlay_section (call->fun, info, param))
	return FALSE;
    }

  /* Don't put entry code into an overlay.  The overlay manager needs
     a stack!  */
  if (fun->lo + fun->sec->output_offset + fun->sec->output_section->vma
      == info->output_bfd->start_address)
    {
      fun->sec->linker_mark = 0;
      if (fun->rodata != NULL)
	fun->rodata->linker_mark = 0;
    }
  return TRUE;
}
2925
struct _uos_param {
  /* Sections whose call-graph subtrees must not go into overlays.  */
  asection *exclude_input_section;
  asection *exclude_output_section;
  /* Non-zero while the traversal is inside an excluded subtree;
     incremented/decremented as excluded nodes are entered/left.  */
  unsigned long clearing;
};
2931
2932 /* Undo some of mark_overlay_section's work. */
2933
2934 static bfd_boolean
2935 unmark_overlay_section (struct function_info *fun,
2936 struct bfd_link_info *info,
2937 void *param)
2938 {
2939 struct call_info *call;
2940 struct _uos_param *uos_param = param;
2941 unsigned int excluded = 0;
2942
2943 if (fun->visit5)
2944 return TRUE;
2945
2946 fun->visit5 = TRUE;
2947
2948 excluded = 0;
2949 if (fun->sec == uos_param->exclude_input_section
2950 || fun->sec->output_section == uos_param->exclude_output_section)
2951 excluded = 1;
2952
2953 uos_param->clearing += excluded;
2954
2955 if (uos_param->clearing)
2956 {
2957 fun->sec->linker_mark = 0;
2958 if (fun->rodata)
2959 fun->rodata->linker_mark = 0;
2960 }
2961
2962 for (call = fun->call_list; call != NULL; call = call->next)
2963 if (!unmark_overlay_section (call->fun, info, param))
2964 return FALSE;
2965
2966 uos_param->clearing -= excluded;
2967 return TRUE;
2968 }
2969
struct _cl_param {
  /* Maximum size in bytes a candidate may occupy to qualify as
     non-overlay "library" code.  */
  unsigned int lib_size;
  /* Output cursor: filled with (text, rodata) section pairs, the
     rodata slot being NULL when a function has none.  */
  asection **lib_sections;
};
2974
2975 /* Add sections we have marked as belonging to overlays to an array
2976 for consideration as non-overlay sections. The array consist of
2977 pairs of sections, (text,rodata), for functions in the call graph. */
2978
2979 static bfd_boolean
2980 collect_lib_sections (struct function_info *fun,
2981 struct bfd_link_info *info,
2982 void *param)
2983 {
2984 struct _cl_param *lib_param = param;
2985 struct call_info *call;
2986 unsigned int size;
2987
2988 if (fun->visit6)
2989 return TRUE;
2990
2991 fun->visit6 = TRUE;
2992 if (!fun->sec->linker_mark || !fun->sec->gc_mark || fun->sec->segment_mark)
2993 return TRUE;
2994
2995 size = fun->sec->size;
2996 if (fun->rodata)
2997 size += fun->rodata->size;
2998 if (size > lib_param->lib_size)
2999 return TRUE;
3000
3001 *lib_param->lib_sections++ = fun->sec;
3002 fun->sec->gc_mark = 0;
3003 if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
3004 {
3005 *lib_param->lib_sections++ = fun->rodata;
3006 fun->rodata->gc_mark = 0;
3007 }
3008 else
3009 *lib_param->lib_sections++ = NULL;
3010
3011 for (call = fun->call_list; call != NULL; call = call->next)
3012 collect_lib_sections (call->fun, info, param);
3013
3014 return TRUE;
3015 }
3016
3017 /* qsort predicate to sort sections by call count. */
3018
3019 static int
3020 sort_lib (const void *a, const void *b)
3021 {
3022 asection *const *s1 = a;
3023 asection *const *s2 = b;
3024 struct _spu_elf_section_data *sec_data;
3025 struct spu_elf_stack_info *sinfo;
3026 int delta;
3027
3028 delta = 0;
3029 if ((sec_data = spu_elf_section_data (*s1)) != NULL
3030 && (sinfo = sec_data->u.i.stack_info) != NULL)
3031 {
3032 int i;
3033 for (i = 0; i < sinfo->num_fun; ++i)
3034 delta -= sinfo->fun[i].call_count;
3035 }
3036
3037 if ((sec_data = spu_elf_section_data (*s2)) != NULL
3038 && (sinfo = sec_data->u.i.stack_info) != NULL)
3039 {
3040 int i;
3041 for (i = 0; i < sinfo->num_fun; ++i)
3042 delta += sinfo->fun[i].call_count;
3043 }
3044
3045 if (delta != 0)
3046 return delta;
3047
3048 return s1 - s2;
3049 }
3050
3051 /* Remove some sections from those marked to be in overlays. Choose
3052 those that are called from many places, likely library functions. */
3053
static unsigned int
auto_ovl_lib_functions (struct bfd_link_info *info, unsigned int lib_size)
{
  /* Returns the remaining lib space after moving sections out of
     overlays, or (unsigned int) -1 on error.  */
  bfd *ibfd;
  asection **lib_sections;
  unsigned int i, lib_count;
  struct _cl_param collect_lib_param;
  struct function_info dummy_caller;

  memset (&dummy_caller, 0, sizeof (dummy_caller));
  lib_count = 0;
  /* Count marked code sections small enough to be candidates, sizing
     LIB_SECTIONS at two slots per candidate: text and rodata.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	if (sec->linker_mark
	    && sec->size < lib_size
	    && (sec->flags & SEC_CODE) != 0)
	  lib_count += 1;
    }
  lib_sections = bfd_malloc (lib_count * 2 * sizeof (*lib_sections));
  if (lib_sections == NULL)
    return (unsigned int) -1;
  collect_lib_param.lib_size = lib_size;
  collect_lib_param.lib_sections = lib_sections;
  if (!for_each_node (collect_lib_sections, info, &collect_lib_param,
		      TRUE))
    return (unsigned int) -1;
  lib_count = (collect_lib_param.lib_sections - lib_sections) / 2;

  /* Sort sections so that those with the most calls are first.  */
  if (lib_count > 1)
    qsort (lib_sections, lib_count, 2 * sizeof (*lib_sections), sort_lib);

  for (i = 0; i < lib_count; i++)
    {
      unsigned int tmp, stub_size;
      asection *sec;
      struct _spu_elf_section_data *sec_data;
      struct spu_elf_stack_info *sinfo;

      sec = lib_sections[2 * i];
      /* If this section is OK, its size must be less than lib_size.  */
      tmp = sec->size;
      /* If it has a rodata section, then add that too.  */
      if (lib_sections[2 * i + 1])
	tmp += lib_sections[2 * i + 1]->size;
      /* Add any new overlay call stubs needed by the section.  */
      stub_size = 0;
      if (tmp < lib_size
	  && (sec_data = spu_elf_section_data (sec)) != NULL
	  && (sinfo = sec_data->u.i.stack_info) != NULL)
	{
	  int k;
	  struct call_info *call;

	  /* A stub is needed for each distinct overlay callee not
	     already accounted for on dummy_caller's list.  */
	  for (k = 0; k < sinfo->num_fun; ++k)
	    for (call = sinfo->fun[k].call_list; call; call = call->next)
	      if (call->fun->sec->linker_mark)
		{
		  struct call_info *p;
		  for (p = dummy_caller.call_list; p; p = p->next)
		    if (p->fun == call->fun)
		      break;
		  if (!p)
		    stub_size += OVL_STUB_SIZE;
		}
	}
      if (tmp + stub_size < lib_size)
	{
	  struct call_info **pp, *p;

	  /* This section fits.  Mark it as non-overlay.  */
	  lib_sections[2 * i]->linker_mark = 0;
	  if (lib_sections[2 * i + 1])
	    lib_sections[2 * i + 1]->linker_mark = 0;
	  lib_size -= tmp + stub_size;
	  /* Call stubs to the section we just added are no longer
	     needed.  */
	  pp = &dummy_caller.call_list;
	  while ((p = *pp) != NULL)
	    if (!p->fun->sec->linker_mark)
	      {
		/* Reclaim the stub space charged earlier.  */
		lib_size += OVL_STUB_SIZE;
		*pp = p->next;
		free (p);
	      }
	    else
	      pp = &p->next;
	  /* Add new call stubs to dummy_caller.  */
	  if ((sec_data = spu_elf_section_data (sec)) != NULL
	      && (sinfo = sec_data->u.i.stack_info) != NULL)
	    {
	      int k;
	      struct call_info *call;

	      for (k = 0; k < sinfo->num_fun; ++k)
		for (call = sinfo->fun[k].call_list;
		     call;
		     call = call->next)
		  if (call->fun->sec->linker_mark)
		    {
		      struct call_info *callee;
		      callee = bfd_malloc (sizeof (*callee));
		      if (callee == NULL)
			return (unsigned int) -1;
		      *callee = *call;
		      if (!insert_callee (&dummy_caller, callee))
			free (callee);
		    }
	    }
	}
    }
  /* Release the bookkeeping call list.  */
  while (dummy_caller.call_list != NULL)
    {
      struct call_info *call = dummy_caller.call_list;
      dummy_caller.call_list = call->next;
      free (call);
    }
  /* Restore gc_mark, which collect_lib_sections cleared, on every
     collected section.  */
  for (i = 0; i < 2 * lib_count; i++)
    if (lib_sections[i])
      lib_sections[i]->gc_mark = 1;
  free (lib_sections);
  return lib_size;
}
3184
3185 /* Build an array of overlay sections. The deepest node's section is
3186 added first, then its parent node's section, then everything called
3187 from the parent section. The idea being to group sections to
3188 minimise calls between different overlays. */
3189
static bfd_boolean
collect_overlays (struct function_info *fun,
		  struct bfd_link_info *info,
		  void *param)
{
  struct call_info *call;
  bfd_boolean added_fun;
  asection ***ovly_sections = param;

  if (fun->visit7)
    return TRUE;

  fun->visit7 = TRUE;
  /* Descend the deepest call chain first: the call list is sorted
     deepest-first (see sort_calls), so only the first non-pasted
     callee needs visiting here; the rest are handled below.  */
  for (call = fun->call_list; call != NULL; call = call->next)
    if (!call->is_pasted)
      {
	if (!collect_overlays (call->fun, info, ovly_sections))
	  return FALSE;
	break;
      }

  added_fun = FALSE;
  if (fun->sec->linker_mark && fun->sec->gc_mark)
    {
      /* Append this function's (text, rodata) pair to the output
	 array; the rodata slot is NULL when absent.  Clearing
	 gc_mark ensures each section is added only once.  */
      fun->sec->gc_mark = 0;
      *(*ovly_sections)++ = fun->sec;
      if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
	{
	  fun->rodata->gc_mark = 0;
	  *(*ovly_sections)++ = fun->rodata;
	}
      else
	*(*ovly_sections)++ = NULL;
      added_fun = TRUE;

      /* Pasted sections must stay with the first section.  We don't
	 put pasted sections in the array, just the first section.
	 Mark subsequent sections as already considered.  */
      if (fun->sec->segment_mark)
	{
	  struct function_info *call_fun = fun;
	  do
	    {
	      for (call = call_fun->call_list; call != NULL; call = call->next)
		if (call->is_pasted)
		  {
		    call_fun = call->fun;
		    call_fun->sec->gc_mark = 0;
		    if (call_fun->rodata)
		      call_fun->rodata->gc_mark = 0;
		    break;
		  }
	      /* segment_mark promised a pasted call; none found is a
		 broken call graph.  */
	      if (call == NULL)
		abort ();
	    }
	  while (call_fun->sec->segment_mark);
	}
    }

  /* Then everything else called from this function.  */
  for (call = fun->call_list; call != NULL; call = call->next)
    if (!collect_overlays (call->fun, info, ovly_sections))
      return FALSE;

  if (added_fun)
    {
      struct _spu_elf_section_data *sec_data;
      struct spu_elf_stack_info *sinfo;

      /* Also pull in any other functions sharing this section.  */
      if ((sec_data = spu_elf_section_data (fun->sec)) != NULL
	  && (sinfo = sec_data->u.i.stack_info) != NULL)
	{
	  int i;
	  for (i = 0; i < sinfo->num_fun; ++i)
	    if (!collect_overlays (&sinfo->fun[i], info, ovly_sections))
	      return FALSE;
	}
    }

  return TRUE;
}
3270
struct _sum_stack_param {
  /* Cumulative stack of the node most recently returned from.  */
  size_t cum_stack;
  /* Worst-case stack over all root nodes.  */
  size_t overall_stack;
  /* Whether to define absolute __stack_* symbols per function.  */
  bfd_boolean emit_stack_syms;
};
3276
3277 /* Descend the call graph for FUN, accumulating total stack required. */
3278
static bfd_boolean
sum_stack (struct function_info *fun,
	   struct bfd_link_info *info,
	   void *param)
{
  struct call_info *call;
  struct function_info *max;
  size_t stack, cum_stack;
  const char *f1;
  bfd_boolean has_call;
  struct _sum_stack_param *sum_stack_param = param;
  struct spu_link_hash_table *htab;

  /* On a repeat visit fun->stack already holds the cumulative figure;
     just pass it back to the caller.  */
  cum_stack = fun->stack;
  sum_stack_param->cum_stack = cum_stack;
  if (fun->visit3)
    return TRUE;

  has_call = FALSE;
  max = NULL;
  /* Recurse over callees, tracking the worst cumulative stack and
     which callee (MAX) produced it.  */
  for (call = fun->call_list; call; call = call->next)
    {
      if (!call->is_pasted)
	has_call = TRUE;
      if (!sum_stack (call->fun, info, sum_stack_param))
	return FALSE;
      stack = sum_stack_param->cum_stack;
      /* Include caller stack for normal calls, don't do so for
	 tail calls.  fun->stack here is local stack usage for
	 this function.  */
      if (!call->is_tail || call->is_pasted || call->fun->start != NULL)
	stack += fun->stack;
      if (cum_stack < stack)
	{
	  cum_stack = stack;
	  max = call->fun;
	}
    }

  sum_stack_param->cum_stack = cum_stack;
  stack = fun->stack;
  /* Now fun->stack holds cumulative stack.  */
  fun->stack = cum_stack;
  fun->visit3 = TRUE;

  /* Only root nodes contribute to the overall worst-case figure.  */
  if (!fun->non_root
      && sum_stack_param->overall_stack < cum_stack)
    sum_stack_param->overall_stack = cum_stack;

  htab = spu_hash_table (info);
  /* For --auto-overlay only the totals are wanted; skip reporting.  */
  if (htab->auto_overlay)
    return TRUE;

  f1 = func_name (fun);
  if (!fun->non_root)
    info->callbacks->info (_("  %s: 0x%v\n"), f1, (bfd_vma) cum_stack);
  info->callbacks->minfo (_("%s: 0x%v 0x%v\n"),
			  f1, (bfd_vma) stack, (bfd_vma) cum_stack);

  if (has_call)
    {
      info->callbacks->minfo (_("  calls:\n"));
      for (call = fun->call_list; call; call = call->next)
	if (!call->is_pasted)
	  {
	    const char *f2 = func_name (call->fun);
	    /* "*" marks the callee on the deepest path, "t" a tail
	       call.  */
	    const char *ann1 = call->fun == max ? "*" : " ";
	    const char *ann2 = call->is_tail ? "t" : " ";

	    info->callbacks->minfo (_("   %s%s %s\n"), ann1, ann2, f2);
	  }
    }

  if (sum_stack_param->emit_stack_syms)
    {
      /* Define an absolute symbol __stack_<func> (or, for local
	 syms, __stack_<secid>_<func>) whose value is the cumulative
	 stack requirement.  Buffer: "__stack_" (8) + "%x" (<= 8)
	 + "_" (1) + NUL fits in 18 + strlen (f1).  */
      char *name = bfd_malloc (18 + strlen (f1));
      struct elf_link_hash_entry *h;

      if (name == NULL)
	return FALSE;

      if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
	sprintf (name, "__stack_%s", f1);
      else
	sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);

      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
      free (name);
      /* Only define the symbol if the user hasn't already.  */
      if (h != NULL
	  && (h->root.type == bfd_link_hash_new
	      || h->root.type == bfd_link_hash_undefined
	      || h->root.type == bfd_link_hash_undefweak))
	{
	  h->root.type = bfd_link_hash_defined;
	  h->root.u.def.section = bfd_abs_section_ptr;
	  h->root.u.def.value = cum_stack;
	  h->size = 0;
	  h->type = 0;
	  h->ref_regular = 1;
	  h->def_regular = 1;
	  h->ref_regular_nonweak = 1;
	  h->forced_local = 1;
	  h->non_elf = 0;
	}
    }

  return TRUE;
}
3387
3388 /* SEC is part of a pasted function. Return the call_info for the
3389 next section of this function. */
3390
3391 static struct call_info *
3392 find_pasted_call (asection *sec)
3393 {
3394 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
3395 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
3396 struct call_info *call;
3397 int k;
3398
3399 for (k = 0; k < sinfo->num_fun; ++k)
3400 for (call = sinfo->fun[k].call_list; call != NULL; call = call->next)
3401 if (call->is_pasted)
3402 return call;
3403 abort ();
3404 return 0;
3405 }
3406
3407 /* qsort predicate to sort bfds by file name. */
3408
3409 static int
3410 sort_bfds (const void *a, const void *b)
3411 {
3412 bfd *const *abfd1 = a;
3413 bfd *const *abfd2 = b;
3414
3415 return strcmp ((*abfd1)->filename, (*abfd2)->filename);
3416 }
3417
/* Handle --auto-overlay.  */

static void spu_elf_auto_overlay (struct bfd_link_info *, void (*) (void))
     ATTRIBUTE_NORETURN;

/* Automatically partition overlay code sections into overlay regions
   and write a linker script describing the layout, via the script
   FILE opened by htab->spu_elf_open_overlay_script.  This function
   never returns to the caller: it exits via xexit, after optionally
   triggering a relink (AUTO_RELINK).  SPU_ELF_LOAD_OVL_MGR is called
   to add the built-in overlay manager when the user has not supplied
   their own __ovly_load.  */

static void
spu_elf_auto_overlay (struct bfd_link_info *info,
		      void (*spu_elf_load_ovl_mgr) (void))
{
  bfd *ibfd;
  bfd **bfd_arr;
  struct elf_segment_map *m;
  unsigned int fixed_size, lo, hi;
  struct spu_link_hash_table *htab;
  unsigned int base, i, count, bfd_count;
  int ovlynum;
  asection **ovly_sections, **ovly_p;
  FILE *script;
  unsigned int total_overlay_size, overlay_size;
  struct elf_link_hash_entry *h;
  struct _mos_param mos_param;
  struct _uos_param uos_param;
  struct function_info dummy_caller;

  /* Find the extents of our loadable image.  */
  lo = (unsigned int) -1;
  hi = 0;
  for (m = elf_tdata (info->output_bfd)->segment_map; m != NULL; m = m->next)
    if (m->p_type == PT_LOAD)
      for (i = 0; i < m->count; i++)
	if (m->sections[i]->size != 0)
	  {
	    if (m->sections[i]->vma < lo)
	      lo = m->sections[i]->vma;
	    if (m->sections[i]->vma + m->sections[i]->size - 1 > hi)
	      hi = m->sections[i]->vma + m->sections[i]->size - 1;
	  }
  fixed_size = hi + 1 - lo;

  if (!discover_functions (info))
    goto err_exit;

  if (!build_call_tree (info))
    goto err_exit;

  uos_param.exclude_input_section = 0;
  uos_param.exclude_output_section
    = bfd_get_section_by_name (info->output_bfd, ".interrupt");

  htab = spu_hash_table (info);
  h = elf_link_hash_lookup (&htab->elf, "__ovly_load",
			    FALSE, FALSE, FALSE);
  if (h != NULL
      && (h->root.type == bfd_link_hash_defined
	  || h->root.type == bfd_link_hash_defweak)
      && h->def_regular)
    {
      /* We have a user supplied overlay manager.  */
      uos_param.exclude_input_section = h->root.u.def.section;
    }
  else
    {
      /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
	 builtin version to .text, and will adjust .text size.  */
      asection *text = bfd_get_section_by_name (info->output_bfd, ".text");
      if (text != NULL)
	fixed_size -= text->size;
      spu_elf_load_ovl_mgr ();
      text = bfd_get_section_by_name (info->output_bfd, ".text");
      if (text != NULL)
	fixed_size += text->size;
    }

  /* Mark overlay sections, and find max overlay section size.  */
  mos_param.max_overlay_size = 0;
  if (!for_each_node (mark_overlay_section, info, &mos_param, TRUE))
    goto err_exit;

  /* We can't put the overlay manager or interrupt routines in
     overlays.  */
  uos_param.clearing = 0;
  if ((uos_param.exclude_input_section
       || uos_param.exclude_output_section)
      && !for_each_node (unmark_overlay_section, info, &uos_param, TRUE))
    goto err_exit;

  bfd_count = 0;
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    ++bfd_count;
  bfd_arr = bfd_malloc (bfd_count * sizeof (*bfd_arr));
  if (bfd_arr == NULL)
    goto err_exit;

  /* Count overlay sections, and subtract their sizes from "fixed_size".  */
  count = 0;
  bfd_count = 0;
  total_overlay_size = 0;
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;
      unsigned int old_count;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      old_count = count;
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	if (sec->linker_mark)
	  {
	    if ((sec->flags & SEC_CODE) != 0)
	      count += 1;
	    fixed_size -= sec->size;
	    total_overlay_size += sec->size;
	  }
      /* Remember only bfds that contribute overlay code sections.  */
      if (count != old_count)
	bfd_arr[bfd_count++] = ibfd;
    }

  /* Since the overlay link script selects sections by file name and
     section name, ensure that file names are unique.  */
  if (bfd_count > 1)
    {
      bfd_boolean ok = TRUE;

      qsort (bfd_arr, bfd_count, sizeof (*bfd_arr), sort_bfds);
      for (i = 1; i < bfd_count; ++i)
	if (strcmp (bfd_arr[i - 1]->filename, bfd_arr[i]->filename) == 0)
	  {
	    if (bfd_arr[i - 1]->my_archive == bfd_arr[i]->my_archive)
	      {
		if (bfd_arr[i - 1]->my_archive && bfd_arr[i]->my_archive)
		  info->callbacks->einfo (_("%s duplicated in %s\n"),
					  bfd_arr[i]->filename,
					  bfd_arr[i]->my_archive->filename);
		else
		  info->callbacks->einfo (_("%s duplicated\n"),
					  bfd_arr[i]->filename);
		ok = FALSE;
	      }
	  }
      if (!ok)
	{
	  info->callbacks->einfo (_("sorry, no support for duplicate "
				    "object files in auto-overlay script\n"));
	  bfd_set_error (bfd_error_bad_value);
	  goto err_exit;
	}
    }
  free (bfd_arr);

  /* If the user didn't reserve space with --local-store, estimate
     worst-case stack usage and reserve that.  */
  if (htab->reserved == 0)
    {
      struct _sum_stack_param sum_stack_param;

      sum_stack_param.emit_stack_syms = 0;
      sum_stack_param.overall_stack = 0;
      if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
	goto err_exit;
      htab->reserved = sum_stack_param.overall_stack;
    }
  fixed_size += htab->reserved;
  fixed_size += htab->non_ovly_stub * OVL_STUB_SIZE;
  if (fixed_size + mos_param.max_overlay_size <= htab->local_store)
    {
      /* Guess number of overlays.  Assuming overlay buffer is on
	 average only half full should be conservative.  */
      ovlynum = total_overlay_size * 2 / (htab->local_store - fixed_size);
      /* Space for _ovly_table[], _ovly_buf_table[] and toe.  */
      fixed_size += ovlynum * 16 + 16 + 4 + 16;
    }

  if (fixed_size + mos_param.max_overlay_size > htab->local_store)
    info->callbacks->einfo (_("non-overlay plus maximum overlay size "
			      "of 0x%x exceeds local store\n"),
			    fixed_size + mos_param.max_overlay_size);

  /* Now see if we should put some functions in the non-overlay area.  */
  if (fixed_size < htab->overlay_fixed
      && htab->overlay_fixed + mos_param.max_overlay_size < htab->local_store)
    {
      unsigned int lib_size = htab->overlay_fixed - fixed_size;
      lib_size = auto_ovl_lib_functions (info, lib_size);
      if (lib_size == (unsigned int) -1)
	goto err_exit;
      fixed_size = htab->overlay_fixed - lib_size;
    }

  /* Build an array of sections, suitably sorted to place into
     overlays.  Each entry is a pair: code section, optional rodata.  */
  ovly_sections = bfd_malloc (2 * count * sizeof (*ovly_sections));
  if (ovly_sections == NULL)
    goto err_exit;
  ovly_p = ovly_sections;
  if (!for_each_node (collect_overlays, info, &ovly_p, TRUE))
    goto err_exit;
  count = (size_t) (ovly_p - ovly_sections) / 2;

  script = htab->spu_elf_open_overlay_script ();

  if (fprintf (script, "SECTIONS\n{\n OVERLAY :\n {\n") <= 0)
    goto file_err;

  /* Greedily pack sections into numbered overlay regions, emitting a
     ".ovlyN" output section per region.  Each candidate section must
     fit together with its pasted continuations, its rodata, and any
     overlay call stubs the region will need.  */
  memset (&dummy_caller, 0, sizeof (dummy_caller));
  overlay_size = htab->local_store - fixed_size;
  base = 0;
  ovlynum = 0;
  while (base < count)
    {
      unsigned int size = 0;
      unsigned int j;

      for (i = base; i < count; i++)
	{
	  asection *sec;
	  unsigned int tmp;
	  unsigned int stub_size;
	  struct call_info *call, *pasty;
	  struct _spu_elf_section_data *sec_data;
	  struct spu_elf_stack_info *sinfo;
	  int k;

	  /* See whether we can add this section to the current
	     overlay without overflowing our overlay buffer.  */
	  sec = ovly_sections[2 * i];
	  tmp = size + sec->size;
	  if (ovly_sections[2 * i + 1])
	    tmp += ovly_sections[2 * i + 1]->size;
	  if (tmp > overlay_size)
	    break;
	  if (sec->segment_mark)
	    {
	      /* Pasted sections must stay together, so add their
		 sizes too.  */
	      struct call_info *pasty = find_pasted_call (sec);
	      while (pasty != NULL)
		{
		  struct function_info *call_fun = pasty->fun;
		  tmp += call_fun->sec->size;
		  if (call_fun->rodata)
		    tmp += call_fun->rodata->size;
		  for (pasty = call_fun->call_list; pasty; pasty = pasty->next)
		    if (pasty->is_pasted)
		      break;
		}
	    }
	  if (tmp > overlay_size)
	    break;

	  /* If we add this section, we might need new overlay call
	     stubs.  Add any overlay section calls to dummy_call.  */
	  pasty = NULL;
	  sec_data = spu_elf_section_data (sec);
	  sinfo = sec_data->u.i.stack_info;
	  for (k = 0; k < sinfo->num_fun; ++k)
	    for (call = sinfo->fun[k].call_list; call; call = call->next)
	      if (call->is_pasted)
		{
		  BFD_ASSERT (pasty == NULL);
		  pasty = call;
		}
	      else if (call->fun->sec->linker_mark)
		{
		  if (!copy_callee (&dummy_caller, call))
		    goto err_exit;
		}
	  /* Walk the chain of pasted continuations, collecting their
	     calls too.  */
	  while (pasty != NULL)
	    {
	      struct function_info *call_fun = pasty->fun;
	      pasty = NULL;
	      for (call = call_fun->call_list; call; call = call->next)
		if (call->is_pasted)
		  {
		    BFD_ASSERT (pasty == NULL);
		    pasty = call;
		  }
		else if (!copy_callee (&dummy_caller, call))
		  goto err_exit;
	    }

	  /* Calculate call stub size.  */
	  stub_size = 0;
	  for (call = dummy_caller.call_list; call; call = call->next)
	    {
	      unsigned int k;

	      stub_size += OVL_STUB_SIZE;
	      /* If the call is within this overlay, we won't need a
		 stub.  */
	      for (k = base; k < i + 1; k++)
		if (call->fun->sec == ovly_sections[2 * k])
		  {
		    stub_size -= OVL_STUB_SIZE;
		    break;
		  }
	    }
	  if (tmp + stub_size > overlay_size)
	    break;

	  size = tmp;
	}

      /* A single section (plus its companions) that doesn't fit in
	 the overlay buffer can never be placed; give up.  */
      if (i == base)
	{
	  info->callbacks->einfo (_("%B:%A%s exceeds overlay size\n"),
				  ovly_sections[2 * i]->owner,
				  ovly_sections[2 * i],
				  ovly_sections[2 * i + 1] ? " + rodata" : "");
	  bfd_set_error (bfd_error_bad_value);
	  goto err_exit;
	}

      /* Emit the script entry for this overlay region: first the code
	 sections (with pasted continuations), then the rodata.  */
      if (fprintf (script, "  .ovly%d {\n", ++ovlynum) <= 0)
	goto file_err;
      for (j = base; j < i; j++)
	{
	  asection *sec = ovly_sections[2 * j];

	  if (fprintf (script, "   %s%c%s (%s)\n",
		       (sec->owner->my_archive != NULL
			? sec->owner->my_archive->filename : ""),
		       info->path_separator,
		       sec->owner->filename,
		       sec->name) <= 0)
	    goto file_err;
	  if (sec->segment_mark)
	    {
	      struct call_info *call = find_pasted_call (sec);
	      while (call != NULL)
		{
		  struct function_info *call_fun = call->fun;
		  sec = call_fun->sec;
		  if (fprintf (script, "   %s%c%s (%s)\n",
			       (sec->owner->my_archive != NULL
				? sec->owner->my_archive->filename : ""),
			       info->path_separator,
			       sec->owner->filename,
			       sec->name) <= 0)
		    goto file_err;
		  for (call = call_fun->call_list; call; call = call->next)
		    if (call->is_pasted)
		      break;
		}
	    }
	}

      for (j = base; j < i; j++)
	{
	  asection *sec = ovly_sections[2 * j + 1];
	  if (sec != NULL
	      && fprintf (script, "   %s%c%s (%s)\n",
			  (sec->owner->my_archive != NULL
			   ? sec->owner->my_archive->filename : ""),
			  info->path_separator,
			  sec->owner->filename,
			  sec->name) <= 0)
	    goto file_err;

	  sec = ovly_sections[2 * j];
	  if (sec->segment_mark)
	    {
	      struct call_info *call = find_pasted_call (sec);
	      while (call != NULL)
		{
		  struct function_info *call_fun = call->fun;
		  sec = call_fun->rodata;
		  if (sec != NULL
		      && fprintf (script, "   %s%c%s (%s)\n",
				  (sec->owner->my_archive != NULL
				   ? sec->owner->my_archive->filename : ""),
				  info->path_separator,
				  sec->owner->filename,
				  sec->name) <= 0)
		    goto file_err;
		  for (call = call_fun->call_list; call; call = call->next)
		    if (call->is_pasted)
		      break;
		}
	    }
	}

      if (fprintf (script, "  }\n") <= 0)
	goto file_err;

      /* Discard the per-region scratch call list before starting the
	 next region.  */
      while (dummy_caller.call_list != NULL)
	{
	  struct call_info *call = dummy_caller.call_list;
	  dummy_caller.call_list = call->next;
	  free (call);
	}

      base = i;
    }
  free (ovly_sections);

  if (fprintf (script, " }\n}\nINSERT AFTER .text;\n") <= 0)
    goto file_err;
  if (fclose (script) != 0)
    goto file_err;

  if (htab->auto_overlay & AUTO_RELINK)
    htab->spu_elf_relink ();

  xexit (0);

 file_err:
  bfd_set_error (bfd_error_system_call);
 err_exit:
  info->callbacks->einfo ("%F%P: auto overlay error: %E\n");
  xexit (1);
}
3829
3830 /* Provide an estimate of total stack required. */
3831
3832 static bfd_boolean
3833 spu_elf_stack_analysis (struct bfd_link_info *info, int emit_stack_syms)
3834 {
3835 struct _sum_stack_param sum_stack_param;
3836
3837 if (!discover_functions (info))
3838 return FALSE;
3839
3840 if (!build_call_tree (info))
3841 return FALSE;
3842
3843 info->callbacks->info (_("Stack size for call graph root nodes.\n"));
3844 info->callbacks->minfo (_("\nStack size for functions. "
3845 "Annotations: '*' max stack, 't' tail call\n"));
3846
3847 sum_stack_param.emit_stack_syms = emit_stack_syms;
3848 sum_stack_param.overall_stack = 0;
3849 if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
3850 return FALSE;
3851
3852 info->callbacks->info (_("Maximum stack required is 0x%v\n"),
3853 (bfd_vma) sum_stack_param.overall_stack);
3854 return TRUE;
3855 }
3856
3857 /* Perform a final link. */
3858
3859 static bfd_boolean
3860 spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
3861 {
3862 struct spu_link_hash_table *htab = spu_hash_table (info);
3863
3864 if (htab->auto_overlay)
3865 spu_elf_auto_overlay (info, htab->spu_elf_load_ovl_mgr);
3866
3867 if (htab->stack_analysis
3868 && !spu_elf_stack_analysis (info, htab->emit_stack_syms))
3869 info->callbacks->einfo ("%X%P: stack analysis error: %E\n");
3870
3871 return bfd_elf_final_link (output_bfd, info);
3872 }
3873
3874 /* Called when not normally emitting relocs, ie. !info->relocatable
3875 and !info->emitrelocations. Returns a count of special relocs
3876 that need to be emitted. */
3877
3878 static unsigned int
3879 spu_elf_count_relocs (asection *sec, Elf_Internal_Rela *relocs)
3880 {
3881 unsigned int count = 0;
3882 Elf_Internal_Rela *relend = relocs + sec->reloc_count;
3883
3884 for (; relocs < relend; relocs++)
3885 {
3886 int r_type = ELF32_R_TYPE (relocs->r_info);
3887 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
3888 ++count;
3889 }
3890
3891 return count;
3892 }
3893
/* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD.

   Returns TRUE on success, FALSE after a reported error, or 2 when
   R_SPU_PPU32/R_SPU_PPU64 relocs have been compacted in-place for
   emission into the output even though relocs are not normally being
   emitted (see the tail of this function).  */

static int
spu_elf_relocate_section (bfd *output_bfd,
			  struct bfd_link_info *info,
			  bfd *input_bfd,
			  asection *input_section,
			  bfd_byte *contents,
			  Elf_Internal_Rela *relocs,
			  Elf_Internal_Sym *local_syms,
			  asection **local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel, *relend;
  struct spu_link_hash_table *htab;
  asection *ea = bfd_get_section_by_name (output_bfd, "._ea");
  int ret = TRUE;
  bfd_boolean emit_these_relocs = FALSE;
  bfd_boolean is_ea_sym;
  bfd_boolean stubs;

  htab = spu_hash_table (info);
  /* Only bother testing for overlay stubs if stub sections exist and
     this input section could plausibly need them.  */
  stubs = (htab->stub_sec != NULL
	   && maybe_needs_stubs (input_section, output_bfd));
  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
  sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int r_type;
      reloc_howto_type *howto;
      unsigned int r_symndx;
      Elf_Internal_Sym *sym;
      asection *sec;
      struct elf_link_hash_entry *h;
      const char *sym_name;
      bfd_vma relocation;
      bfd_vma addend;
      bfd_reloc_status_type r;
      bfd_boolean unresolved_reloc;
      bfd_boolean warned;
      enum _stub_type stub_type;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      howto = elf_howto_table + r_type;
      unresolved_reloc = FALSE;
      warned = FALSE;
      h = NULL;
      sym = NULL;
      sec = NULL;
      /* Resolve the symbol: local symbols via the symbol table,
	 globals via the linker hash table.  */
      if (r_symndx < symtab_hdr->sh_info)
	{
	  sym = local_syms + r_symndx;
	  sec = local_sections[r_symndx];
	  sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
	  relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
	}
      else
	{
	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
				   r_symndx, symtab_hdr, sym_hashes,
				   h, sec, relocation,
				   unresolved_reloc, warned);
	  sym_name = h->root.root.string;
	}

      if (sec != NULL && elf_discarded_section (sec))
	{
	  /* For relocs against symbols from removed linkonce sections,
	     or sections discarded by a linker script, we just want the
	     section contents zeroed.  Avoid any special processing.  */
	  _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
	  rel->r_info = 0;
	  rel->r_addend = 0;
	  continue;
	}

      if (info->relocatable)
	continue;

      is_ea_sym = (ea != NULL
		   && sec != NULL
		   && sec->output_section == ea);

      if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	{
	  if (is_ea_sym)
	    {
	      /* ._ea is a special section that isn't allocated in SPU
		 memory, but rather occupies space in PPU memory as
		 part of an embedded ELF image.  If this reloc is
		 against a symbol defined in ._ea, then transform the
		 reloc into an equivalent one without a symbol
		 relative to the start of the ELF image.  */
	      rel->r_addend += (relocation
				- ea->vma
				+ elf_section_data (ea)->this_hdr.sh_offset);
	      rel->r_info = ELF32_R_INFO (0, r_type);
	    }
	  /* PPU relocs are passed through for the PPU side to apply;
	     they are not resolved here.  */
	  emit_these_relocs = TRUE;
	  continue;
	}

      /* Non-PPU relocs against ._ea symbols cannot be resolved in SPU
	 local store.  */
      if (is_ea_sym)
	unresolved_reloc = TRUE;

      if (unresolved_reloc)
	{
	  (*_bfd_error_handler)
	    (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
	     input_bfd,
	     bfd_get_section_name (input_bfd, input_section),
	     (long) rel->r_offset,
	     howto->name,
	     sym_name);
	  ret = FALSE;
	}

      /* If this symbol is in an overlay area, we may need to relocate
	 to the overlay stub.  */
      addend = rel->r_addend;
      if (stubs
	  && (stub_type = needs_ovl_stub (h, sym, sec, input_section, rel,
					  contents, info)) != no_stub)
	{
	  unsigned int ovl = 0;
	  struct got_entry *g, **head;

	  if (stub_type != nonovl_stub)
	    ovl = (spu_elf_section_data (input_section->output_section)
		   ->u.o.ovl_index);

	  if (h != NULL)
	    head = &h->got.glist;
	  else
	    head = elf_local_got_ents (input_bfd) + r_symndx;

	  /* Find the stub built earlier for this symbol/addend in
	     this overlay (ovl 0 stubs serve all overlays).  */
	  for (g = *head; g != NULL; g = g->next)
	    if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
	      break;
	  if (g == NULL)
	    abort ();

	  relocation = g->stub_addr;
	  addend = 0;
	}

      r = _bfd_final_link_relocate (howto,
				    input_bfd,
				    input_section,
				    contents,
				    rel->r_offset, relocation, addend);

      if (r != bfd_reloc_ok)
	{
	  const char *msg = (const char *) 0;

	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      if (!((*info->callbacks->reloc_overflow)
		    (info, (h ? &h->root : NULL), sym_name, howto->name,
		     (bfd_vma) 0, input_bfd, input_section, rel->r_offset)))
		return FALSE;
	      break;

	    case bfd_reloc_undefined:
	      if (!((*info->callbacks->undefined_symbol)
		    (info, sym_name, input_bfd, input_section,
		     rel->r_offset, TRUE)))
		return FALSE;
	      break;

	    case bfd_reloc_outofrange:
	      msg = _("internal error: out of range error");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      msg = _("internal error: unsupported relocation error");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      msg = _("internal error: dangerous error");
	      goto common_error;

	    default:
	      msg = _("internal error: unknown error");
	      /* fall through */

	    common_error:
	      ret = FALSE;
	      if (!((*info->callbacks->warning)
		    (info, msg, sym_name, input_bfd, input_section,
		     rel->r_offset)))
		return FALSE;
	      break;
	    }
	}
    }

  if (ret
      && emit_these_relocs
      && !info->emitrelocations)
    {
      /* Compact the reloc array down to just the PPU relocs we want
	 written out, and adjust the section bookkeeping to match.  */
      Elf_Internal_Rela *wrel;
      Elf_Internal_Shdr *rel_hdr;

      wrel = rel = relocs;
      relend = relocs + input_section->reloc_count;
      for (; rel < relend; rel++)
	{
	  int r_type;

	  r_type = ELF32_R_TYPE (rel->r_info);
	  if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	    *wrel++ = *rel;
	}
      input_section->reloc_count = wrel - relocs;
      /* Backflips for _bfd_elf_link_output_relocs.  */
      rel_hdr = &elf_section_data (input_section)->rel_hdr;
      rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
      ret = 2;
    }

  return ret;
}
4124
4125 /* Adjust _SPUEAR_ syms to point at their overlay stubs. */
4126
4127 static bfd_boolean
4128 spu_elf_output_symbol_hook (struct bfd_link_info *info,
4129 const char *sym_name ATTRIBUTE_UNUSED,
4130 Elf_Internal_Sym *sym,
4131 asection *sym_sec ATTRIBUTE_UNUSED,
4132 struct elf_link_hash_entry *h)
4133 {
4134 struct spu_link_hash_table *htab = spu_hash_table (info);
4135
4136 if (!info->relocatable
4137 && htab->stub_sec != NULL
4138 && h != NULL
4139 && (h->root.type == bfd_link_hash_defined
4140 || h->root.type == bfd_link_hash_defweak)
4141 && h->def_regular
4142 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
4143 {
4144 struct got_entry *g;
4145
4146 for (g = h->got.glist; g != NULL; g = g->next)
4147 if (g->addend == 0 && g->ovl == 0)
4148 {
4149 sym->st_shndx = (_bfd_elf_section_from_bfd_section
4150 (htab->stub_sec[0]->output_section->owner,
4151 htab->stub_sec[0]->output_section));
4152 sym->st_value = g->stub_addr;
4153 break;
4154 }
4155 }
4156
4157 return TRUE;
4158 }
4159
/* Non-zero when "plugin" output was requested; the only effect here
   is that the ELF header e_type is set to ET_DYN (see
   spu_elf_post_process_headers).  */
static int spu_plugin = 0;

/* Record whether plugin output was requested.  VAL non-zero enables
   it.  Called from the linker front end.  */
void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}
4167
4168 /* Set ELF header e_type for plugins. */
4169
4170 static void
4171 spu_elf_post_process_headers (bfd *abfd,
4172 struct bfd_link_info *info ATTRIBUTE_UNUSED)
4173 {
4174 if (spu_plugin)
4175 {
4176 Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);
4177
4178 i_ehdrp->e_type = ET_DYN;
4179 }
4180 }
4181
4182 /* We may add an extra PT_LOAD segment for .toe. We also need extra
4183 segments for overlays. */
4184
4185 static int
4186 spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
4187 {
4188 struct spu_link_hash_table *htab = spu_hash_table (info);
4189 int extra = htab->num_overlays;
4190 asection *sec;
4191
4192 if (extra)
4193 ++extra;
4194
4195 sec = bfd_get_section_by_name (abfd, ".toe");
4196 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
4197 ++extra;
4198
4199 return extra;
4200 }
4201
/* Remove .toe section from other PT_LOAD segments and put it in
   a segment of its own.  Put overlays in separate segments too.

   For each multi-section PT_LOAD map, the first section that is .toe
   or has a non-zero overlay index is split out: sections after it go
   to one new PT_LOAD map, the section itself to another (when it is
   not already first), leaving the original map holding only the
   sections that precede it.  */

static bfd_boolean
spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
{
  asection *toe, *s;
  struct elf_segment_map *m;
  unsigned int i;

  /* NOTE(review): info is NULL when there is no linker context
     (presumably e.g. objcopy) — nothing to split then.  */
  if (info == NULL)
    return TRUE;

  toe = bfd_get_section_by_name (abfd, ".toe");
  for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
    if (m->p_type == PT_LOAD && m->count > 1)
      for (i = 0; i < m->count; i++)
	if ((s = m->sections[i]) == toe
	    || spu_elf_section_data (s)->u.o.ovl_index != 0)
	  {
	    struct elf_segment_map *m2;
	    bfd_vma amt;

	    /* Move the sections following S into a new PT_LOAD map
	       spliced in after M.  The allocation accounts for the
	       trailing sections[] array (one element is already part
	       of struct elf_segment_map, hence i + 2).  */
	    if (i + 1 < m->count)
	      {
		amt = sizeof (struct elf_segment_map);
		amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->count = m->count - (i + 1);
		memcpy (m2->sections, m->sections + i + 1,
			m2->count * sizeof (m->sections[0]));
		m2->p_type = PT_LOAD;
		m2->next = m->next;
		m->next = m2;
	      }
	    m->count = 1;
	    /* If S is not the first section, give S its own map too,
	       and let M keep the sections that precede S.  */
	    if (i != 0)
	      {
		m->count = i;
		amt = sizeof (struct elf_segment_map);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->p_type = PT_LOAD;
		m2->count = 1;
		m2->sections[0] = s;
		m2->next = m->next;
		m->next = m2;
	      }
	    /* Only the first matching section per map is split here;
	       new maps created above are visited by the outer loop.  */
	    break;
	  }

  return TRUE;
}
4258
4259 /* Tweak the section type of .note.spu_name. */
4260
4261 static bfd_boolean
4262 spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
4263 Elf_Internal_Shdr *hdr,
4264 asection *sec)
4265 {
4266 if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
4267 hdr->sh_type = SHT_NOTE;
4268 return TRUE;
4269 }
4270
/* Tweak phdrs before writing them out.

   Two jobs: (1) mark overlay segments with PF_OVERLAY and record
   each overlay's file offset in _ovly_table; (2) pad PT_LOAD
   p_filesz/p_memsz up to multiples of 16 for DMA, but only when the
   padding cannot make segments overlap.  */

static int
spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
{
  const struct elf_backend_data *bed;
  struct elf_obj_tdata *tdata;
  Elf_Internal_Phdr *phdr, *last;
  struct spu_link_hash_table *htab;
  unsigned int count;
  unsigned int i;

  /* Nothing to tweak without linker context.  */
  if (info == NULL)
    return TRUE;

  bed = get_elf_backend_data (abfd);
  tdata = elf_tdata (abfd);
  phdr = tdata->phdr;
  count = tdata->program_header_size / bed->s->sizeof_phdr;
  htab = spu_hash_table (info);
  if (htab->num_overlays != 0)
    {
      struct elf_segment_map *m;
      unsigned int o;

      /* Segment maps and phdrs are parallel arrays, so index I
	 identifies the phdr for map M.  */
      for (i = 0, m = elf_tdata (abfd)->segment_map; m; ++i, m = m->next)
	if (m->count != 0
	    && (o = spu_elf_section_data (m->sections[0])->u.o.ovl_index) != 0)
	  {
	    /* Mark this as an overlay header.  */
	    phdr[i].p_flags |= PF_OVERLAY;

	    if (htab->ovtab != NULL && htab->ovtab->size != 0)
	      {
		bfd_byte *p = htab->ovtab->contents;
		/* Each _ovly_table entry is 16 bytes; the file offset
		   field lives at byte 8 of entry O (entries are
		   1-based, matching ovl_index).  */
		unsigned int off = o * 16 + 8;

		/* Write file_off into _ovly_table.  */
		bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
	      }
	  }
    }

  /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
     of 16.  This should always be possible when using the standard
     linker scripts, but don't create overlapping segments if
     someone is playing games with linker scripts.  */
  last = NULL;
  /* First pass (backwards): verify that every padding we'd apply
     stays clear of the following PT_LOAD segment.  If the scan
     completes, I underflows to (unsigned int) -1.  */
  for (i = count; i-- != 0; )
    if (phdr[i].p_type == PT_LOAD)
      {
	unsigned adjust;

	adjust = -phdr[i].p_filesz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
	  break;

	adjust = -phdr[i].p_memsz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_filesz != 0
	    && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
	    && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
	  break;

	if (phdr[i].p_filesz != 0)
	  last = &phdr[i];
      }

  /* Second pass: the check above passed for all segments, so the
     padding is safe to apply.  */
  if (i == (unsigned int) -1)
    for (i = count; i-- != 0; )
      if (phdr[i].p_type == PT_LOAD)
	{
	  unsigned adjust;

	  adjust = -phdr[i].p_filesz & 15;
	  phdr[i].p_filesz += adjust;

	  adjust = -phdr[i].p_memsz & 15;
	  phdr[i].p_memsz += adjust;
	}

  return TRUE;
}
4357
4358 #define TARGET_BIG_SYM bfd_elf32_spu_vec
4359 #define TARGET_BIG_NAME "elf32-spu"
4360 #define ELF_ARCH bfd_arch_spu
4361 #define ELF_MACHINE_CODE EM_SPU
4362 /* This matches the alignment need for DMA. */
4363 #define ELF_MAXPAGESIZE 0x80
4364 #define elf_backend_rela_normal 1
4365 #define elf_backend_can_gc_sections 1
4366
4367 #define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
4368 #define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
4369 #define elf_info_to_howto spu_elf_info_to_howto
4370 #define elf_backend_count_relocs spu_elf_count_relocs
4371 #define elf_backend_relocate_section spu_elf_relocate_section
4372 #define elf_backend_symbol_processing spu_elf_backend_symbol_processing
4373 #define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
4374 #define elf_backend_object_p spu_elf_object_p
4375 #define bfd_elf32_new_section_hook spu_elf_new_section_hook
4376 #define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create
4377
4378 #define elf_backend_additional_program_headers spu_elf_additional_program_headers
4379 #define elf_backend_modify_segment_map spu_elf_modify_segment_map
4380 #define elf_backend_modify_program_headers spu_elf_modify_program_headers
4381 #define elf_backend_post_process_headers spu_elf_post_process_headers
4382 #define elf_backend_fake_sections spu_elf_fake_sections
4383 #define elf_backend_special_sections spu_elf_special_sections
4384 #define bfd_elf32_bfd_final_link spu_elf_final_link
4385
4386 #include "elf32-target.h"
This page took 0.121585 seconds and 3 git commands to generate.