/* SPU specific support for 32-bit ELF

   Copyright 2006, 2007, 2008 Free Software Foundation, Inc.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License along
   with this program; if not, write to the Free Software Foundation, Inc.,
   51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */

#include "sysdep.h"
#include "libiberty.h"
#include "bfd.h"
#include "bfdlink.h"
#include "libbfd.h"
#include "elf-bfd.h"
#include "elf/spu.h"
#include "elf32-spu.h"

/* We use RELA style relocs.  Don't define USE_REL.  */

static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
                                           void *, asection *,
                                           bfd *, char **);

/* Values of type 'enum elf_spu_reloc_type' are used to index this
   array, so it must be declared in the order of that type.  */

static reloc_howto_type elf_howto_table[] = {
  HOWTO (R_SPU_NONE,       0, 0,  0, FALSE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_NONE",
         FALSE, 0, 0x00000000, FALSE),
  HOWTO (R_SPU_ADDR10,     4, 2, 10, FALSE, 14, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR10",
         FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16,     2, 2, 16, FALSE,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR16",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR16_HI",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_LO,  0, 2, 16, FALSE,  7, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_ADDR16_LO",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR18,     0, 2, 18, FALSE,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR18",
         FALSE, 0, 0x01ffff80, FALSE),
  HOWTO (R_SPU_ADDR32,     0, 2, 32, FALSE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_ADDR32",
         FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_REL16,      2, 2, 16,  TRUE,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_REL16",
         FALSE, 0, 0x007fff80, TRUE),
  HOWTO (R_SPU_ADDR7,      0, 2,  7, FALSE, 14, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_ADDR7",
         FALSE, 0, 0x001fc000, FALSE),
  HOWTO (R_SPU_REL9,       2, 2,  9,  TRUE,  0, complain_overflow_signed,
         spu_elf_rel9,          "SPU_REL9",
         FALSE, 0, 0x0180007f, TRUE),
  HOWTO (R_SPU_REL9I,      2, 2,  9,  TRUE,  0, complain_overflow_signed,
         spu_elf_rel9,          "SPU_REL9I",
         FALSE, 0, 0x0000c07f, TRUE),
  HOWTO (R_SPU_ADDR10I,    0, 2, 10, FALSE, 14, complain_overflow_signed,
         bfd_elf_generic_reloc, "SPU_ADDR10I",
         FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16I,    0, 2, 16, FALSE,  7, complain_overflow_signed,
         bfd_elf_generic_reloc, "SPU_ADDR16I",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_REL32,      0, 2, 32,  TRUE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_REL32",
         FALSE, 0, 0xffffffff, TRUE),
  HOWTO (R_SPU_ADDR16X,    0, 2, 16, FALSE,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR16X",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_PPU32,      0, 2, 32, FALSE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_PPU32",
         FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_PPU64,      0, 4, 64, FALSE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_PPU64",
         FALSE, 0, -1, FALSE),
};

static struct bfd_elf_special_section const spu_elf_special_sections[] = {
  { "._ea", 4, 0, SHT_PROGBITS, SHF_WRITE },
  { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
  { NULL, 0, 0, 0, 0 }
};

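/* Map a BFD reloc code to the corresponding SPU ELF reloc type,
   returning R_SPU_NONE for codes with no SPU equivalent.  */
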
static enum elf_spu_reloc_type
spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
{
  switch (code)
    {
    default:
      return R_SPU_NONE;
    case BFD_RELOC_SPU_IMM10W:
      return R_SPU_ADDR10;
    case BFD_RELOC_SPU_IMM16W:
      return R_SPU_ADDR16;
    case BFD_RELOC_SPU_LO16:
      return R_SPU_ADDR16_LO;
    case BFD_RELOC_SPU_HI16:
      return R_SPU_ADDR16_HI;
    case BFD_RELOC_SPU_IMM18:
      return R_SPU_ADDR18;
    case BFD_RELOC_SPU_PCREL16:
      return R_SPU_REL16;
    case BFD_RELOC_SPU_IMM7:
      return R_SPU_ADDR7;
    case BFD_RELOC_SPU_IMM8:
      return R_SPU_NONE;
    case BFD_RELOC_SPU_PCREL9a:
      return R_SPU_REL9;
    case BFD_RELOC_SPU_PCREL9b:
      return R_SPU_REL9I;
    case BFD_RELOC_SPU_IMM10:
      return R_SPU_ADDR10I;
    case BFD_RELOC_SPU_IMM16:
      return R_SPU_ADDR16I;
    case BFD_RELOC_32:
      return R_SPU_ADDR32;
    case BFD_RELOC_32_PCREL:
      return R_SPU_REL32;
    case BFD_RELOC_SPU_PPU32:
      return R_SPU_PPU32;
    case BFD_RELOC_SPU_PPU64:
      return R_SPU_PPU64;
    }
}

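/* Set the howto pointer for an SPU ELF reloc from the reloc type
   in DST.  */
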
static void
spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
                       arelent *cache_ptr,
                       Elf_Internal_Rela *dst)
{
  enum elf_spu_reloc_type r_type;

  r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
  BFD_ASSERT (r_type < R_SPU_max);
  cache_ptr->howto = &elf_howto_table[(int) r_type];
}

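/* Given a BFD reloc code, return the matching howto entry, or NULL
   if the code has no SPU mapping.  */
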
static reloc_howto_type *
spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
                           bfd_reloc_code_real_type code)
{
  enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);

  if (r_type == R_SPU_NONE)
    return NULL;

  return elf_howto_table + r_type;
}

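/* Given a reloc name, return the matching howto entry, or NULL.  */
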
static reloc_howto_type *
spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
                           const char *r_name)
{
  unsigned int i;

  for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
    if (elf_howto_table[i].name != NULL
        && strcasecmp (elf_howto_table[i].name, r_name) == 0)
      return &elf_howto_table[i];

  return NULL;
}

/* Apply R_SPU_REL9 and R_SPU_REL9I relocs.  */

static bfd_reloc_status_type
spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
              void *data, asection *input_section,
              bfd *output_bfd, char **error_message)
{
  bfd_size_type octets;
  bfd_vma val;
  long insn;

  /* If this is a relocatable link (output_bfd test tells us), just
     call the generic function.  Any adjustment will be done at final
     link time.  */
  if (output_bfd != NULL)
    return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
                                  input_section, output_bfd, error_message);

  if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
    return bfd_reloc_outofrange;
  octets = reloc_entry->address * bfd_octets_per_byte (abfd);

  /* Get symbol value.  */
  val = 0;
  if (!bfd_is_com_section (symbol->section))
    val = symbol->value;
  if (symbol->section->output_section)
    val += symbol->section->output_section->vma;

  val += reloc_entry->addend;

  /* Make it pc-relative.  */
  val -= input_section->output_section->vma + input_section->output_offset;

  val >>= 2;
  if (val + 256 >= 512)
    return bfd_reloc_overflow;

  insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);

  /* Move two high bits of value to REL9I and REL9 position.
     The mask will take care of selecting the right field.  */
  val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
  insn &= ~reloc_entry->howto->dst_mask;
  insn |= val & reloc_entry->howto->dst_mask;
  bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
  return bfd_reloc_ok;
}

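/* Attach SPU-specific section data when a new section is created.  */
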
static bfd_boolean
spu_elf_new_section_hook (bfd *abfd, asection *sec)
{
  if (!sec->used_by_bfd)
    {
      struct _spu_elf_section_data *sdata;

      sdata = bfd_zalloc (abfd, sizeof (*sdata));
      if (sdata == NULL)
        return FALSE;
      sec->used_by_bfd = sdata;
    }

  return _bfd_elf_new_section_hook (abfd, sec);
}

/* Set up overlay info for executables.  */

static bfd_boolean
spu_elf_object_p (bfd *abfd)
{
  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
    {
      unsigned int i, num_ovl, num_buf;
      Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
      Elf_Internal_Ehdr *ehdr = elf_elfheader (abfd);
      Elf_Internal_Phdr *last_phdr = NULL;

      for (num_buf = 0, num_ovl = 0, i = 0; i < ehdr->e_phnum; i++, phdr++)
        if (phdr->p_type == PT_LOAD && (phdr->p_flags & PF_OVERLAY) != 0)
          {
            unsigned int j;

            ++num_ovl;
            if (last_phdr == NULL
                || ((last_phdr->p_vaddr ^ phdr->p_vaddr) & 0x3ffff) != 0)
              ++num_buf;
            last_phdr = phdr;
            for (j = 1; j < elf_numsections (abfd); j++)
              {
                Elf_Internal_Shdr *shdr = elf_elfsections (abfd)[j];

                if (ELF_IS_SECTION_IN_SEGMENT_MEMORY (shdr, phdr))
                  {
                    asection *sec = shdr->bfd_section;
                    spu_elf_section_data (sec)->u.o.ovl_index = num_ovl;
                    spu_elf_section_data (sec)->u.o.ovl_buf = num_buf;
                  }
              }
          }
    }
  return TRUE;
}

/* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
   strip --strip-unneeded will not remove them.  */

static void
spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
{
  if (sym->name != NULL
      && sym->section != bfd_abs_section_ptr
      && strncmp (sym->name, "_EAR_", 5) == 0)
    sym->flags |= BSF_KEEP;
}

/* SPU ELF linker hash table.  */

struct spu_link_hash_table
{
  struct elf_link_hash_table elf;

  struct spu_elf_params *params;

  /* Shortcuts to overlay sections.  */
  asection *ovtab;
  asection *toe;
  asection **ovl_sec;

  /* Count of stubs in each overlay section.  */
  unsigned int *stub_count;

  /* The stub section for each overlay section.  */
  asection **stub_sec;

  struct elf_link_hash_entry *ovly_load;
  struct elf_link_hash_entry *ovly_return;
  unsigned long ovly_load_r_symndx;

  /* Number of overlay buffers.  */
  unsigned int num_buf;

  /* Total number of overlays.  */
  unsigned int num_overlays;

  /* How much memory we have.  */
  unsigned int local_store;
  /* Local store --auto-overlay should reserve for non-overlay
     functions and data.  */
  unsigned int overlay_fixed;
  /* Local store --auto-overlay should reserve for stack and heap.  */
  unsigned int reserved;
  /* If reserved is not specified, stack analysis will calculate a value
     for the stack.  This parameter adjusts that value to allow for
     negative sp access (the ABI says 2000 bytes below sp are valid,
     and the overlay manager uses some of this area).  */
  int extra_stack_space;
  /* Count of overlay stubs needed in non-overlay area.  */
  unsigned int non_ovly_stub;

  /* Set on error.  */
  unsigned int stub_err : 1;
};

/* Hijack the generic got fields for overlay stub accounting.  */

struct got_entry
{
  struct got_entry *next;
  unsigned int ovl;
  bfd_vma addend;
  bfd_vma stub_addr;
};

#define spu_hash_table(p) \
  ((struct spu_link_hash_table *) ((p)->hash))

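/* One entry in a function's list of callees.  */
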
struct call_info
{
  struct function_info *fun;
  struct call_info *next;
  unsigned int count;
  unsigned int max_depth;
  unsigned int is_tail : 1;
  unsigned int is_pasted : 1;
};

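/* Information about a function, or a contiguous part of one.  */
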
struct function_info
{
  /* List of functions called.  Also branches to hot/cold part of
     function.  */
  struct call_info *call_list;
  /* For hot/cold part of function, point to owner.  */
  struct function_info *start;
  /* Symbol at start of function.  */
  union {
    Elf_Internal_Sym *sym;
    struct elf_link_hash_entry *h;
  } u;
  /* Function section.  */
  asection *sec;
  asection *rodata;
  /* Where last called from, and number of sections called from.  */
  asection *last_caller;
  unsigned int call_count;
  /* Address range of (this part of) function.  */
  bfd_vma lo, hi;
  /* Stack usage.  */
  int stack;
  /* Distance from root of call tree.  Tail and hot/cold branches
     count as one deeper.  We aren't counting stack frames here.  */
  unsigned int depth;
  /* Set if global symbol.  */
  unsigned int global : 1;
  /* Set if known to be start of function (as distinct from a hunk
     in hot/cold section).  */
  unsigned int is_func : 1;
  /* Set if not a root node.  */
  unsigned int non_root : 1;
  /* Flags used during call tree traversal.  It's cheaper to replicate
     the visit flags than have one which needs clearing after a traversal.  */
  unsigned int visit1 : 1;
  unsigned int visit2 : 1;
  unsigned int marking : 1;
  unsigned int visit3 : 1;
  unsigned int visit4 : 1;
  unsigned int visit5 : 1;
  unsigned int visit6 : 1;
  unsigned int visit7 : 1;
};

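/* Per-section array of function_info entries, kept sorted by address.  */
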
struct spu_elf_stack_info
{
  int num_fun;
  int max_fun;
  /* Variable size array describing functions, one per contiguous
     address range belonging to a function.  */
  struct function_info fun[1];
};

/* Create a spu ELF linker hash table.  */

static struct bfd_link_hash_table *
spu_elf_link_hash_table_create (bfd *abfd)
{
  struct spu_link_hash_table *htab;

  htab = bfd_malloc (sizeof (*htab));
  if (htab == NULL)
    return NULL;

  if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
                                      _bfd_elf_link_hash_newfunc,
                                      sizeof (struct elf_link_hash_entry)))
    {
      free (htab);
      return NULL;
    }

  memset (&htab->ovtab, 0,
          sizeof (*htab) - offsetof (struct spu_link_hash_table, ovtab));

  htab->elf.init_got_refcount.refcount = 0;
  htab->elf.init_got_refcount.glist = NULL;
  htab->elf.init_got_offset.offset = 0;
  htab->elf.init_got_offset.glist = NULL;
  return &htab->elf.root;
}

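/* Save PARAMS, the parameters supplied by the linker emulation, in the
   hash table for later use.  */
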
void
spu_elf_setup (struct bfd_link_info *info, struct spu_elf_params *params)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  htab->params = params;
}

/* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
   to (hash, NULL) for global symbols, and (NULL, sym) for locals.  Set
   *SYMSECP to the symbol's section.  *LOCSYMSP caches local syms.  */

static bfd_boolean
get_sym_h (struct elf_link_hash_entry **hp,
           Elf_Internal_Sym **symp,
           asection **symsecp,
           Elf_Internal_Sym **locsymsp,
           unsigned long r_symndx,
           bfd *ibfd)
{
  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;

  if (r_symndx >= symtab_hdr->sh_info)
    {
      struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
      struct elf_link_hash_entry *h;

      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
      while (h->root.type == bfd_link_hash_indirect
             || h->root.type == bfd_link_hash_warning)
        h = (struct elf_link_hash_entry *) h->root.u.i.link;

      if (hp != NULL)
        *hp = h;

      if (symp != NULL)
        *symp = NULL;

      if (symsecp != NULL)
        {
          asection *symsec = NULL;
          if (h->root.type == bfd_link_hash_defined
              || h->root.type == bfd_link_hash_defweak)
            symsec = h->root.u.def.section;
          *symsecp = symsec;
        }
    }
  else
    {
      Elf_Internal_Sym *sym;
      Elf_Internal_Sym *locsyms = *locsymsp;

      if (locsyms == NULL)
        {
          locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
          if (locsyms == NULL)
            locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr,
                                            symtab_hdr->sh_info,
                                            0, NULL, NULL, NULL);
          if (locsyms == NULL)
            return FALSE;
          *locsymsp = locsyms;
        }
      sym = locsyms + r_symndx;

      if (hp != NULL)
        *hp = NULL;

      if (symp != NULL)
        *symp = sym;

      if (symsecp != NULL)
        *symsecp = bfd_section_from_elf_index (ibfd, sym->st_shndx);
    }

  return TRUE;
}

/* Create the note section if not already present.  This is done early so
   that the linker maps the sections to the right place in the output.  */

bfd_boolean
spu_elf_create_sections (struct bfd_link_info *info)
{
  bfd *ibfd;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
      break;

  if (ibfd == NULL)
    {
      /* Make SPU_PTNOTE_SPUNAME section.  */
      asection *s;
      size_t name_len;
      size_t size;
      bfd_byte *data;
      flagword flags;

      ibfd = info->input_bfds;
      flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
      if (s == NULL
          || !bfd_set_section_alignment (ibfd, s, 4))
        return FALSE;

      name_len = strlen (bfd_get_filename (info->output_bfd)) + 1;
      size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
      size += (name_len + 3) & -4;

      if (!bfd_set_section_size (ibfd, s, size))
        return FALSE;

      data = bfd_zalloc (ibfd, size);
      if (data == NULL)
        return FALSE;

      bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
      bfd_put_32 (ibfd, name_len, data + 4);
      bfd_put_32 (ibfd, 1, data + 8);
      memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
      memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
              bfd_get_filename (info->output_bfd), name_len);
      s->contents = data;
    }

  return TRUE;
}

/* qsort predicate to sort sections by vma.  */

static int
sort_sections (const void *a, const void *b)
{
  const asection *const *s1 = a;
  const asection *const *s2 = b;
  bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;

  if (delta != 0)
    return delta < 0 ? -1 : 1;

  return (*s1)->index - (*s2)->index;
}

/* Identify overlays in the output bfd, and number them.  */

bfd_boolean
spu_elf_find_overlays (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection **alloc_sec;
  unsigned int i, n, ovl_index, num_buf;
  asection *s;
  bfd_vma ovl_end;

  if (info->output_bfd->section_count < 2)
    return FALSE;

  alloc_sec
    = bfd_malloc (info->output_bfd->section_count * sizeof (*alloc_sec));
  if (alloc_sec == NULL)
    return FALSE;

  /* Pick out all the alloced sections.  */
  for (n = 0, s = info->output_bfd->sections; s != NULL; s = s->next)
    if ((s->flags & SEC_ALLOC) != 0
        && (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
        && s->size != 0)
      alloc_sec[n++] = s;

  if (n == 0)
    {
      free (alloc_sec);
      return FALSE;
    }

  /* Sort them by vma.  */
  qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);

  /* Look for overlapping vmas.  Any with overlap must be overlays.
     Count them.  Also count the number of overlay regions.  */
  ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
  for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
    {
      s = alloc_sec[i];
      if (s->vma < ovl_end)
        {
          asection *s0 = alloc_sec[i - 1];

          if (spu_elf_section_data (s0)->u.o.ovl_index == 0)
            {
              alloc_sec[ovl_index] = s0;
              spu_elf_section_data (s0)->u.o.ovl_index = ++ovl_index;
              spu_elf_section_data (s0)->u.o.ovl_buf = ++num_buf;
            }
          alloc_sec[ovl_index] = s;
          spu_elf_section_data (s)->u.o.ovl_index = ++ovl_index;
          spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
          if (s0->vma != s->vma)
            {
              info->callbacks->einfo (_("%X%P: overlay sections %A and %A "
                                        "do not start at the same address.\n"),
                                      s0, s);
              return FALSE;
            }
          if (ovl_end < s->vma + s->size)
            ovl_end = s->vma + s->size;
        }
      else
        ovl_end = s->vma + s->size;
    }

  htab->num_overlays = ovl_index;
  htab->num_buf = num_buf;
  htab->ovl_sec = alloc_sec;
  htab->ovly_load = elf_link_hash_lookup (&htab->elf, "__ovly_load",
                                          FALSE, FALSE, FALSE);
  htab->ovly_return = elf_link_hash_lookup (&htab->elf, "__ovly_return",
                                            FALSE, FALSE, FALSE);
  return ovl_index != 0;
}

#define BRSL 0x33000000
#define BR 0x32000000
#define NOP 0x40200000
#define LNOP 0x00200000
#define ILA 0x42000000

/* Return true for all relative and absolute branch instructions.
   bra   00110000 0..
   brasl 00110001 0..
   br    00110010 0..
   brsl  00110011 0..
   brz   00100000 0..
   brnz  00100001 0..
   brhz  00100010 0..
   brhnz 00100011 0..  */

static bfd_boolean
is_branch (const unsigned char *insn)
{
  return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
}

/* Return true for all indirect branch instructions.
   bi     00110101 000
   bisl   00110101 001
   iret   00110101 010
   bisled 00110101 011
   biz    00100101 000
   binz   00100101 001
   bihz   00100101 010
   bihnz  00100101 011  */

static bfd_boolean
is_indirect_branch (const unsigned char *insn)
{
  return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
}

/* Return true for branch hint instructions.
   hbra 0001000..
   hbrr 0001001..  */

static bfd_boolean
is_hint (const unsigned char *insn)
{
  return (insn[0] & 0xfc) == 0x10;
}

/* True if INPUT_SECTION might need overlay stubs.  */

static bfd_boolean
maybe_needs_stubs (asection *input_section)
{
  /* No stubs for debug sections and suchlike.  */
  if ((input_section->flags & SEC_ALLOC) == 0)
    return FALSE;

  /* No stubs for link-once sections that will be discarded.  */
  if (input_section->output_section == bfd_abs_section_ptr)
    return FALSE;

  /* Don't create stubs for .eh_frame references.  */
  if (strcmp (input_section->name, ".eh_frame") == 0)
    return FALSE;

  return TRUE;
}

enum _stub_type
{
  no_stub,
  ovl_stub,
  nonovl_stub,
  stub_error
};

/* Return the type of overlay stub needed for a reloc against this
   symbol: no_stub if none is needed, ovl_stub for a stub in the overlay
   area, nonovl_stub if the stub must be in the non-overlay area, or
   stub_error if the insn contents could not be read.  */

static enum _stub_type
needs_ovl_stub (struct elf_link_hash_entry *h,
                Elf_Internal_Sym *sym,
                asection *sym_sec,
                asection *input_section,
                Elf_Internal_Rela *irela,
                bfd_byte *contents,
                struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  enum elf_spu_reloc_type r_type;
  unsigned int sym_type;
  bfd_boolean branch;
  enum _stub_type ret = no_stub;

  if (sym_sec == NULL
      || sym_sec->output_section == bfd_abs_section_ptr
      || spu_elf_section_data (sym_sec->output_section) == NULL)
    return ret;

  if (h != NULL)
    {
      /* Ensure no stubs for user supplied overlay manager syms.  */
      if (h == htab->ovly_load || h == htab->ovly_return)
        return ret;

      /* setjmp always goes via an overlay stub, because then the return
         and hence the longjmp goes via __ovly_return.  That magically
         makes setjmp/longjmp between overlays work.  */
      if (strncmp (h->root.root.string, "setjmp", 6) == 0
          && (h->root.root.string[6] == '\0' || h->root.root.string[6] == '@'))
        ret = ovl_stub;
    }

  /* Usually, symbols in non-overlay sections don't need stubs.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index == 0
      && !htab->params->non_overlay_stubs)
    return ret;

  if (h != NULL)
    sym_type = h->type;
  else
    sym_type = ELF_ST_TYPE (sym->st_info);

  r_type = ELF32_R_TYPE (irela->r_info);
  branch = FALSE;
  if (r_type == R_SPU_REL16 || r_type == R_SPU_ADDR16)
    {
      bfd_byte insn[4];

      if (contents == NULL)
        {
          contents = insn;
          if (!bfd_get_section_contents (input_section->owner,
                                         input_section,
                                         contents,
                                         irela->r_offset, 4))
            return stub_error;
        }
      else
        contents += irela->r_offset;

      if (is_branch (contents) || is_hint (contents))
        {
          branch = TRUE;
          if ((contents[0] & 0xfd) == 0x31
              && sym_type != STT_FUNC
              && contents != insn)
            {
              /* It's common for people to write assembly and forget
                 to give function symbols the right type.  Handle
                 calls to such symbols, but warn so that (hopefully)
                 people will fix their code.  We need the symbol
                 type to be correct to distinguish function pointer
                 initialisation from other pointer initialisations.  */
              const char *sym_name;

              if (h != NULL)
                sym_name = h->root.root.string;
              else
                {
                  Elf_Internal_Shdr *symtab_hdr;
                  symtab_hdr = &elf_tdata (input_section->owner)->symtab_hdr;
                  sym_name = bfd_elf_sym_name (input_section->owner,
                                               symtab_hdr,
                                               sym,
                                               sym_sec);
                }
              (*_bfd_error_handler) (_("warning: call to non-function"
                                       " symbol %s defined in %B"),
                                     sym_sec->owner, sym_name);

            }
        }
    }

  if (sym_type != STT_FUNC
      && !branch
      && (sym_sec->flags & SEC_CODE) == 0)
    return ret;

  /* A reference from some other section to a symbol in an overlay
     section needs a stub.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index
      != spu_elf_section_data (input_section->output_section)->u.o.ovl_index)
    ret = ovl_stub;

  /* If this insn isn't a branch then we are possibly taking the
     address of a function and passing it out somehow.  */
  return !branch && sym_type == STT_FUNC ? nonovl_stub : ret;
}

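/* Record the need for an overlay stub for the symbol referenced by
   IRELA in ISEC, re-using an existing got_entry where possible.  */
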
static bfd_boolean
count_stub (struct spu_link_hash_table *htab,
            bfd *ibfd,
            asection *isec,
            enum _stub_type stub_type,
            struct elf_link_hash_entry *h,
            const Elf_Internal_Rela *irela)
{
  unsigned int ovl = 0;
  struct got_entry *g, **head;
  bfd_vma addend;

  /* If this instruction is a branch or call, we need a stub
     for it.  One stub per function per overlay.
     If it isn't a branch, then we are taking the address of
     this function so need a stub in the non-overlay area
     for it.  One stub per function.  */
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

  if (h != NULL)
    head = &h->got.glist;
  else
    {
      if (elf_local_got_ents (ibfd) == NULL)
        {
          bfd_size_type amt = (elf_tdata (ibfd)->symtab_hdr.sh_info
                               * sizeof (*elf_local_got_ents (ibfd)));
          elf_local_got_ents (ibfd) = bfd_zmalloc (amt);
          if (elf_local_got_ents (ibfd) == NULL)
            return FALSE;
        }
      head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
    }

  addend = 0;
  if (irela != NULL)
    addend = irela->r_addend;

  if (ovl == 0)
    {
      struct got_entry *gnext;

      for (g = *head; g != NULL; g = g->next)
        if (g->addend == addend && g->ovl == 0)
          break;

      if (g == NULL)
        {
          /* Need a new non-overlay area stub.  Zap other stubs.  */
          for (g = *head; g != NULL; g = gnext)
            {
              gnext = g->next;
              if (g->addend == addend)
                {
                  htab->stub_count[g->ovl] -= 1;
                  free (g);
                }
            }
        }
    }
  else
    {
      for (g = *head; g != NULL; g = g->next)
        if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
          break;
    }

  if (g == NULL)
    {
      g = bfd_malloc (sizeof *g);
      if (g == NULL)
        return FALSE;
      g->ovl = ovl;
      g->addend = addend;
      g->stub_addr = (bfd_vma) -1;
      g->next = *head;
      *head = g;

      htab->stub_count[ovl] += 1;
    }

  return TRUE;
}

/* Support two sizes of overlay stubs, a slower more compact stub of two
   instructions, and a faster stub of four instructions.  */

static unsigned int
ovl_stub_size (enum _ovly_flavour ovly_flavour)
{
  return 8 << ovly_flavour;
}

/* Two instruction overlay stubs look like:

   brsl $75,__ovly_load
   .word target_ovl_and_address

   ovl_and_address is a word with the overlay number in the top 14 bits
   and local store address in the bottom 18 bits.

   Four instruction overlay stubs look like:

   ila $78,ovl_number
   lnop
   ila $79,target_address
   br __ovly_load  */

static bfd_boolean
build_stub (struct spu_link_hash_table *htab,
            bfd *ibfd,
            asection *isec,
            enum _stub_type stub_type,
            struct elf_link_hash_entry *h,
            const Elf_Internal_Rela *irela,
            bfd_vma dest,
            asection *dest_sec)
{
  unsigned int ovl, dest_ovl;
  struct got_entry *g, **head;
  asection *sec;
  bfd_vma addend, from, to;

  ovl = 0;
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

  if (h != NULL)
    head = &h->got.glist;
  else
    head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);

  addend = 0;
  if (irela != NULL)
    addend = irela->r_addend;

  for (g = *head; g != NULL; g = g->next)
    if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
      break;
  if (g == NULL)
    abort ();

  if (g->ovl == 0 && ovl != 0)
    return TRUE;

  if (g->stub_addr != (bfd_vma) -1)
    return TRUE;

  sec = htab->stub_sec[ovl];
  dest += dest_sec->output_offset + dest_sec->output_section->vma;
  from = sec->size + sec->output_offset + sec->output_section->vma;
  g->stub_addr = from;
  to = (htab->ovly_load->root.u.def.value
        + htab->ovly_load->root.u.def.section->output_offset
        + htab->ovly_load->root.u.def.section->output_section->vma);

  if (((dest | to | from) & 3) != 0)
    {
      htab->stub_err = 1;
      return FALSE;
    }
  dest_ovl = spu_elf_section_data (dest_sec->output_section)->u.o.ovl_index;

  switch (htab->params->ovly_flavour)
    {
    case ovly_normal:
      bfd_put_32 (sec->owner, ILA + ((dest_ovl << 7) & 0x01ffff80) + 78,
                  sec->contents + sec->size);
      bfd_put_32 (sec->owner, LNOP,
                  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, ILA + ((dest << 7) & 0x01ffff80) + 79,
                  sec->contents + sec->size + 8);
      bfd_put_32 (sec->owner, BR + (((to - (from + 12)) << 5) & 0x007fff80),
                  sec->contents + sec->size + 12);
      break;

    case ovly_compact:
      bfd_put_32 (sec->owner, BRSL + (((to - from) << 5) & 0x007fff80) + 75,
                  sec->contents + sec->size);
      bfd_put_32 (sec->owner, (dest & 0x3ffff) | (dest_ovl << 18),
                  sec->contents + sec->size + 4);
      break;

    default:
      abort ();
    }
  sec->size += ovl_stub_size (htab->params->ovly_flavour);

  if (htab->params->emit_stub_syms)
    {
      size_t len;
      char *name;
      int add;

      len = 8 + sizeof (".ovl_call.") - 1;
      if (h != NULL)
        len += strlen (h->root.root.string);
      else
        len += 8 + 1 + 8;
      add = 0;
      if (irela != NULL)
        add = (int) irela->r_addend & 0xffffffff;
      if (add != 0)
        len += 1 + 8;
      name = bfd_malloc (len);
      if (name == NULL)
        return FALSE;

      sprintf (name, "%08x.ovl_call.", g->ovl);
      if (h != NULL)
        strcpy (name + 8 + sizeof (".ovl_call.") - 1, h->root.root.string);
      else
        sprintf (name + 8 + sizeof (".ovl_call.") - 1, "%x:%x",
                 dest_sec->id & 0xffffffff,
                 (int) ELF32_R_SYM (irela->r_info) & 0xffffffff);
      if (add != 0)
        sprintf (name + len - 9, "+%x", add);

      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
      free (name);
      if (h == NULL)
        return FALSE;
      if (h->root.type == bfd_link_hash_new)
        {
          h->root.type = bfd_link_hash_defined;
          h->root.u.def.section = sec;
          h->size = ovl_stub_size (htab->params->ovly_flavour);
          h->root.u.def.value = sec->size - h->size;
          h->type = STT_FUNC;
          h->ref_regular = 1;
          h->def_regular = 1;
          h->ref_regular_nonweak = 1;
          h->forced_local = 1;
          h->non_elf = 0;
        }
    }

  return TRUE;
}

/* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
   symbols.  */

static bfd_boolean
allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
{
  /* Symbols starting with _SPUEAR_ need a stub because they may be
     invoked by the PPU.  */
  struct bfd_link_info *info = inf;
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection *sym_sec;

  if ((h->root.type == bfd_link_hash_defined
       || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
      && (sym_sec = h->root.u.def.section) != NULL
      && sym_sec->output_section != bfd_abs_section_ptr
      && spu_elf_section_data (sym_sec->output_section) != NULL
      && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
          || htab->params->non_overlay_stubs))
    {
      return count_stub (htab, NULL, NULL, nonovl_stub, h, NULL);
    }

  return TRUE;
}

static bfd_boolean
build_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
{
  /* Symbols starting with _SPUEAR_ need a stub because they may be
     invoked by the PPU.  */
  struct bfd_link_info *info = inf;
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection *sym_sec;

  if ((h->root.type == bfd_link_hash_defined
       || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
      && (sym_sec = h->root.u.def.section) != NULL
      && sym_sec->output_section != bfd_abs_section_ptr
      && spu_elf_section_data (sym_sec->output_section) != NULL
      && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
          || htab->params->non_overlay_stubs))
    {
      return build_stub (htab, NULL, NULL, nonovl_stub, h, NULL,
                         h->root.u.def.value, sym_sec);
    }

  return TRUE;
}

/* Size or build stubs.  */

static bfd_boolean
process_stubs (struct bfd_link_info *info, bfd_boolean build)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      Elf_Internal_Shdr *symtab_hdr;
      asection *isec;
      Elf_Internal_Sym *local_syms = NULL;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
        continue;

      /* We'll need the symbol table in a second.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      if (symtab_hdr->sh_info == 0)
        continue;

      /* Walk over each section attached to the input bfd.  */
      for (isec = ibfd->sections; isec != NULL; isec = isec->next)
        {
          Elf_Internal_Rela *internal_relocs, *irelaend, *irela;

          /* If there aren't any relocs, then there's nothing more to do.  */
          if ((isec->flags & SEC_RELOC) == 0
              || isec->reloc_count == 0)
            continue;

          if (!maybe_needs_stubs (isec))
            continue;

          /* Get the relocs.  */
          internal_relocs = _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
                                                       info->keep_memory);
          if (internal_relocs == NULL)
            goto error_ret_free_local;

          /* Now examine each relocation.  */
          irela = internal_relocs;
          irelaend = irela + isec->reloc_count;
          for (; irela < irelaend; irela++)
            {
              enum elf_spu_reloc_type r_type;
              unsigned int r_indx;
              asection *sym_sec;
              Elf_Internal_Sym *sym;
              struct elf_link_hash_entry *h;
              enum _stub_type stub_type;

              r_type = ELF32_R_TYPE (irela->r_info);
              r_indx = ELF32_R_SYM (irela->r_info);

              if (r_type >= R_SPU_max)
                {
                  bfd_set_error (bfd_error_bad_value);
                error_ret_free_internal:
                  if (elf_section_data (isec)->relocs != internal_relocs)
                    free (internal_relocs);
                error_ret_free_local:
                  if (local_syms != NULL
                      && (symtab_hdr->contents
                          != (unsigned char *) local_syms))
                    free (local_syms);
                  return FALSE;
                }

              /* Determine the reloc target section.  */
              if (!get_sym_h (&h, &sym, &sym_sec, &local_syms, r_indx, ibfd))
                goto error_ret_free_internal;

              stub_type = needs_ovl_stub (h, sym, sym_sec, isec, irela,
                                          NULL, info);
              if (stub_type == no_stub)
                continue;
              else if (stub_type == stub_error)
                goto error_ret_free_internal;

              if (htab->stub_count == NULL)
                {
                  bfd_size_type amt;
                  amt = (htab->num_overlays + 1) * sizeof (*htab->stub_count);
                  htab->stub_count = bfd_zmalloc (amt);
                  if (htab->stub_count == NULL)
                    goto error_ret_free_internal;
                }

              if (!build)
                {
                  if (!count_stub (htab, ibfd, isec, stub_type, h, irela))
                    goto error_ret_free_internal;
                }
              else
                {
                  bfd_vma dest;

                  if (h != NULL)
                    dest = h->root.u.def.value;
                  else
                    dest = sym->st_value;
                  dest += irela->r_addend;
                  if (!build_stub (htab, ibfd, isec, stub_type, h, irela,
                                   dest, sym_sec))
                    goto error_ret_free_internal;
                }
            }

          /* We're done with the internal relocs, free them.  */
          if (elf_section_data (isec)->relocs != internal_relocs)
            free (internal_relocs);
        }

      if (local_syms != NULL
          && symtab_hdr->contents != (unsigned char *) local_syms)
        {
          if (!info->keep_memory)
            free (local_syms);
          else
            symtab_hdr->contents = (unsigned char *) local_syms;
        }
    }

  return TRUE;
}

/* Allocate space for overlay call and return stubs.  */

int
spu_elf_size_stubs (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab;
  bfd *ibfd;
  bfd_size_type amt;
  flagword flags;
  unsigned int i;
  asection *stub;

  if (!process_stubs (info, FALSE))
    return 0;

  htab = spu_hash_table (info);
  elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, info);
  if (htab->stub_err)
    return 0;

  if (htab->stub_count == NULL)
    return 1;

  ibfd = info->input_bfds;
  amt = (htab->num_overlays + 1) * sizeof (*htab->stub_sec);
  htab->stub_sec = bfd_zmalloc (amt);
  if (htab->stub_sec == NULL)
    return 0;

  flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
           | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
  stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
  htab->stub_sec[0] = stub;
  if (stub == NULL
      || !bfd_set_section_alignment (ibfd, stub,
                                     htab->params->ovly_flavour + 3))
    return 0;
  stub->size = htab->stub_count[0] * ovl_stub_size (htab->params->ovly_flavour);
  (*htab->params->place_spu_section) (stub, NULL, ".text");

  for (i = 0; i < htab->num_overlays; ++i)
    {
      asection *osec = htab->ovl_sec[i];
      unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
      stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
      htab->stub_sec[ovl] = stub;
      if (stub == NULL
          || !bfd_set_section_alignment (ibfd, stub,
                                         htab->params->ovly_flavour + 3))
        return 0;
      stub->size = htab->stub_count[ovl] * ovl_stub_size (htab->params->ovly_flavour);
      (*htab->params->place_spu_section) (stub, osec, NULL);
    }

  /* htab->ovtab consists of two arrays.
     .  struct {
     .    u32 vma;
     .    u32 size;
     .    u32 file_off;
     .    u32 buf;
     .  } _ovly_table[];
     .
     .  struct {
     .    u32 mapped;
     .  } _ovly_buf_table[];
     .  */

  flags = (SEC_ALLOC | SEC_LOAD
           | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
  htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
  if (htab->ovtab == NULL
      || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
    return 0;

  htab->ovtab->size = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
  (*htab->params->place_spu_section) (htab->ovtab, NULL, ".data");

  htab->toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
  if (htab->toe == NULL
      || !bfd_set_section_alignment (ibfd, htab->toe, 4))
    return 0;
  htab->toe->size = 16;
  (*htab->params->place_spu_section) (htab->toe, NULL, ".toe");

  return 2;
}

/* Functions to handle embedded spu_ovl.o object.  */

static void *
ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
{
  return stream;
}

static file_ptr
ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
               void *stream,
               void *buf,
               file_ptr nbytes,
               file_ptr offset)
{
  struct _ovl_stream *os;
  size_t count;
  size_t max;

  os = (struct _ovl_stream *) stream;
  max = (const char *) os->end - (const char *) os->start;

  if ((ufile_ptr) offset >= max)
    return 0;

  count = nbytes;
  if (count > max - offset)
    count = max - offset;

  memcpy (buf, (const char *) os->start + offset, count);
  return count;
}

bfd_boolean
spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
{
  *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
                              "elf32-spu",
                              ovl_mgr_open,
                              (void *) stream,
                              ovl_mgr_pread,
                              NULL,
                              NULL);
  return *ovl_bfd != NULL;
}

/* Define an STT_OBJECT symbol.  */

static struct elf_link_hash_entry *
define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
{
  struct elf_link_hash_entry *h;

  h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
  if (h == NULL)
    return NULL;

  if (h->root.type != bfd_link_hash_defined
      || !h->def_regular)
    {
      h->root.type = bfd_link_hash_defined;
      h->root.u.def.section = htab->ovtab;
      h->type = STT_OBJECT;
      h->ref_regular = 1;
      h->def_regular = 1;
      h->ref_regular_nonweak = 1;
      h->non_elf = 0;
    }
  else if (h->root.u.def.section->owner != NULL)
    {
      (*_bfd_error_handler) (_("%B is not allowed to define %s"),
                             h->root.u.def.section->owner,
                             h->root.root.string);
      bfd_set_error (bfd_error_bad_value);
      return NULL;
    }
  else
    {
      (*_bfd_error_handler) (_("you are not allowed to define %s in a script"),
                             h->root.root.string);
      bfd_set_error (bfd_error_bad_value);
      return NULL;
    }

  return h;
}

/* Fill in all stubs and the overlay tables.  */

bfd_boolean
spu_elf_build_stubs (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  struct elf_link_hash_entry *h;
  bfd_byte *p;
  asection *s;
  bfd *obfd;
  unsigned int i;

  if (htab->stub_count == NULL)
    return TRUE;

  for (i = 0; i <= htab->num_overlays; i++)
    if (htab->stub_sec[i]->size != 0)
      {
        htab->stub_sec[i]->contents = bfd_zalloc (htab->stub_sec[i]->owner,
                                                  htab->stub_sec[i]->size);
        if (htab->stub_sec[i]->contents == NULL)
          return FALSE;
        htab->stub_sec[i]->rawsize = htab->stub_sec[i]->size;
        htab->stub_sec[i]->size = 0;
      }

  h = elf_link_hash_lookup (&htab->elf, "__ovly_load", FALSE, FALSE, FALSE);
  htab->ovly_load = h;
  BFD_ASSERT (h != NULL
              && (h->root.type == bfd_link_hash_defined
                  || h->root.type == bfd_link_hash_defweak)
              && h->def_regular);

  s = h->root.u.def.section->output_section;
  if (spu_elf_section_data (s)->u.o.ovl_index)
    {
      (*_bfd_error_handler) (_("%s in overlay section"),
                             h->root.root.string);
      bfd_set_error (bfd_error_bad_value);
      return FALSE;
    }

  h = elf_link_hash_lookup (&htab->elf, "__ovly_return", FALSE, FALSE, FALSE);
  htab->ovly_return = h;

  /* Fill in all the stubs.  */
  process_stubs (info, TRUE);
  if (!htab->stub_err)
    elf_link_hash_traverse (&htab->elf, build_spuear_stubs, info);

  if (htab->stub_err)
    {
      (*_bfd_error_handler) (_("overlay stub relocation overflow"));
      bfd_set_error (bfd_error_bad_value);
      return FALSE;
    }

  for (i = 0; i <= htab->num_overlays; i++)
    {
      if (htab->stub_sec[i]->size != htab->stub_sec[i]->rawsize)
        {
          (*_bfd_error_handler) (_("stubs don't match calculated size"));
          bfd_set_error (bfd_error_bad_value);
          return FALSE;
        }
      htab->stub_sec[i]->rawsize = 0;
    }

  htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
  if (htab->ovtab->contents == NULL)
    return FALSE;

  /* Write out _ovly_table.  */
  p = htab->ovtab->contents;
  /* Set low bit of .size to mark non-overlay area as present.  */
  p[7] = 1;
  obfd = htab->ovtab->output_section->owner;
  for (s = obfd->sections; s != NULL; s = s->next)
    {
      unsigned int ovl_index = spu_elf_section_data (s)->u.o.ovl_index;

      if (ovl_index != 0)
        {
          unsigned long off = ovl_index * 16;
          unsigned int ovl_buf = spu_elf_section_data (s)->u.o.ovl_buf;

          bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
          bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16, p + off + 4);
          /* file_off written later in spu_elf_modify_program_headers.  */
          bfd_put_32 (htab->ovtab->owner, ovl_buf, p + off + 12);
        }
    }

  h = define_ovtab_symbol (htab, "_ovly_table");
  if (h == NULL)
    return FALSE;
  h->root.u.def.value = 16;
  h->size = htab->num_overlays * 16;

  h = define_ovtab_symbol (htab, "_ovly_table_end");
  if (h == NULL)
    return FALSE;
  h->root.u.def.value = htab->num_overlays * 16 + 16;
  h->size = 0;

  h = define_ovtab_symbol (htab, "_ovly_buf_table");
  if (h == NULL)
    return FALSE;
  h->root.u.def.value = htab->num_overlays * 16 + 16;
  h->size = htab->num_buf * 4;

  h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
  if (h == NULL)
    return FALSE;
  h->root.u.def.value = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
  h->size = 0;

  h = define_ovtab_symbol (htab, "_EAR_");
  if (h == NULL)
    return FALSE;
  h->root.u.def.section = htab->toe;
  h->root.u.def.value = 0;
  h->size = 16;

  return TRUE;
}

/* Check that all loadable section VMAs lie in the range
   LO .. HI inclusive, and stash some parameters for --auto-overlay.  */

asection *
spu_elf_check_vma (struct bfd_link_info *info)
{
  struct elf_segment_map *m;
  unsigned int i;
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *abfd = info->output_bfd;
  bfd_vma hi = htab->params->local_store_hi;
  bfd_vma lo = htab->params->local_store_lo;

  htab->local_store = hi + 1 - lo;

  for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
    if (m->p_type == PT_LOAD)
      for (i = 0; i < m->count; i++)
        if (m->sections[i]->size != 0
            && (m->sections[i]->vma < lo
                || m->sections[i]->vma > hi
                || m->sections[i]->vma + m->sections[i]->size - 1 > hi))
          return m->sections[i];

  /* No need for overlays if it all fits.  */
  htab->params->auto_overlay = 0;
  return NULL;
}

/* OFFSET in SEC (presumably) is the beginning of a function prologue.
   Search for stack adjusting insns, and return the sp delta.  */

static int
find_function_stack_adjust (asection *sec, bfd_vma offset)
{
  int reg[128];

  memset (reg, 0, sizeof (reg));
  for ( ; offset + 4 <= sec->size; offset += 4)
    {
      unsigned char buf[4];
      int rt, ra;
      int imm;

      /* Assume no relocs on stack adjusting insns.  */
1629 if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
1630 break;
1631
1632 if (buf[0] == 0x24 /* stqd */)
1633 continue;
1634
1635 rt = buf[3] & 0x7f;
1636 ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);
1637 /* Partly decoded immediate field. */
1638 imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);
1639
1640 if (buf[0] == 0x1c /* ai */)
1641 {
1642 imm >>= 7;
1643 imm = (imm ^ 0x200) - 0x200;
1644 reg[rt] = reg[ra] + imm;
1645
1646 if (rt == 1 /* sp */)
1647 {
1648 if (reg[rt] > 0)
1649 break;
1650 return reg[rt];
1651 }
1652 }
1653 else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
1654 {
1655 int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);
1656
1657 reg[rt] = reg[ra] + reg[rb];
1658 if (rt == 1)
1659 {
1660 if (reg[rt] > 0)
1661 break;
1662 return reg[rt];
1663 }
1664 }
1665 else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
1666 {
1667 if (buf[0] >= 0x42 /* ila */)
1668 imm |= (buf[0] & 1) << 17;
1669 else
1670 {
1671 imm &= 0xffff;
1672
1673 if (buf[0] == 0x40 /* il */)
1674 {
1675 if ((buf[1] & 0x80) == 0)
1676 continue;
1677 imm = (imm ^ 0x8000) - 0x8000;
1678 }
1679 else if ((buf[1] & 0x80) == 0 /* ilhu */)
1680 imm <<= 16;
1681 }
1682 reg[rt] = imm;
1683 continue;
1684 }
1685 else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
1686 {
1687 reg[rt] |= imm & 0xffff;
1688 continue;
1689 }
1690 else if (buf[0] == 0x04 /* ori */)
1691 {
1692 imm >>= 7;
1693 imm = (imm ^ 0x200) - 0x200;
1694 reg[rt] = reg[ra] | imm;
1695 continue;
1696 }
1697 else if (buf[0] == 0x32 && (buf[1] & 0x80) != 0 /* fsmbi */)
1698 {
1699 reg[rt] = ( ((imm & 0x8000) ? 0xff000000 : 0)
1700 | ((imm & 0x4000) ? 0x00ff0000 : 0)
1701 | ((imm & 0x2000) ? 0x0000ff00 : 0)
1702 | ((imm & 0x1000) ? 0x000000ff : 0));
1703 continue;
1704 }
1705 else if (buf[0] == 0x16 /* andbi */)
1706 {
1707 imm >>= 7;
1708 imm &= 0xff;
1709 imm |= imm << 8;
1710 imm |= imm << 16;
1711 reg[rt] = reg[ra] & imm;
1712 continue;
1713 }
1714 else if (buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
1715 {
1716 /* Used in pic reg load. Say rt is trashed. Won't be used
1717 in stack adjust, but we need to continue past this branch. */
1718 reg[rt] = 0;
1719 continue;
1720 }
1721 else if (is_branch (buf) || is_indirect_branch (buf))
1722 /* If we hit a branch then we must be out of the prologue. */
1723 break;
1724 }
1725
1726 return 0;
1727 }
1728
1729 /* qsort predicate to sort symbols by section and value. */
1730
1731 static Elf_Internal_Sym *sort_syms_syms;
1732 static asection **sort_syms_psecs;
1733
1734 static int
1735 sort_syms (const void *a, const void *b)
1736 {
1737 Elf_Internal_Sym *const *s1 = a;
1738 Elf_Internal_Sym *const *s2 = b;
1739 asection *sec1,*sec2;
1740 bfd_signed_vma delta;
1741
1742 sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
1743 sec2 = sort_syms_psecs[*s2 - sort_syms_syms];
1744
1745 if (sec1 != sec2)
1746 return sec1->index - sec2->index;
1747
1748 delta = (*s1)->st_value - (*s2)->st_value;
1749 if (delta != 0)
1750 return delta < 0 ? -1 : 1;
1751
1752 delta = (*s2)->st_size - (*s1)->st_size;
1753 if (delta != 0)
1754 return delta < 0 ? -1 : 1;
1755
1756 return *s1 < *s2 ? -1 : 1;
1757 }
1758
1759 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
1760 entries for section SEC. */
1761
1762 static struct spu_elf_stack_info *
1763 alloc_stack_info (asection *sec, int max_fun)
1764 {
1765 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1766 bfd_size_type amt;
1767
1768 amt = sizeof (struct spu_elf_stack_info);
1769 amt += (max_fun - 1) * sizeof (struct function_info);
1770 sec_data->u.i.stack_info = bfd_zmalloc (amt);
1771 if (sec_data->u.i.stack_info != NULL)
1772 sec_data->u.i.stack_info->max_fun = max_fun;
1773 return sec_data->u.i.stack_info;
1774 }
1775
1776 /* Add a new struct function_info describing a (part of a) function
1777 starting at SYM_H. Keep the array sorted by address. */
1778
1779 static struct function_info *
1780 maybe_insert_function (asection *sec,
1781 void *sym_h,
1782 bfd_boolean global,
1783 bfd_boolean is_func)
1784 {
1785 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1786 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
1787 int i;
1788 bfd_vma off, size;
1789
1790 if (sinfo == NULL)
1791 {
1792 sinfo = alloc_stack_info (sec, 20);
1793 if (sinfo == NULL)
1794 return NULL;
1795 }
1796
1797 if (!global)
1798 {
1799 Elf_Internal_Sym *sym = sym_h;
1800 off = sym->st_value;
1801 size = sym->st_size;
1802 }
1803 else
1804 {
1805 struct elf_link_hash_entry *h = sym_h;
1806 off = h->root.u.def.value;
1807 size = h->size;
1808 }
1809
1810 for (i = sinfo->num_fun; --i >= 0; )
1811 if (sinfo->fun[i].lo <= off)
1812 break;
1813
1814 if (i >= 0)
1815 {
1816 /* Don't add another entry for an alias, but do update some
1817 info. */
1818 if (sinfo->fun[i].lo == off)
1819 {
1820 /* Prefer globals over local syms. */
1821 if (global && !sinfo->fun[i].global)
1822 {
1823 sinfo->fun[i].global = TRUE;
1824 sinfo->fun[i].u.h = sym_h;
1825 }
1826 if (is_func)
1827 sinfo->fun[i].is_func = TRUE;
1828 return &sinfo->fun[i];
1829 }
1830 /* Ignore a zero-size symbol inside an existing function. */
1831 else if (sinfo->fun[i].hi > off && size == 0)
1832 return &sinfo->fun[i];
1833 }
1834
1835 if (sinfo->num_fun >= sinfo->max_fun)
1836 {
1837 bfd_size_type amt = sizeof (struct spu_elf_stack_info);
1838 bfd_size_type old = amt;
1839
1840 old += (sinfo->max_fun - 1) * sizeof (struct function_info);
1841 sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
1842 amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
1843 sinfo = bfd_realloc (sinfo, amt);
1844 if (sinfo == NULL)
1845 return NULL;
1846 memset ((char *) sinfo + old, 0, amt - old);
1847 sec_data->u.i.stack_info = sinfo;
1848 }
1849
1850 if (++i < sinfo->num_fun)
1851 memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
1852 (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
1853 sinfo->fun[i].is_func = is_func;
1854 sinfo->fun[i].global = global;
1855 sinfo->fun[i].sec = sec;
1856 if (global)
1857 sinfo->fun[i].u.h = sym_h;
1858 else
1859 sinfo->fun[i].u.sym = sym_h;
1860 sinfo->fun[i].lo = off;
1861 sinfo->fun[i].hi = off + size;
1862 sinfo->fun[i].stack = -find_function_stack_adjust (sec, off);
1863 sinfo->num_fun += 1;
1864 return &sinfo->fun[i];
1865 }
1866
1867 /* Return the name of FUN. */
1868
1869 static const char *
1870 func_name (struct function_info *fun)
1871 {
1872 asection *sec;
1873 bfd *ibfd;
1874 Elf_Internal_Shdr *symtab_hdr;
1875
1876 while (fun->start != NULL)
1877 fun = fun->start;
1878
1879 if (fun->global)
1880 return fun->u.h->root.root.string;
1881
1882 sec = fun->sec;
1883 if (fun->u.sym->st_name == 0)
1884 {
1885 size_t len = strlen (sec->name);
1886 char *name = bfd_malloc (len + 10);
1887 if (name == NULL)
1888 return "(null)";
1889 sprintf (name, "%s+%lx", sec->name,
1890 (unsigned long) fun->u.sym->st_value & 0xffffffff);
1891 return name;
1892 }
1893 ibfd = sec->owner;
1894 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
1895 return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
1896 }
1897
1898 /* Read the instruction at OFF in SEC. Return true iff the instruction
1899 is a nop, lnop, or stop 0 (all zero insn). */
1900
1901 static bfd_boolean
1902 is_nop (asection *sec, bfd_vma off)
1903 {
1904 unsigned char insn[4];
1905
1906 if (off + 4 > sec->size
1907 || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
1908 return FALSE;
1909 if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
1910 return TRUE;
1911 if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
1912 return TRUE;
1913 return FALSE;
1914 }
1915
1916 /* Extend the range of FUN to cover nop padding up to LIMIT.
1917 Return TRUE iff some instruction other than a NOP was found. */
1918
1919 static bfd_boolean
1920 insns_at_end (struct function_info *fun, bfd_vma limit)
1921 {
1922 bfd_vma off = (fun->hi + 3) & -4;
1923
1924 while (off < limit && is_nop (fun->sec, off))
1925 off += 4;
1926 if (off < limit)
1927 {
1928 fun->hi = off;
1929 return TRUE;
1930 }
1931 fun->hi = limit;
1932 return FALSE;
1933 }
1934
1935 /* Check and fix overlapping function ranges. Return TRUE iff there
1936 are gaps in the current info we have about functions in SEC. */
1937
1938 static bfd_boolean
1939 check_function_ranges (asection *sec, struct bfd_link_info *info)
1940 {
1941 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1942 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
1943 int i;
1944 bfd_boolean gaps = FALSE;
1945
1946 if (sinfo == NULL)
1947 return FALSE;
1948
1949 for (i = 1; i < sinfo->num_fun; i++)
1950 if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
1951 {
1952 /* Fix overlapping symbols. */
1953 const char *f1 = func_name (&sinfo->fun[i - 1]);
1954 const char *f2 = func_name (&sinfo->fun[i]);
1955
1956 info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
1957 sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
1958 }
1959 else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
1960 gaps = TRUE;
1961
1962 if (sinfo->num_fun == 0)
1963 gaps = TRUE;
1964 else
1965 {
1966 if (sinfo->fun[0].lo != 0)
1967 gaps = TRUE;
1968 if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
1969 {
1970 const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);
1971
1972 info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
1973 sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
1974 }
1975 else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
1976 gaps = TRUE;
1977 }
1978 return gaps;
1979 }
1980
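/* Example of the fixups above (hypothetical ranges): given
   fun[0] = [0x000,0x130) and fun[1] = [0x120,0x200) in a 0x220 byte
   section, the overlap warning fires and fun[0].hi is trimmed to
   0x120.  The 0x20 bytes after fun[1] are then scanned by
   insns_at_end, and any non-nop instruction found there reports a
   gap.  */
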
1981 /* Search current function info for a function that contains address
1982 OFFSET in section SEC. */
1983
1984 static struct function_info *
1985 find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
1986 {
1987 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1988 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
1989 int lo, hi, mid;
1990
1991 lo = 0;
1992 hi = sinfo->num_fun;
1993 while (lo < hi)
1994 {
1995 mid = (lo + hi) / 2;
1996 if (offset < sinfo->fun[mid].lo)
1997 hi = mid;
1998 else if (offset >= sinfo->fun[mid].hi)
1999 lo = mid + 1;
2000 else
2001 return &sinfo->fun[mid];
2002 }
2003 info->callbacks->einfo (_("%A:0x%v not found in function table\n"),
2004 sec, offset);
2005 return NULL;
2006 }
2007
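/* A sketch of the bisection above (hypothetical ranges): with
   fun[0] = [0x000,0x120) and fun[1] = [0x120,0x200), a lookup of
   offset 0x124 computes mid = 1, sees 0x120 <= 0x124 < 0x200, and
   returns &sinfo->fun[1].  check_function_ranges has already trimmed
   any overlaps, so at most one range can contain OFFSET.  */
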
2008 /* Add CALLEE to CALLER call list if not already present. Return TRUE
2009    if CALLEE was new.  If this function returns FALSE, CALLEE should
2010 be freed. */
2011
2012 static bfd_boolean
2013 insert_callee (struct function_info *caller, struct call_info *callee)
2014 {
2015 struct call_info **pp, *p;
2016
2017 for (pp = &caller->call_list; (p = *pp) != NULL; pp = &p->next)
2018 if (p->fun == callee->fun)
2019 {
2020 /* Tail calls use less stack than normal calls. Retain entry
2021 for normal call over one for tail call. */
2022 p->is_tail &= callee->is_tail;
2023 if (!p->is_tail)
2024 {
2025 p->fun->start = NULL;
2026 p->fun->is_func = TRUE;
2027 }
2028 p->count += 1;
2029 /* Reorder list so most recent call is first. */
2030 *pp = p->next;
2031 p->next = caller->call_list;
2032 caller->call_list = p;
2033 return FALSE;
2034 }
2035 callee->next = caller->call_list;
2036 callee->count += 1;
2037 caller->call_list = callee;
2038 return TRUE;
2039 }
2040
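/* The usual calling pattern, as used throughout this file: the caller
   allocates CALLEE and frees it again on a FALSE return, because the
   list already owns an equivalent entry:

     callee = bfd_malloc (sizeof (*callee));
     ...fill in *callee...
     if (!insert_callee (caller, callee))
       free (callee);

   copy_callee below wraps exactly this pattern.  */
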
2041 /* Copy CALL and insert the copy into CALLER. */
2042
2043 static bfd_boolean
2044 copy_callee (struct function_info *caller, const struct call_info *call)
2045 {
2046 struct call_info *callee;
2047 callee = bfd_malloc (sizeof (*callee));
2048 if (callee == NULL)
2049 return FALSE;
2050 *callee = *call;
2051 if (!insert_callee (caller, callee))
2052 free (callee);
2053 return TRUE;
2054 }
2055
2056 /* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2057 overlay stub sections. */
2058
2059 static bfd_boolean
2060 interesting_section (asection *s)
2061 {
2062 return (s->output_section != bfd_abs_section_ptr
2063 && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_IN_MEMORY))
2064 == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2065 && s->size != 0);
2066 }
2067
2068 /* Rummage through the relocs for SEC, looking for function calls.
2069 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
2070 mark destination symbols on calls as being functions. Also
2071    look at branches, which may be tail calls or may go to the
2072    hot/cold section part of the same function.  */
2073
2074 static bfd_boolean
2075 mark_functions_via_relocs (asection *sec,
2076 struct bfd_link_info *info,
2077 int call_tree)
2078 {
2079 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
2080 Elf_Internal_Shdr *symtab_hdr;
2081 void *psyms;
2082 static bfd_boolean warned;
2083
2084 if (!interesting_section (sec)
2085 || sec->reloc_count == 0)
2086 return TRUE;
2087
2088 internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
2089 info->keep_memory);
2090 if (internal_relocs == NULL)
2091 return FALSE;
2092
2093 symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
2094 psyms = &symtab_hdr->contents;
2095 irela = internal_relocs;
2096 irelaend = irela + sec->reloc_count;
2097 for (; irela < irelaend; irela++)
2098 {
2099 enum elf_spu_reloc_type r_type;
2100 unsigned int r_indx;
2101 asection *sym_sec;
2102 Elf_Internal_Sym *sym;
2103 struct elf_link_hash_entry *h;
2104 bfd_vma val;
2105 bfd_boolean reject, is_call;
2106 struct function_info *caller;
2107 struct call_info *callee;
2108
2109 reject = FALSE;
2110 r_type = ELF32_R_TYPE (irela->r_info);
2111 if (r_type != R_SPU_REL16
2112 && r_type != R_SPU_ADDR16)
2113 {
2114 reject = TRUE;
2115 if (!(call_tree && spu_hash_table (info)->params->auto_overlay))
2116 continue;
2117 }
2118
2119 r_indx = ELF32_R_SYM (irela->r_info);
2120 if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
2121 return FALSE;
2122
2123 if (sym_sec == NULL
2124 || sym_sec->output_section == bfd_abs_section_ptr)
2125 continue;
2126
2127 is_call = FALSE;
2128 if (!reject)
2129 {
2130 unsigned char insn[4];
2131
2132 if (!bfd_get_section_contents (sec->owner, sec, insn,
2133 irela->r_offset, 4))
2134 return FALSE;
2135 if (is_branch (insn))
2136 {
2137 	      is_call = (insn[0] & 0xfd) == 0x31;	/* brsl/brasl set the link reg.  */
2138 if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2139 != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2140 {
2141 if (!warned)
2142 info->callbacks->einfo
2143 (_("%B(%A+0x%v): call to non-code section"
2144 " %B(%A), analysis incomplete\n"),
2145 sec->owner, sec, irela->r_offset,
2146 sym_sec->owner, sym_sec);
2147 warned = TRUE;
2148 continue;
2149 }
2150 }
2151 else
2152 {
2153 reject = TRUE;
2154 if (!(call_tree && spu_hash_table (info)->params->auto_overlay)
2155 || is_hint (insn))
2156 continue;
2157 }
2158 }
2159
2160 if (reject)
2161 {
2162 /* For --auto-overlay, count possible stubs we need for
2163 function pointer references. */
2164 unsigned int sym_type;
2165 if (h)
2166 sym_type = h->type;
2167 else
2168 sym_type = ELF_ST_TYPE (sym->st_info);
2169 if (sym_type == STT_FUNC)
2170 spu_hash_table (info)->non_ovly_stub += 1;
2171 continue;
2172 }
2173
2174 if (h)
2175 val = h->root.u.def.value;
2176 else
2177 val = sym->st_value;
2178 val += irela->r_addend;
2179
2180 if (!call_tree)
2181 {
2182 struct function_info *fun;
2183
2184 if (irela->r_addend != 0)
2185 {
2186 Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
2187 if (fake == NULL)
2188 return FALSE;
2189 fake->st_value = val;
2190 fake->st_shndx
2191 = _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
2192 sym = fake;
2193 }
2194 if (sym)
2195 fun = maybe_insert_function (sym_sec, sym, FALSE, is_call);
2196 else
2197 fun = maybe_insert_function (sym_sec, h, TRUE, is_call);
2198 if (fun == NULL)
2199 return FALSE;
2200 if (irela->r_addend != 0
2201 && fun->u.sym != sym)
2202 free (sym);
2203 continue;
2204 }
2205
2206 caller = find_function (sec, irela->r_offset, info);
2207 if (caller == NULL)
2208 return FALSE;
2209 callee = bfd_malloc (sizeof *callee);
2210 if (callee == NULL)
2211 return FALSE;
2212
2213 callee->fun = find_function (sym_sec, val, info);
2214 if (callee->fun == NULL)
2215 return FALSE;
2216 callee->is_tail = !is_call;
2217 callee->is_pasted = FALSE;
2218 callee->count = 0;
2219 if (callee->fun->last_caller != sec)
2220 {
2221 callee->fun->last_caller = sec;
2222 callee->fun->call_count += 1;
2223 }
2224 if (!insert_callee (caller, callee))
2225 free (callee);
2226 else if (!is_call
2227 && !callee->fun->is_func
2228 && callee->fun->stack == 0)
2229 {
2230 /* This is either a tail call or a branch from one part of
2231 the function to another, ie. hot/cold section. If the
2232 destination has been called by some other function then
2233 it is a separate function. We also assume that functions
2234 are not split across input files. */
2235 if (sec->owner != sym_sec->owner)
2236 {
2237 callee->fun->start = NULL;
2238 callee->fun->is_func = TRUE;
2239 }
2240 else if (callee->fun->start == NULL)
2241 callee->fun->start = caller;
2242 else
2243 {
2244 struct function_info *callee_start;
2245 struct function_info *caller_start;
2246 callee_start = callee->fun;
2247 while (callee_start->start)
2248 callee_start = callee_start->start;
2249 caller_start = caller;
2250 while (caller_start->start)
2251 caller_start = caller_start->start;
2252 if (caller_start != callee_start)
2253 {
2254 callee->fun->start = NULL;
2255 callee->fun->is_func = TRUE;
2256 }
2257 }
2258 }
2259 }
2260
2261 return TRUE;
2262 }
2263
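/* Example of the start-linking above (hypothetical sections): a
   non-call branch from .text.f in foo.o to its cold part
   .text.unlikely.f in the same file leaves the cold part's
   function_info pointing back via ->start, so both pieces count as
   one function.  The same branch crossing object files, or one whose
   start chain conflicts with the caller's, instead promotes the
   target to an independent is_func node.  */
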
2264 /* Handle something like .init or .fini, which has a piece of a function.
2265 These sections are pasted together to form a single function. */
2266
2267 static bfd_boolean
2268 pasted_function (asection *sec, struct bfd_link_info *info)
2269 {
2270 struct bfd_link_order *l;
2271 struct _spu_elf_section_data *sec_data;
2272 struct spu_elf_stack_info *sinfo;
2273 Elf_Internal_Sym *fake;
2274 struct function_info *fun, *fun_start;
2275
2276 fake = bfd_zmalloc (sizeof (*fake));
2277 if (fake == NULL)
2278 return FALSE;
2279 fake->st_value = 0;
2280 fake->st_size = sec->size;
2281 fake->st_shndx
2282 = _bfd_elf_section_from_bfd_section (sec->owner, sec);
2283 fun = maybe_insert_function (sec, fake, FALSE, FALSE);
2284 if (!fun)
2285 return FALSE;
2286
2287 /* Find a function immediately preceding this section. */
2288 fun_start = NULL;
2289 for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
2290 {
2291 if (l->u.indirect.section == sec)
2292 {
2293 if (fun_start != NULL)
2294 {
2295 struct call_info *callee = bfd_malloc (sizeof *callee);
2296 if (callee == NULL)
2297 return FALSE;
2298
2299 fun->start = fun_start;
2300 callee->fun = fun;
2301 callee->is_tail = TRUE;
2302 callee->is_pasted = TRUE;
2303 callee->count = 0;
2304 if (!insert_callee (fun_start, callee))
2305 free (callee);
2306 return TRUE;
2307 }
2308 break;
2309 }
2310 if (l->type == bfd_indirect_link_order
2311 && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
2312 && (sinfo = sec_data->u.i.stack_info) != NULL
2313 && sinfo->num_fun != 0)
2314 fun_start = &sinfo->fun[sinfo->num_fun - 1];
2315 }
2316
2317 info->callbacks->einfo (_("%A link_order not found\n"), sec);
2318 return FALSE;
2319 }
2320
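/* Example (typical layout): crti.o, crtbegin.o, ..., crtn.o each
   contribute a fragment of .init.  The fragment carrying the _init
   symbol gets normal function info; each later fragment is recorded
   here as an is_pasted tail call from the function info of the
   fragment laid out immediately before it, so the call graph treats
   .init as a single function.  */
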
2321 /* Map address ranges in code sections to functions. */
2322
2323 static bfd_boolean
2324 discover_functions (struct bfd_link_info *info)
2325 {
2326 bfd *ibfd;
2327 int bfd_idx;
2328 Elf_Internal_Sym ***psym_arr;
2329 asection ***sec_arr;
2330 bfd_boolean gaps = FALSE;
2331
2332 bfd_idx = 0;
2333 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2334 bfd_idx++;
2335
2336 psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
2337 if (psym_arr == NULL)
2338 return FALSE;
2339 sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
2340 if (sec_arr == NULL)
2341 return FALSE;
2342
2344 for (ibfd = info->input_bfds, bfd_idx = 0;
2345 ibfd != NULL;
2346 ibfd = ibfd->link_next, bfd_idx++)
2347 {
2348 extern const bfd_target bfd_elf32_spu_vec;
2349 Elf_Internal_Shdr *symtab_hdr;
2350 asection *sec;
2351 size_t symcount;
2352 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2353 asection **psecs, **p;
2354
2355 if (ibfd->xvec != &bfd_elf32_spu_vec)
2356 continue;
2357
2358 /* Read all the symbols. */
2359 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2360 symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
2361 if (symcount == 0)
2362 {
2363 if (!gaps)
2364 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2365 if (interesting_section (sec))
2366 {
2367 gaps = TRUE;
2368 break;
2369 }
2370 continue;
2371 }
2372
2373 if (symtab_hdr->contents != NULL)
2374 {
2375 /* Don't use cached symbols since the generic ELF linker
2376 code only reads local symbols, and we need globals too. */
2377 free (symtab_hdr->contents);
2378 symtab_hdr->contents = NULL;
2379 }
2380 syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
2381 NULL, NULL, NULL);
2382 symtab_hdr->contents = (void *) syms;
2383 if (syms == NULL)
2384 return FALSE;
2385
2386 /* Select defined function symbols that are going to be output. */
2387 psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
2388 if (psyms == NULL)
2389 return FALSE;
2390 psym_arr[bfd_idx] = psyms;
2391 psecs = bfd_malloc (symcount * sizeof (*psecs));
2392 if (psecs == NULL)
2393 return FALSE;
2394 sec_arr[bfd_idx] = psecs;
2395 for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
2396 if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
2397 || ELF_ST_TYPE (sy->st_info) == STT_FUNC
2398 || ELF_ST_TYPE (sy->st_info) == STT_SECTION)
2399 {
2400 asection *s;
2401
2402 *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
2403 if (s != NULL && interesting_section (s))
2404 *psy++ = sy;
2405 }
2406 symcount = psy - psyms;
2407 *psy = NULL;
2408
2409 /* Sort them by section and offset within section. */
2410 sort_syms_syms = syms;
2411 sort_syms_psecs = psecs;
2412 qsort (psyms, symcount, sizeof (*psyms), sort_syms);
2413
2414 /* Now inspect the function symbols. */
2415 for (psy = psyms; psy < psyms + symcount; )
2416 {
2417 asection *s = psecs[*psy - syms];
2418 Elf_Internal_Sym **psy2;
2419
2420 for (psy2 = psy; ++psy2 < psyms + symcount; )
2421 if (psecs[*psy2 - syms] != s)
2422 break;
2423
2424 if (!alloc_stack_info (s, psy2 - psy))
2425 return FALSE;
2426 psy = psy2;
2427 }
2428
2429 /* First install info about properly typed and sized functions.
2430 In an ideal world this will cover all code sections, except
2431 when partitioning functions into hot and cold sections,
2432 and the horrible pasted together .init and .fini functions. */
2433 for (psy = psyms; psy < psyms + symcount; ++psy)
2434 {
2435 sy = *psy;
2436 if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
2437 {
2438 asection *s = psecs[sy - syms];
2439 if (!maybe_insert_function (s, sy, FALSE, TRUE))
2440 return FALSE;
2441 }
2442 }
2443
2444 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2445 if (interesting_section (sec))
2446 gaps |= check_function_ranges (sec, info);
2447 }
2448
2449 if (gaps)
2450 {
2451 /* See if we can discover more function symbols by looking at
2452 relocations. */
2453 for (ibfd = info->input_bfds, bfd_idx = 0;
2454 ibfd != NULL;
2455 ibfd = ibfd->link_next, bfd_idx++)
2456 {
2457 asection *sec;
2458
2459 if (psym_arr[bfd_idx] == NULL)
2460 continue;
2461
2462 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2463 if (!mark_functions_via_relocs (sec, info, FALSE))
2464 return FALSE;
2465 }
2466
2467 for (ibfd = info->input_bfds, bfd_idx = 0;
2468 ibfd != NULL;
2469 ibfd = ibfd->link_next, bfd_idx++)
2470 {
2471 Elf_Internal_Shdr *symtab_hdr;
2472 asection *sec;
2473 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2474 asection **psecs;
2475
2476 if ((psyms = psym_arr[bfd_idx]) == NULL)
2477 continue;
2478
2479 psecs = sec_arr[bfd_idx];
2480
2481 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2482 syms = (Elf_Internal_Sym *) symtab_hdr->contents;
2483
2484 gaps = FALSE;
2485 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2486 if (interesting_section (sec))
2487 gaps |= check_function_ranges (sec, info);
2488 if (!gaps)
2489 continue;
2490
2491 /* Finally, install all globals. */
2492 for (psy = psyms; (sy = *psy) != NULL; ++psy)
2493 {
2494 asection *s;
2495
2496 s = psecs[sy - syms];
2497
2498 /* Global syms might be improperly typed functions. */
2499 if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
2500 && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
2501 {
2502 if (!maybe_insert_function (s, sy, FALSE, FALSE))
2503 return FALSE;
2504 }
2505 }
2506 }
2507
2508 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2509 {
2510 extern const bfd_target bfd_elf32_spu_vec;
2511 asection *sec;
2512
2513 if (ibfd->xvec != &bfd_elf32_spu_vec)
2514 continue;
2515
2516 /* Some of the symbols we've installed as marking the
2517 beginning of functions may have a size of zero. Extend
2518 the range of such functions to the beginning of the
2519 next symbol of interest. */
2520 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2521 if (interesting_section (sec))
2522 {
2523 struct _spu_elf_section_data *sec_data;
2524 struct spu_elf_stack_info *sinfo;
2525
2526 sec_data = spu_elf_section_data (sec);
2527 sinfo = sec_data->u.i.stack_info;
2528 if (sinfo != NULL)
2529 {
2530 int fun_idx;
2531 bfd_vma hi = sec->size;
2532
2533 for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
2534 {
2535 sinfo->fun[fun_idx].hi = hi;
2536 hi = sinfo->fun[fun_idx].lo;
2537 }
2538 }
2539 /* No symbols in this section. Must be .init or .fini
2540 or something similar. */
2541 else if (!pasted_function (sec, info))
2542 return FALSE;
2543 }
2544 }
2545 }
2546
2547 for (ibfd = info->input_bfds, bfd_idx = 0;
2548 ibfd != NULL;
2549 ibfd = ibfd->link_next, bfd_idx++)
2550 {
2551 if (psym_arr[bfd_idx] == NULL)
2552 continue;
2553
2554 free (psym_arr[bfd_idx]);
2555 free (sec_arr[bfd_idx]);
2556 }
2557
2558 free (psym_arr);
2559 free (sec_arr);
2560
2561 return TRUE;
2562 }
2563
2564 /* Iterate over all function_info we have collected, calling DOIT on
2565 each node if ROOT_ONLY is false. Only call DOIT on root nodes
2566 if ROOT_ONLY. */
2567
2568 static bfd_boolean
2569 for_each_node (bfd_boolean (*doit) (struct function_info *,
2570 struct bfd_link_info *,
2571 void *),
2572 struct bfd_link_info *info,
2573 void *param,
2574 int root_only)
2575 {
2576 bfd *ibfd;
2577
2578 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2579 {
2580 extern const bfd_target bfd_elf32_spu_vec;
2581 asection *sec;
2582
2583 if (ibfd->xvec != &bfd_elf32_spu_vec)
2584 continue;
2585
2586 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2587 {
2588 struct _spu_elf_section_data *sec_data;
2589 struct spu_elf_stack_info *sinfo;
2590
2591 if ((sec_data = spu_elf_section_data (sec)) != NULL
2592 && (sinfo = sec_data->u.i.stack_info) != NULL)
2593 {
2594 int i;
2595 for (i = 0; i < sinfo->num_fun; ++i)
2596 if (!root_only || !sinfo->fun[i].non_root)
2597 if (!doit (&sinfo->fun[i], info, param))
2598 return FALSE;
2599 }
2600 }
2601 }
2602 return TRUE;
2603 }
2604
2605 /* Transfer call info attached to struct function_info entries for
2606 all of a given function's sections to the first entry. */
2607
2608 static bfd_boolean
2609 transfer_calls (struct function_info *fun,
2610 struct bfd_link_info *info ATTRIBUTE_UNUSED,
2611 void *param ATTRIBUTE_UNUSED)
2612 {
2613 struct function_info *start = fun->start;
2614
2615 if (start != NULL)
2616 {
2617 struct call_info *call, *call_next;
2618
2619 while (start->start != NULL)
2620 start = start->start;
2621 for (call = fun->call_list; call != NULL; call = call_next)
2622 {
2623 call_next = call->next;
2624 if (!insert_callee (start, call))
2625 free (call);
2626 }
2627 fun->call_list = NULL;
2628 }
2629 return TRUE;
2630 }
2631
2632 /* Mark nodes in the call graph that are called by some other node. */
2633
2634 static bfd_boolean
2635 mark_non_root (struct function_info *fun,
2636 struct bfd_link_info *info ATTRIBUTE_UNUSED,
2637 void *param ATTRIBUTE_UNUSED)
2638 {
2639 struct call_info *call;
2640
2641 if (fun->visit1)
2642 return TRUE;
2643 fun->visit1 = TRUE;
2644 for (call = fun->call_list; call; call = call->next)
2645 {
2646 call->fun->non_root = TRUE;
2647 mark_non_root (call->fun, 0, 0);
2648 }
2649 return TRUE;
2650 }
2651
2652 /* Remove cycles from the call graph. Set depth of nodes. */
2653
2654 static bfd_boolean
2655 remove_cycles (struct function_info *fun,
2656 struct bfd_link_info *info,
2657 void *param)
2658 {
2659 struct call_info **callp, *call;
2660 unsigned int depth = *(unsigned int *) param;
2661 unsigned int max_depth = depth;
2662
2663 fun->depth = depth;
2664 fun->visit2 = TRUE;
2665 fun->marking = TRUE;
2666
2667 callp = &fun->call_list;
2668 while ((call = *callp) != NULL)
2669 {
2670 if (!call->fun->visit2)
2671 {
2672 call->max_depth = depth + !call->is_pasted;
2673 if (!remove_cycles (call->fun, info, &call->max_depth))
2674 return FALSE;
2675 if (max_depth < call->max_depth)
2676 max_depth = call->max_depth;
2677 }
2678 else if (call->fun->marking)
2679 {
2680 if (!spu_hash_table (info)->params->auto_overlay)
2681 {
2682 const char *f1 = func_name (fun);
2683 const char *f2 = func_name (call->fun);
2684
2685 info->callbacks->info (_("Stack analysis will ignore the call "
2686 "from %s to %s\n"),
2687 f1, f2);
2688 }
2689 *callp = call->next;
2690 free (call);
2691 continue;
2692 }
2693 callp = &call->next;
2694 }
2695 fun->marking = FALSE;
2696 *(unsigned int *) param = max_depth;
2697 return TRUE;
2698 }
2699
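/* Example (hypothetical graph): with root A calling B, B calling C,
   and C calling back to B, the traversal visits A (depth 0), B (1),
   then C (2).  At C the edge back to B finds B still has "marking"
   set, i.e. B is on the current traversal stack, so that call_info
   is deleted and, unless --auto-overlay, reported as ignored.
   max_depth propagates back up, so A's call to B records depth 2.  */
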
2700 /* Check that we actually visited all nodes in remove_cycles. If we
2701 didn't, then there is some cycle in the call graph not attached to
2702 any root node. Arbitrarily choose a node in the cycle as a new
2703 root and break the cycle. */
2704
2705 static bfd_boolean
2706 mark_detached_root (struct function_info *fun,
2707 struct bfd_link_info *info,
2708 void *param)
2709 {
2710 if (fun->visit2)
2711 return TRUE;
2712 fun->non_root = FALSE;
2713 *(unsigned int *) param = 0;
2714 return remove_cycles (fun, info, param);
2715 }
2716
2717 /* Populate call_list for each function. */
2718
2719 static bfd_boolean
2720 build_call_tree (struct bfd_link_info *info)
2721 {
2722 bfd *ibfd;
2723 unsigned int depth;
2724
2725 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2726 {
2727 extern const bfd_target bfd_elf32_spu_vec;
2728 asection *sec;
2729
2730 if (ibfd->xvec != &bfd_elf32_spu_vec)
2731 continue;
2732
2733 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2734 if (!mark_functions_via_relocs (sec, info, TRUE))
2735 return FALSE;
2736 }
2737
2738 /* Transfer call info from hot/cold section part of function
2739 to main entry. */
2740 if (!spu_hash_table (info)->params->auto_overlay
2741 && !for_each_node (transfer_calls, info, 0, FALSE))
2742 return FALSE;
2743
2744 /* Find the call graph root(s). */
2745 if (!for_each_node (mark_non_root, info, 0, FALSE))
2746 return FALSE;
2747
2748 /* Remove cycles from the call graph. We start from the root node(s)
2749 so that we break cycles in a reasonable place. */
2750 depth = 0;
2751 if (!for_each_node (remove_cycles, info, &depth, TRUE))
2752 return FALSE;
2753
2754 return for_each_node (mark_detached_root, info, &depth, FALSE);
2755 }
2756
2757 /* qsort predicate to sort calls by max_depth then count. */
2758
2759 static int
2760 sort_calls (const void *a, const void *b)
2761 {
2762 struct call_info *const *c1 = a;
2763 struct call_info *const *c2 = b;
2764 int delta;
2765
2766 delta = (*c2)->max_depth - (*c1)->max_depth;
2767 if (delta != 0)
2768 return delta;
2769
2770 delta = (*c2)->count - (*c1)->count;
2771 if (delta != 0)
2772 return delta;
2773
2774   return (char *) c1 - (char *) c2;	/* Make the order deterministic.  */
2775 }
2776
2777 struct _mos_param {
2778 unsigned int max_overlay_size;
2779 };
2780
2781 /* Set linker_mark and gc_mark on any sections that we will put in
2782 overlays. These flags are used by the generic ELF linker, but we
2783 won't be continuing on to bfd_elf_final_link so it is OK to use
2784 them. linker_mark is clear before we get here. Set segment_mark
2785 on sections that are part of a pasted function (excluding the last
2786 section).
2787
2788 Set up function rodata section if --overlay-rodata. We don't
2789    currently include merged string constant rodata sections, since
    their contents may be shared among many functions and so cannot
    reliably be assigned to any one function's overlay.

2791 Sort the call graph so that the deepest nodes will be visited
2792 first. */
2793
2794 static bfd_boolean
2795 mark_overlay_section (struct function_info *fun,
2796 struct bfd_link_info *info,
2797 void *param)
2798 {
2799 struct call_info *call;
2800 unsigned int count;
2801 struct _mos_param *mos_param = param;
2802
2803 if (fun->visit4)
2804 return TRUE;
2805
2806 fun->visit4 = TRUE;
2807 if (!fun->sec->linker_mark)
2808 {
2809 unsigned int size;
2810
2811 fun->sec->linker_mark = 1;
2812 fun->sec->gc_mark = 1;
2813 fun->sec->segment_mark = 0;
2814 /* Ensure SEC_CODE is set on this text section (it ought to
2815 be!), and SEC_CODE is clear on rodata sections. We use
2816 this flag to differentiate the two overlay section types. */
2817 fun->sec->flags |= SEC_CODE;
2818
2819 if (spu_hash_table (info)->params->auto_overlay & OVERLAY_RODATA)
2820 {
2821 char *name = NULL;
2822
2823 /* Find the rodata section corresponding to this function's
2824 text section. */
2825 if (strcmp (fun->sec->name, ".text") == 0)
2826 {
2827 name = bfd_malloc (sizeof (".rodata"));
2828 if (name == NULL)
2829 return FALSE;
2830 memcpy (name, ".rodata", sizeof (".rodata"));
2831 }
2832 else if (strncmp (fun->sec->name, ".text.", 6) == 0)
2833 {
2834 size_t len = strlen (fun->sec->name);
2835 name = bfd_malloc (len + 3);
2836 if (name == NULL)
2837 return FALSE;
2838 memcpy (name, ".rodata", sizeof (".rodata"));
2839 memcpy (name + 7, fun->sec->name + 5, len - 4);
2840 }
2841 else if (strncmp (fun->sec->name, ".gnu.linkonce.t.", 16) == 0)
2842 {
2843 size_t len = strlen (fun->sec->name) + 1;
2844 name = bfd_malloc (len);
2845 if (name == NULL)
2846 return FALSE;
2847 memcpy (name, fun->sec->name, len);
2848 name[14] = 'r';
2849 }
2850
2851 if (name != NULL)
2852 {
2853 asection *rodata = NULL;
2854 asection *group_sec = elf_section_data (fun->sec)->next_in_group;
2855 if (group_sec == NULL)
2856 rodata = bfd_get_section_by_name (fun->sec->owner, name);
2857 else
2858 while (group_sec != NULL && group_sec != fun->sec)
2859 {
2860 if (strcmp (group_sec->name, name) == 0)
2861 {
2862 rodata = group_sec;
2863 break;
2864 }
2865 group_sec = elf_section_data (group_sec)->next_in_group;
2866 }
2867 fun->rodata = rodata;
2868 if (fun->rodata)
2869 {
2870 fun->rodata->linker_mark = 1;
2871 fun->rodata->gc_mark = 1;
2872 fun->rodata->flags &= ~SEC_CODE;
2873 }
2874 free (name);
2875 }
2876 }
2877 size = fun->sec->size;
2878 if (fun->rodata)
2879 size += fun->rodata->size;
2880 if (mos_param->max_overlay_size < size)
2881 mos_param->max_overlay_size = size;
2882 }
2883
2884 for (count = 0, call = fun->call_list; call != NULL; call = call->next)
2885 count += 1;
2886
2887 if (count > 1)
2888 {
2889 struct call_info **calls = bfd_malloc (count * sizeof (*calls));
2890 if (calls == NULL)
2891 return FALSE;
2892
2893 for (count = 0, call = fun->call_list; call != NULL; call = call->next)
2894 calls[count++] = call;
2895
2896 qsort (calls, count, sizeof (*calls), sort_calls);
2897
2898 fun->call_list = NULL;
2899 while (count != 0)
2900 {
2901 --count;
2902 calls[count]->next = fun->call_list;
2903 fun->call_list = calls[count];
2904 }
2905 free (calls);
2906 }
2907
2908 for (call = fun->call_list; call != NULL; call = call->next)
2909 {
2910 if (call->is_pasted)
2911 {
2912 /* There can only be one is_pasted call per function_info. */
2913 BFD_ASSERT (!fun->sec->segment_mark);
2914 fun->sec->segment_mark = 1;
2915 }
2916 if (!mark_overlay_section (call->fun, info, param))
2917 return FALSE;
2918 }
2919
2920 /* Don't put entry code into an overlay. The overlay manager needs
2921 a stack! */
2922 if (fun->lo + fun->sec->output_offset + fun->sec->output_section->vma
2923 == info->output_bfd->start_address)
2924 {
2925 fun->sec->linker_mark = 0;
2926 if (fun->rodata != NULL)
2927 fun->rodata->linker_mark = 0;
2928 }
2929 return TRUE;
2930 }
2931
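/* The text to rodata section name mapping applied above, for
   reference:

     .text              -> .rodata
     .text.f1           -> .rodata.f1
     .gnu.linkonce.t.f  -> .gnu.linkonce.r.f  */
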
2932 /* If non-zero, also unmark functions called from functions within the
2933    sections we need to unmark.  Unfortunately this isn't reliable since the
2934 call graph cannot know the destination of function pointer calls. */
2935 #define RECURSE_UNMARK 0
2936
2937 struct _uos_param {
2938 asection *exclude_input_section;
2939 asection *exclude_output_section;
2940 unsigned long clearing;
2941 };
2942
2943 /* Undo some of mark_overlay_section's work. */
2944
2945 static bfd_boolean
2946 unmark_overlay_section (struct function_info *fun,
2947 struct bfd_link_info *info,
2948 void *param)
2949 {
2950 struct call_info *call;
2951 struct _uos_param *uos_param = param;
2952 unsigned int excluded = 0;
2953
2954 if (fun->visit5)
2955 return TRUE;
2956
2957 fun->visit5 = TRUE;
2958
2960 if (fun->sec == uos_param->exclude_input_section
2961 || fun->sec->output_section == uos_param->exclude_output_section)
2962 excluded = 1;
2963
2964 if (RECURSE_UNMARK)
2965 uos_param->clearing += excluded;
2966
2967 if (RECURSE_UNMARK ? uos_param->clearing : excluded)
2968 {
2969 fun->sec->linker_mark = 0;
2970 if (fun->rodata)
2971 fun->rodata->linker_mark = 0;
2972 }
2973
2974 for (call = fun->call_list; call != NULL; call = call->next)
2975 if (!unmark_overlay_section (call->fun, info, param))
2976 return FALSE;
2977
2978 if (RECURSE_UNMARK)
2979 uos_param->clearing -= excluded;
2980 return TRUE;
2981 }
2982
2983 struct _cl_param {
2984 unsigned int lib_size;
2985 asection **lib_sections;
2986 };
2987
2988 /* Add sections we have marked as belonging to overlays to an array
2989    for consideration as non-overlay sections.  The array consists of
2990 pairs of sections, (text,rodata), for functions in the call graph. */
2991
2992 static bfd_boolean
2993 collect_lib_sections (struct function_info *fun,
2994 struct bfd_link_info *info,
2995 void *param)
2996 {
2997 struct _cl_param *lib_param = param;
2998 struct call_info *call;
2999 unsigned int size;
3000
3001 if (fun->visit6)
3002 return TRUE;
3003
3004 fun->visit6 = TRUE;
3005 if (!fun->sec->linker_mark || !fun->sec->gc_mark || fun->sec->segment_mark)
3006 return TRUE;
3007
3008 size = fun->sec->size;
3009 if (fun->rodata)
3010 size += fun->rodata->size;
3011 if (size <= lib_param->lib_size)
3012 {
3013 *lib_param->lib_sections++ = fun->sec;
3014 fun->sec->gc_mark = 0;
3015 if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
3016 {
3017 *lib_param->lib_sections++ = fun->rodata;
3018 fun->rodata->gc_mark = 0;
3019 }
3020 else
3021 *lib_param->lib_sections++ = NULL;
3022 }
3023
3024 for (call = fun->call_list; call != NULL; call = call->next)
3025 collect_lib_sections (call->fun, info, param);
3026
3027 return TRUE;
3028 }
3029
3030 /* qsort predicate to sort sections by call count. */
3031
3032 static int
3033 sort_lib (const void *a, const void *b)
3034 {
3035 asection *const *s1 = a;
3036 asection *const *s2 = b;
3037 struct _spu_elf_section_data *sec_data;
3038 struct spu_elf_stack_info *sinfo;
3039 int delta;
3040
3041 delta = 0;
3042 if ((sec_data = spu_elf_section_data (*s1)) != NULL
3043 && (sinfo = sec_data->u.i.stack_info) != NULL)
3044 {
3045 int i;
3046 for (i = 0; i < sinfo->num_fun; ++i)
3047 delta -= sinfo->fun[i].call_count;
3048 }
3049
3050 if ((sec_data = spu_elf_section_data (*s2)) != NULL
3051 && (sinfo = sec_data->u.i.stack_info) != NULL)
3052 {
3053 int i;
3054 for (i = 0; i < sinfo->num_fun; ++i)
3055 delta += sinfo->fun[i].call_count;
3056 }
3057
3058 if (delta != 0)
3059 return delta;
3060
3061   return s1 - s2;		/* Make the order deterministic.  */
3062 }
3063
3064 /* Remove some sections from those marked to be in overlays. Choose
3065 those that are called from many places, likely library functions. */
3066
3067 static unsigned int
3068 auto_ovl_lib_functions (struct bfd_link_info *info, unsigned int lib_size)
3069 {
3070 bfd *ibfd;
3071 asection **lib_sections;
3072 unsigned int i, lib_count;
3073 struct _cl_param collect_lib_param;
3074 struct function_info dummy_caller;
3075 struct spu_link_hash_table *htab;
3076
3077 memset (&dummy_caller, 0, sizeof (dummy_caller));
3078 lib_count = 0;
3079 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3080 {
3081 extern const bfd_target bfd_elf32_spu_vec;
3082 asection *sec;
3083
3084 if (ibfd->xvec != &bfd_elf32_spu_vec)
3085 continue;
3086
3087 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3088 if (sec->linker_mark
3089 && sec->size < lib_size
3090 && (sec->flags & SEC_CODE) != 0)
3091 lib_count += 1;
3092 }
3093 lib_sections = bfd_malloc (lib_count * 2 * sizeof (*lib_sections));
3094 if (lib_sections == NULL)
3095 return (unsigned int) -1;
3096 collect_lib_param.lib_size = lib_size;
3097 collect_lib_param.lib_sections = lib_sections;
3098 if (!for_each_node (collect_lib_sections, info, &collect_lib_param,
3099 TRUE))
3100 return (unsigned int) -1;
3101 lib_count = (collect_lib_param.lib_sections - lib_sections) / 2;
3102
3103 /* Sort sections so that those with the most calls are first. */
3104 if (lib_count > 1)
3105 qsort (lib_sections, lib_count, 2 * sizeof (*lib_sections), sort_lib);
3106
3107 htab = spu_hash_table (info);
3108 for (i = 0; i < lib_count; i++)
3109 {
3110 unsigned int tmp, stub_size;
3111 asection *sec;
3112 struct _spu_elf_section_data *sec_data;
3113 struct spu_elf_stack_info *sinfo;
3114
3115 sec = lib_sections[2 * i];
3116 /* If this section is OK, its size must be less than lib_size. */
3117 tmp = sec->size;
3118 /* If it has a rodata section, then add that too. */
3119 if (lib_sections[2 * i + 1])
3120 tmp += lib_sections[2 * i + 1]->size;
3121 /* Add any new overlay call stubs needed by the section. */
3122 stub_size = 0;
3123 if (tmp < lib_size
3124 && (sec_data = spu_elf_section_data (sec)) != NULL
3125 && (sinfo = sec_data->u.i.stack_info) != NULL)
3126 {
3127 int k;
3128 struct call_info *call;
3129
3130 for (k = 0; k < sinfo->num_fun; ++k)
3131 for (call = sinfo->fun[k].call_list; call; call = call->next)
3132 if (call->fun->sec->linker_mark)
3133 {
3134 struct call_info *p;
3135 for (p = dummy_caller.call_list; p; p = p->next)
3136 if (p->fun == call->fun)
3137 break;
3138 if (!p)
3139 stub_size += ovl_stub_size (htab->params->ovly_flavour);
3140 }
3141 }
3142 if (tmp + stub_size < lib_size)
3143 {
3144 struct call_info **pp, *p;
3145
3146 /* This section fits. Mark it as non-overlay. */
3147 lib_sections[2 * i]->linker_mark = 0;
3148 if (lib_sections[2 * i + 1])
3149 lib_sections[2 * i + 1]->linker_mark = 0;
3150 lib_size -= tmp + stub_size;
3151 /* Call stubs to the section we just added are no longer
3152 needed. */
3153 pp = &dummy_caller.call_list;
3154 while ((p = *pp) != NULL)
3155 if (!p->fun->sec->linker_mark)
3156 {
3157 lib_size += ovl_stub_size (htab->params->ovly_flavour);
3158 *pp = p->next;
3159 free (p);
3160 }
3161 else
3162 pp = &p->next;
3163 /* Add new call stubs to dummy_caller. */
3164 if ((sec_data = spu_elf_section_data (sec)) != NULL
3165 && (sinfo = sec_data->u.i.stack_info) != NULL)
3166 {
3167 int k;
3168 struct call_info *call;
3169
3170 for (k = 0; k < sinfo->num_fun; ++k)
3171 for (call = sinfo->fun[k].call_list;
3172 call;
3173 call = call->next)
3174 if (call->fun->sec->linker_mark)
3175 {
3176 struct call_info *callee;
3177 callee = bfd_malloc (sizeof (*callee));
3178 if (callee == NULL)
3179 return (unsigned int) -1;
3180 *callee = *call;
3181 if (!insert_callee (&dummy_caller, callee))
3182 free (callee);
3183 }
3184 }
3185 }
3186 }
3187 while (dummy_caller.call_list != NULL)
3188 {
3189 struct call_info *call = dummy_caller.call_list;
3190 dummy_caller.call_list = call->next;
3191 free (call);
3192 }
3193 for (i = 0; i < 2 * lib_count; i++)
3194 if (lib_sections[i])
3195 lib_sections[i]->gc_mark = 1;
3196 free (lib_sections);
3197 return lib_size;
3198 }
3199
3200 /* Build an array of overlay sections. The deepest node's section is
3201 added first, then its parent node's section, then everything called
3202    from the parent section.  The idea is to group sections so as to
3203 minimise calls between different overlays. */
3204
3205 static bfd_boolean
3206 collect_overlays (struct function_info *fun,
3207 struct bfd_link_info *info,
3208 void *param)
3209 {
3210 struct call_info *call;
3211 bfd_boolean added_fun;
3212 asection ***ovly_sections = param;
3213
3214 if (fun->visit7)
3215 return TRUE;
3216
3217 fun->visit7 = TRUE;
3218 for (call = fun->call_list; call != NULL; call = call->next)
3219 if (!call->is_pasted)
3220 {
3221 if (!collect_overlays (call->fun, info, ovly_sections))
3222 return FALSE;
3223 break;
3224 }
3225
3226 added_fun = FALSE;
3227 if (fun->sec->linker_mark && fun->sec->gc_mark)
3228 {
3229 fun->sec->gc_mark = 0;
3230 *(*ovly_sections)++ = fun->sec;
3231 if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
3232 {
3233 fun->rodata->gc_mark = 0;
3234 *(*ovly_sections)++ = fun->rodata;
3235 }
3236 else
3237 *(*ovly_sections)++ = NULL;
3238 added_fun = TRUE;
3239
3240 /* Pasted sections must stay with the first section. We don't
3241 put pasted sections in the array, just the first section.
3242 Mark subsequent sections as already considered. */
3243 if (fun->sec->segment_mark)
3244 {
3245 struct function_info *call_fun = fun;
3246 do
3247 {
3248 for (call = call_fun->call_list; call != NULL; call = call->next)
3249 if (call->is_pasted)
3250 {
3251 call_fun = call->fun;
3252 call_fun->sec->gc_mark = 0;
3253 if (call_fun->rodata)
3254 call_fun->rodata->gc_mark = 0;
3255 break;
3256 }
3257 if (call == NULL)
3258 abort ();
3259 }
3260 while (call_fun->sec->segment_mark);
3261 }
3262 }
3263
3264 for (call = fun->call_list; call != NULL; call = call->next)
3265 if (!collect_overlays (call->fun, info, ovly_sections))
3266 return FALSE;
3267
3268 if (added_fun)
3269 {
3270 struct _spu_elf_section_data *sec_data;
3271 struct spu_elf_stack_info *sinfo;
3272
3273 if ((sec_data = spu_elf_section_data (fun->sec)) != NULL
3274 && (sinfo = sec_data->u.i.stack_info) != NULL)
3275 {
3276 int i;
3277 for (i = 0; i < sinfo->num_fun; ++i)
3278 if (!collect_overlays (&sinfo->fun[i], info, ovly_sections))
3279 return FALSE;
3280 }
3281 }
3282
3283 return TRUE;
3284 }
3285
3286 struct _sum_stack_param {
3287 size_t cum_stack;
3288 size_t overall_stack;
3289 bfd_boolean emit_stack_syms;
3290 };
3291
3292 /* Descend the call graph for FUN, accumulating total stack required. */
3293
3294 static bfd_boolean
3295 sum_stack (struct function_info *fun,
3296 struct bfd_link_info *info,
3297 void *param)
3298 {
3299 struct call_info *call;
3300 struct function_info *max;
3301 size_t stack, cum_stack;
3302 const char *f1;
3303 bfd_boolean has_call;
3304 struct _sum_stack_param *sum_stack_param = param;
3305 struct spu_link_hash_table *htab;
3306
3307 cum_stack = fun->stack;
3308 sum_stack_param->cum_stack = cum_stack;
3309 if (fun->visit3)
3310 return TRUE;
3311
3312 has_call = FALSE;
3313 max = NULL;
3314 for (call = fun->call_list; call; call = call->next)
3315 {
3316 if (!call->is_pasted)
3317 has_call = TRUE;
3318 if (!sum_stack (call->fun, info, sum_stack_param))
3319 return FALSE;
3320 stack = sum_stack_param->cum_stack;
3321 /* Include caller stack for normal calls, don't do so for
3322 tail calls. fun->stack here is local stack usage for
3323 this function. */
3324 if (!call->is_tail || call->is_pasted || call->fun->start != NULL)
3325 stack += fun->stack;
3326 if (cum_stack < stack)
3327 {
3328 cum_stack = stack;
3329 max = call->fun;
3330 }
3331 }
3332
3333 sum_stack_param->cum_stack = cum_stack;
3334 stack = fun->stack;
3335 /* Now fun->stack holds cumulative stack. */
3336 fun->stack = cum_stack;
3337 fun->visit3 = TRUE;
3338
3339 if (!fun->non_root
3340 && sum_stack_param->overall_stack < cum_stack)
3341 sum_stack_param->overall_stack = cum_stack;
3342
3343 htab = spu_hash_table (info);
3344 if (htab->params->auto_overlay)
3345 return TRUE;
3346
3347 f1 = func_name (fun);
3348 if (!fun->non_root)
3349 info->callbacks->info (_(" %s: 0x%v\n"), f1, (bfd_vma) cum_stack);
3350 info->callbacks->minfo (_("%s: 0x%v 0x%v\n"),
3351 f1, (bfd_vma) stack, (bfd_vma) cum_stack);
3352
3353 if (has_call)
3354 {
3355 info->callbacks->minfo (_(" calls:\n"));
3356 for (call = fun->call_list; call; call = call->next)
3357 if (!call->is_pasted)
3358 {
3359 const char *f2 = func_name (call->fun);
3360 const char *ann1 = call->fun == max ? "*" : " ";
3361 const char *ann2 = call->is_tail ? "t" : " ";
3362
3363 info->callbacks->minfo (_(" %s%s %s\n"), ann1, ann2, f2);
3364 }
3365 }
3366
3367 if (sum_stack_param->emit_stack_syms)
3368 {
3369 char *name = bfd_malloc (18 + strlen (f1));
3370 struct elf_link_hash_entry *h;
3371
3372 if (name == NULL)
3373 return FALSE;
3374
3375 if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
3376 sprintf (name, "__stack_%s", f1);
3377 else
3378 sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);
3379
3380 h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
3381 free (name);
3382 if (h != NULL
3383 && (h->root.type == bfd_link_hash_new
3384 || h->root.type == bfd_link_hash_undefined
3385 || h->root.type == bfd_link_hash_undefweak))
3386 {
3387 h->root.type = bfd_link_hash_defined;
3388 h->root.u.def.section = bfd_abs_section_ptr;
3389 h->root.u.def.value = cum_stack;
3390 h->size = 0;
3391 h->type = 0;
3392 h->ref_regular = 1;
3393 h->def_regular = 1;
3394 h->ref_regular_nonweak = 1;
3395 h->forced_local = 1;
3396 h->non_elf = 0;
3397 }
3398 }
3399
3400 return TRUE;
3401 }
3402
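/* In effect sum_stack computes, for each node F,

     cum (F) = max (local (F),
                    max over calls C from F of
                      cum (C.fun) + (pure tail call ? 0 : local (F)))

   where a "pure tail call" is a tail call that is not a pasted
   continuation and whose target is a real function start; only then
   does the callee reuse F's frame, so F's local stack is not added.  */
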
3403 /* SEC is part of a pasted function. Return the call_info for the
3404 next section of this function. */
3405
3406 static struct call_info *
3407 find_pasted_call (asection *sec)
3408 {
3409 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
3410 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
3411 struct call_info *call;
3412 int k;
3413
3414 for (k = 0; k < sinfo->num_fun; ++k)
3415 for (call = sinfo->fun[k].call_list; call != NULL; call = call->next)
3416 if (call->is_pasted)
3417 return call;
3418 abort ();
3419 return 0;
3420 }
3421
3422 /* qsort predicate to sort bfds by file name. */
3423
3424 static int
3425 sort_bfds (const void *a, const void *b)
3426 {
3427 bfd *const *abfd1 = a;
3428 bfd *const *abfd2 = b;
3429
3430 return strcmp ((*abfd1)->filename, (*abfd2)->filename);
3431 }
3432
3433 /* Handle --auto-overlay. */
3434
3435 static void spu_elf_auto_overlay (struct bfd_link_info *)
3436 ATTRIBUTE_NORETURN;
3437
3438 static void
3439 spu_elf_auto_overlay (struct bfd_link_info *info)
3440 {
3441 bfd *ibfd;
3442 bfd **bfd_arr;
3443 struct elf_segment_map *m;
3444 unsigned int fixed_size, lo, hi;
3445 struct spu_link_hash_table *htab;
3446 unsigned int base, i, count, bfd_count;
3447 int ovlynum;
3448 asection **ovly_sections, **ovly_p;
3449 FILE *script;
3450 unsigned int total_overlay_size, overlay_size;
3451 struct elf_link_hash_entry *h;
3452 struct _mos_param mos_param;
3453 struct _uos_param uos_param;
3454 struct function_info dummy_caller;
3455
3456 /* Find the extents of our loadable image. */
3457 lo = (unsigned int) -1;
3458 hi = 0;
3459 for (m = elf_tdata (info->output_bfd)->segment_map; m != NULL; m = m->next)
3460 if (m->p_type == PT_LOAD)
3461 for (i = 0; i < m->count; i++)
3462 if (m->sections[i]->size != 0)
3463 {
3464 if (m->sections[i]->vma < lo)
3465 lo = m->sections[i]->vma;
3466 if (m->sections[i]->vma + m->sections[i]->size - 1 > hi)
3467 hi = m->sections[i]->vma + m->sections[i]->size - 1;
3468 }
3469 fixed_size = hi + 1 - lo;
3470
3471 if (!discover_functions (info))
3472 goto err_exit;
3473
3474 if (!build_call_tree (info))
3475 goto err_exit;
3476
3477 uos_param.exclude_input_section = 0;
3478 uos_param.exclude_output_section
3479 = bfd_get_section_by_name (info->output_bfd, ".interrupt");
3480
3481 htab = spu_hash_table (info);
3482 h = elf_link_hash_lookup (&htab->elf, "__ovly_load",
3483 FALSE, FALSE, FALSE);
3484 if (h != NULL
3485 && (h->root.type == bfd_link_hash_defined
3486 || h->root.type == bfd_link_hash_defweak)
3487 && h->def_regular)
3488 {
3489 /* We have a user supplied overlay manager. */
3490 uos_param.exclude_input_section = h->root.u.def.section;
3491 }
3492 else
3493 {
3494 /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
3495 builtin version to .text, and will adjust .text size. */
3496 fixed_size += (*htab->params->spu_elf_load_ovl_mgr) ();
3497 }
3498
3499 /* Mark overlay sections, and find max overlay section size. */
3500 mos_param.max_overlay_size = 0;
3501 if (!for_each_node (mark_overlay_section, info, &mos_param, TRUE))
3502 goto err_exit;
3503
3504 /* We can't put the overlay manager or interrupt routines in
3505 overlays. */
3506 uos_param.clearing = 0;
3507 if ((uos_param.exclude_input_section
3508 || uos_param.exclude_output_section)
3509 && !for_each_node (unmark_overlay_section, info, &uos_param, TRUE))
3510 goto err_exit;
3511
3512 bfd_count = 0;
3513 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3514 ++bfd_count;
3515 bfd_arr = bfd_malloc (bfd_count * sizeof (*bfd_arr));
3516 if (bfd_arr == NULL)
3517 goto err_exit;
3518
3519 /* Count overlay sections, and subtract their sizes from "fixed_size". */
3520 count = 0;
3521 bfd_count = 0;
3522 total_overlay_size = 0;
3523 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3524 {
3525 extern const bfd_target bfd_elf32_spu_vec;
3526 asection *sec;
3527 unsigned int old_count;
3528
3529 if (ibfd->xvec != &bfd_elf32_spu_vec)
3530 continue;
3531
3532 old_count = count;
3533 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3534 if (sec->linker_mark)
3535 {
3536 if ((sec->flags & SEC_CODE) != 0)
3537 count += 1;
3538 fixed_size -= sec->size;
3539 total_overlay_size += sec->size;
3540 }
3541 if (count != old_count)
3542 bfd_arr[bfd_count++] = ibfd;
3543 }
3544
3545 /* Since the overlay link script selects sections by file name and
3546 section name, ensure that file names are unique. */
3547 if (bfd_count > 1)
3548 {
3549 bfd_boolean ok = TRUE;
3550
3551 qsort (bfd_arr, bfd_count, sizeof (*bfd_arr), sort_bfds);
3552 for (i = 1; i < bfd_count; ++i)
3553 if (strcmp (bfd_arr[i - 1]->filename, bfd_arr[i]->filename) == 0)
3554 {
3555 if (bfd_arr[i - 1]->my_archive == bfd_arr[i]->my_archive)
3556 {
3557 if (bfd_arr[i - 1]->my_archive && bfd_arr[i]->my_archive)
3558 info->callbacks->einfo (_("%s duplicated in %s\n"),
3559 bfd_arr[i]->filename,
3560 bfd_arr[i]->my_archive->filename);
3561 else
3562 info->callbacks->einfo (_("%s duplicated\n"),
3563 bfd_arr[i]->filename);
3564 ok = FALSE;
3565 }
3566 }
3567 if (!ok)
3568 {
3569 info->callbacks->einfo (_("sorry, no support for duplicate "
3570 "object files in auto-overlay script\n"));
3571 bfd_set_error (bfd_error_bad_value);
3572 goto err_exit;
3573 }
3574 }
3575 free (bfd_arr);
3576
3577 if (htab->reserved == 0)
3578 {
3579 struct _sum_stack_param sum_stack_param;
3580
3581 sum_stack_param.emit_stack_syms = 0;
3582 sum_stack_param.overall_stack = 0;
3583 if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
3584 goto err_exit;
3585 htab->reserved = sum_stack_param.overall_stack + htab->extra_stack_space;
3586 }
3587 fixed_size += htab->reserved;
3588 fixed_size += htab->non_ovly_stub * ovl_stub_size (htab->params->ovly_flavour);
3589 if (fixed_size + mos_param.max_overlay_size <= htab->local_store)
3590 {
3591       /* Guess the number of overlays.  Assuming the overlay buffer is
3592 	 on average only half full should be a conservative estimate.  */
3593 ovlynum = total_overlay_size * 2 / (htab->local_store - fixed_size);
3594 /* Space for _ovly_table[], _ovly_buf_table[] and toe. */
3595 fixed_size += ovlynum * 16 + 16 + 4 + 16;
3596 }
3597
3598 if (fixed_size + mos_param.max_overlay_size > htab->local_store)
3599 info->callbacks->einfo (_("non-overlay size of 0x%v plus maximum overlay "
3600 "size of 0x%v exceeds local store\n"),
3601 (bfd_vma) fixed_size,
3602 (bfd_vma) mos_param.max_overlay_size);
3603
3604 /* Now see if we should put some functions in the non-overlay area. */
3605 else if (fixed_size < htab->overlay_fixed)
3606 {
3607 unsigned int max_fixed, lib_size;
3608
3609 max_fixed = htab->local_store - mos_param.max_overlay_size;
3610 if (max_fixed > htab->overlay_fixed)
3611 max_fixed = htab->overlay_fixed;
3612 lib_size = max_fixed - fixed_size;
3613 lib_size = auto_ovl_lib_functions (info, lib_size);
3614 if (lib_size == (unsigned int) -1)
3615 goto err_exit;
3616 fixed_size = max_fixed - lib_size;
3617 }
3618
3619 /* Build an array of sections, suitably sorted to place into
3620 overlays. */
3621 ovly_sections = bfd_malloc (2 * count * sizeof (*ovly_sections));
3622 if (ovly_sections == NULL)
3623 goto err_exit;
3624 ovly_p = ovly_sections;
3625 if (!for_each_node (collect_overlays, info, &ovly_p, TRUE))
3626 goto err_exit;
3627 count = (size_t) (ovly_p - ovly_sections) / 2;
3628
3629 script = (*htab->params->spu_elf_open_overlay_script) ();
3630
3631 if (fprintf (script, "SECTIONS\n{\n OVERLAY :\n {\n") <= 0)
3632 goto file_err;
3633
3634 memset (&dummy_caller, 0, sizeof (dummy_caller));
3635 overlay_size = htab->local_store - fixed_size;
3636 base = 0;
3637 ovlynum = 0;
3638 while (base < count)
3639 {
3640 unsigned int size = 0;
3641 unsigned int j;
3642
3643 for (i = base; i < count; i++)
3644 {
3645 asection *sec;
3646 unsigned int tmp;
3647 unsigned int num_stubs;
3648 struct call_info *call, *pasty;
3649 struct _spu_elf_section_data *sec_data;
3650 struct spu_elf_stack_info *sinfo;
3651 int k;
3652
3653 /* See whether we can add this section to the current
3654 overlay without overflowing our overlay buffer. */
3655 sec = ovly_sections[2 * i];
3656 tmp = size + sec->size;
3657 if (ovly_sections[2 * i + 1])
3658 tmp += ovly_sections[2 * i + 1]->size;
3659 if (tmp > overlay_size)
3660 break;
3661 if (sec->segment_mark)
3662 {
3663 /* Pasted sections must stay together, so add their
3664 sizes too. */
3665 struct call_info *pasty = find_pasted_call (sec);
3666 while (pasty != NULL)
3667 {
3668 struct function_info *call_fun = pasty->fun;
3669 tmp += call_fun->sec->size;
3670 if (call_fun->rodata)
3671 tmp += call_fun->rodata->size;
3672 for (pasty = call_fun->call_list; pasty; pasty = pasty->next)
3673 if (pasty->is_pasted)
3674 break;
3675 }
3676 }
3677 if (tmp > overlay_size)
3678 break;
3679
3680 /* If we add this section, we might need new overlay call
3681 	     stubs.  Add any overlay section calls to dummy_caller.  */
3682 pasty = NULL;
3683 sec_data = spu_elf_section_data (sec);
3684 sinfo = sec_data->u.i.stack_info;
3685 for (k = 0; k < sinfo->num_fun; ++k)
3686 for (call = sinfo->fun[k].call_list; call; call = call->next)
3687 if (call->is_pasted)
3688 {
3689 BFD_ASSERT (pasty == NULL);
3690 pasty = call;
3691 }
3692 else if (call->fun->sec->linker_mark)
3693 {
3694 if (!copy_callee (&dummy_caller, call))
3695 goto err_exit;
3696 }
3697 while (pasty != NULL)
3698 {
3699 struct function_info *call_fun = pasty->fun;
3700 pasty = NULL;
3701 for (call = call_fun->call_list; call; call = call->next)
3702 if (call->is_pasted)
3703 {
3704 BFD_ASSERT (pasty == NULL);
3705 pasty = call;
3706 }
3707 else if (!copy_callee (&dummy_caller, call))
3708 goto err_exit;
3709 }
3710
3711 /* Calculate call stub size. */
3712 num_stubs = 0;
3713 for (call = dummy_caller.call_list; call; call = call->next)
3714 {
3715 unsigned int k;
3716
3717 ++num_stubs;
3718 /* If the call is within this overlay, we won't need a
3719 stub. */
3720 for (k = base; k < i + 1; k++)
3721 if (call->fun->sec == ovly_sections[2 * k])
3722 {
3723 --num_stubs;
3724 break;
3725 }
3726 }
3727 if (tmp + num_stubs * ovl_stub_size (htab->params->ovly_flavour)
3728 > overlay_size)
3729 break;
3730
3731 size = tmp;
3732 }
3733
3734 if (i == base)
3735 {
3736 info->callbacks->einfo (_("%B:%A%s exceeds overlay size\n"),
3737 ovly_sections[2 * i]->owner,
3738 ovly_sections[2 * i],
3739 ovly_sections[2 * i + 1] ? " + rodata" : "");
3740 bfd_set_error (bfd_error_bad_value);
3741 goto err_exit;
3742 }
3743
3744 if (fprintf (script, " .ovly%d {\n", ++ovlynum) <= 0)
3745 goto file_err;
3746 for (j = base; j < i; j++)
3747 {
3748 asection *sec = ovly_sections[2 * j];
3749
3750 if (fprintf (script, " %s%c%s (%s)\n",
3751 (sec->owner->my_archive != NULL
3752 ? sec->owner->my_archive->filename : ""),
3753 info->path_separator,
3754 sec->owner->filename,
3755 sec->name) <= 0)
3756 goto file_err;
3757 if (sec->segment_mark)
3758 {
3759 struct call_info *call = find_pasted_call (sec);
3760 while (call != NULL)
3761 {
3762 struct function_info *call_fun = call->fun;
3763 sec = call_fun->sec;
3764 if (fprintf (script, " %s%c%s (%s)\n",
3765 (sec->owner->my_archive != NULL
3766 ? sec->owner->my_archive->filename : ""),
3767 info->path_separator,
3768 sec->owner->filename,
3769 sec->name) <= 0)
3770 goto file_err;
3771 for (call = call_fun->call_list; call; call = call->next)
3772 if (call->is_pasted)
3773 break;
3774 }
3775 }
3776 }
3777
3778 for (j = base; j < i; j++)
3779 {
3780 asection *sec = ovly_sections[2 * j + 1];
3781 if (sec != NULL
3782 && fprintf (script, " %s%c%s (%s)\n",
3783 (sec->owner->my_archive != NULL
3784 ? sec->owner->my_archive->filename : ""),
3785 info->path_separator,
3786 sec->owner->filename,
3787 sec->name) <= 0)
3788 goto file_err;
3789
3790 sec = ovly_sections[2 * j];
3791 if (sec->segment_mark)
3792 {
3793 struct call_info *call = find_pasted_call (sec);
3794 while (call != NULL)
3795 {
3796 struct function_info *call_fun = call->fun;
3797 sec = call_fun->rodata;
3798 if (sec != NULL
3799 && fprintf (script, " %s%c%s (%s)\n",
3800 (sec->owner->my_archive != NULL
3801 ? sec->owner->my_archive->filename : ""),
3802 info->path_separator,
3803 sec->owner->filename,
3804 sec->name) <= 0)
3805 goto file_err;
3806 for (call = call_fun->call_list; call; call = call->next)
3807 if (call->is_pasted)
3808 break;
3809 }
3810 }
3811 }
3812
3813 if (fprintf (script, " }\n") <= 0)
3814 goto file_err;
3815
3816 while (dummy_caller.call_list != NULL)
3817 {
3818 struct call_info *call = dummy_caller.call_list;
3819 dummy_caller.call_list = call->next;
3820 free (call);
3821 }
3822
3823 base = i;
3824 }
3825 free (ovly_sections);
3826
3827 if (fprintf (script, " }\n}\nINSERT AFTER .text;\n") <= 0)
3828 goto file_err;
3829 if (fclose (script) != 0)
3830 goto file_err;
3831
3832 if (htab->params->auto_overlay & AUTO_RELINK)
3833 (*htab->params->spu_elf_relink) ();
3834
3835 xexit (0);
3836
3837 file_err:
3838 bfd_set_error (bfd_error_system_call);
3839 err_exit:
3840 info->callbacks->einfo ("%F%P: auto overlay error: %E\n");
3841 xexit (1);
3842 }
3843
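/* The script emitted above has this overall shape (file and section
   names hypothetical; the separator between archive and member is
   info->path_separator, typically ':'):

     SECTIONS
     {
      OVERLAY :
      {
      .ovly1 {
       lib.a:foo.o (.text.f1)
       lib.a:foo.o (.rodata.f1)
      }
      .ovly2 {
       bar.o (.text.f2)
      }
      }
     }
     INSERT AFTER .text;

   Within each .ovlyN, the text sections of the region's functions are
   listed first, then their rodata sections.  */
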
3844 /* Provide an estimate of total stack required. */
3845
3846 static bfd_boolean
3847 spu_elf_stack_analysis (struct bfd_link_info *info)
3848 {
3849 struct spu_link_hash_table *htab;
3850 struct _sum_stack_param sum_stack_param;
3851
3852 if (!discover_functions (info))
3853 return FALSE;
3854
3855 if (!build_call_tree (info))
3856 return FALSE;
3857
3858 htab = spu_hash_table (info);
3859 info->callbacks->info (_("Stack size for call graph root nodes.\n"));
3860 info->callbacks->minfo (_("\nStack size for functions. "
3861 "Annotations: '*' max stack, 't' tail call\n"));
3862
3863 sum_stack_param.emit_stack_syms = htab->params->emit_stack_syms;
3864 sum_stack_param.overall_stack = 0;
3865 if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
3866 return FALSE;
3867
3868 info->callbacks->info (_("Maximum stack required is 0x%v\n"),
3869 (bfd_vma) sum_stack_param.overall_stack);
3870 return TRUE;
3871 }
3872
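/* Sample of the output produced (hypothetical functions and sizes):

     Stack size for call graph root nodes.
      main: 0x530
     Maximum stack required is 0x530

   and in the link map, via minfo, per-function detail of the form

     main: 0x20 0x530
      calls:
       *  foo
        t bar

   where '*' marks the call on the maximum-stack path and 't' a tail
   call.  */
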
3873 /* Perform a final link. */
3874
3875 static bfd_boolean
3876 spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
3877 {
3878 struct spu_link_hash_table *htab = spu_hash_table (info);
3879
3880 if (htab->params->auto_overlay)
3881 spu_elf_auto_overlay (info);
3882
3883 if (htab->params->stack_analysis
3884 && !spu_elf_stack_analysis (info))
3885 info->callbacks->einfo ("%X%P: stack analysis error: %E\n");
3886
3887 return bfd_elf_final_link (output_bfd, info);
3888 }
3889
3890 /* Called when not normally emitting relocs, ie. !info->relocatable
3891 and !info->emitrelocations. Returns a count of special relocs
3892 that need to be emitted. */
3893
3894 static unsigned int
3895 spu_elf_count_relocs (struct bfd_link_info *info, asection *sec)
3896 {
3897 Elf_Internal_Rela *relocs;
3898 unsigned int count = 0;
3899
3900 relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
3901 info->keep_memory);
3902 if (relocs != NULL)
3903 {
3904 Elf_Internal_Rela *rel;
3905 Elf_Internal_Rela *relend = relocs + sec->reloc_count;
3906
3907 for (rel = relocs; rel < relend; rel++)
3908 {
3909 int r_type = ELF32_R_TYPE (rel->r_info);
3910 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
3911 ++count;
3912 }
3913
3914 if (elf_section_data (sec)->relocs != relocs)
3915 free (relocs);
3916 }
3917
3918 return count;
3919 }
3920
3921 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
3922
3923 static int
3924 spu_elf_relocate_section (bfd *output_bfd,
3925 struct bfd_link_info *info,
3926 bfd *input_bfd,
3927 asection *input_section,
3928 bfd_byte *contents,
3929 Elf_Internal_Rela *relocs,
3930 Elf_Internal_Sym *local_syms,
3931 asection **local_sections)
3932 {
3933 Elf_Internal_Shdr *symtab_hdr;
3934 struct elf_link_hash_entry **sym_hashes;
3935 Elf_Internal_Rela *rel, *relend;
3936 struct spu_link_hash_table *htab;
3937 asection *ea;
3938 int ret = TRUE;
3939 bfd_boolean emit_these_relocs = FALSE;
3940 bfd_boolean is_ea_sym;
3941 bfd_boolean stubs;
3942
3943 htab = spu_hash_table (info);
3944 stubs = (htab->stub_sec != NULL
3945 && maybe_needs_stubs (input_section));
3946 ea = bfd_get_section_by_name (output_bfd, "._ea");
3947 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
3948 sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));
3949
  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int r_type;
      reloc_howto_type *howto;
      unsigned int r_symndx;
      Elf_Internal_Sym *sym;
      asection *sec;
      struct elf_link_hash_entry *h;
      const char *sym_name;
      bfd_vma relocation;
      bfd_vma addend;
      bfd_reloc_status_type r;
      bfd_boolean unresolved_reloc;
      bfd_boolean warned;
      enum _stub_type stub_type;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      howto = elf_howto_table + r_type;
      unresolved_reloc = FALSE;
      warned = FALSE;
      h = NULL;
      sym = NULL;
      sec = NULL;
      if (r_symndx < symtab_hdr->sh_info)
	{
	  sym = local_syms + r_symndx;
	  sec = local_sections[r_symndx];
	  sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
	  relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
	}
      else
	{
	  if (sym_hashes == NULL)
	    return FALSE;

	  h = sym_hashes[r_symndx - symtab_hdr->sh_info];

	  while (h->root.type == bfd_link_hash_indirect
		 || h->root.type == bfd_link_hash_warning)
	    h = (struct elf_link_hash_entry *) h->root.u.i.link;

	  relocation = 0;
	  if (h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	    {
	      sec = h->root.u.def.section;
	      if (sec == NULL
		  || sec->output_section == NULL)
		/* Set a flag that will be cleared later if we find a
		   relocation value for this symbol.  output_section
		   is typically NULL for symbols satisfied by a shared
		   library.  */
		unresolved_reloc = TRUE;
	      else
		relocation = (h->root.u.def.value
			      + sec->output_section->vma
			      + sec->output_offset);
	    }
	  else if (h->root.type == bfd_link_hash_undefweak)
	    ;
	  else if (info->unresolved_syms_in_objects == RM_IGNORE
		   && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
	    ;
	  else if (!info->relocatable
		   && !(r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64))
	    {
	      bfd_boolean err;
	      err = (info->unresolved_syms_in_objects == RM_GENERATE_ERROR
		     || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT);
	      if (!info->callbacks->undefined_symbol (info,
						      h->root.root.string,
						      input_bfd,
						      input_section,
						      rel->r_offset, err))
		return FALSE;
	      warned = TRUE;
	    }
	  sym_name = h->root.root.string;
	}

      if (sec != NULL && elf_discarded_section (sec))
	{
	  /* For relocs against symbols from removed linkonce sections,
	     or sections discarded by a linker script, we just want the
	     section contents zeroed.  Avoid any special processing.  */
	  _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
	  rel->r_info = 0;
	  rel->r_addend = 0;
	  continue;
	}

      if (info->relocatable)
	continue;

      is_ea_sym = (ea != NULL
		   && sec != NULL
		   && sec->output_section == ea);

      if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	{
	  if (is_ea_sym)
	    {
	      /* ._ea is a special section that isn't allocated in SPU
		 memory, but rather occupies space in PPU memory as
		 part of an embedded ELF image.  If this reloc is
		 against a symbol defined in ._ea, then transform the
		 reloc into an equivalent one without a symbol
		 relative to the start of the ELF image.  */
	      rel->r_addend += (relocation
				- ea->vma
				+ elf_section_data (ea)->this_hdr.sh_offset);
	      rel->r_info = ELF32_R_INFO (0, r_type);
	    }
	  emit_these_relocs = TRUE;
	  continue;
	}

      if (is_ea_sym)
	unresolved_reloc = TRUE;

      if (unresolved_reloc)
	{
	  (*_bfd_error_handler)
	    (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
	     input_bfd,
	     bfd_get_section_name (input_bfd, input_section),
	     (long) rel->r_offset,
	     howto->name,
	     sym_name);
	  ret = FALSE;
	}

      /* If this symbol is in an overlay area, we may need to relocate
	 to the overlay stub.  */
      addend = rel->r_addend;
      if (stubs
	  && (stub_type = needs_ovl_stub (h, sym, sec, input_section, rel,
					  contents, info)) != no_stub)
	{
	  unsigned int ovl = 0;
	  struct got_entry *g, **head;

	  if (stub_type != nonovl_stub)
	    ovl = (spu_elf_section_data (input_section->output_section)
		   ->u.o.ovl_index);

	  if (h != NULL)
	    head = &h->got.glist;
	  else
	    head = elf_local_got_ents (input_bfd) + r_symndx;

	  for (g = *head; g != NULL; g = g->next)
	    if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
	      break;
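	  /* Stub got_entry lists were created when stubs were counted
	     and built (see count_stub and build_stub earlier in this
	     file), so a missing entry here indicates an earlier
	     bookkeeping error rather than bad input.  */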
	  if (g == NULL)
	    abort ();

	  relocation = g->stub_addr;
	  addend = 0;
	}

      r = _bfd_final_link_relocate (howto,
				    input_bfd,
				    input_section,
				    contents,
				    rel->r_offset, relocation, addend);

      if (r != bfd_reloc_ok)
	{
	  const char *msg = (const char *) 0;

	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      if (!((*info->callbacks->reloc_overflow)
		    (info, (h ? &h->root : NULL), sym_name, howto->name,
		     (bfd_vma) 0, input_bfd, input_section, rel->r_offset)))
		return FALSE;
	      break;

	    case bfd_reloc_undefined:
	      if (!((*info->callbacks->undefined_symbol)
		    (info, sym_name, input_bfd, input_section,
		     rel->r_offset, TRUE)))
		return FALSE;
	      break;

	    case bfd_reloc_outofrange:
	      msg = _("internal error: out of range error");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      msg = _("internal error: unsupported relocation error");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      msg = _("internal error: dangerous error");
	      goto common_error;

	    default:
	      msg = _("internal error: unknown error");
	      /* fall through */

	    common_error:
	      ret = FALSE;
	      if (!((*info->callbacks->warning)
		    (info, msg, sym_name, input_bfd, input_section,
		     rel->r_offset)))
		return FALSE;
	      break;
	    }
	}
    }

  if (ret
      && emit_these_relocs
      && !info->emitrelocations)
    {
      Elf_Internal_Rela *wrel;
      Elf_Internal_Shdr *rel_hdr;

      wrel = rel = relocs;
      relend = relocs + input_section->reloc_count;
      for (; rel < relend; rel++)
	{
	  int r_type;

	  r_type = ELF32_R_TYPE (rel->r_info);
	  if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	    *wrel++ = *rel;
	}
      input_section->reloc_count = wrel - relocs;
      /* Adjust the reloc section size to match the squeezed count,
	 so that _bfd_elf_link_output_relocs writes out only the
	 relocs kept above.  */
      rel_hdr = &elf_section_data (input_section)->rel_hdr;
      rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
      ret = 2;
    }

  return ret;
}
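
/* Returning 2 above, rather than TRUE, makes the generic ELF linker
   (elf_link_input_bfd) call _bfd_elf_link_output_relocs for this
   section even though neither info->relocatable nor
   info->emitrelocations is set.  */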

/* Adjust _SPUEAR_ syms to point at their overlay stubs.  */

static bfd_boolean
spu_elf_output_symbol_hook (struct bfd_link_info *info,
			    const char *sym_name ATTRIBUTE_UNUSED,
			    Elf_Internal_Sym *sym,
			    asection *sym_sec ATTRIBUTE_UNUSED,
			    struct elf_link_hash_entry *h)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);

  if (!info->relocatable
      && htab->stub_sec != NULL
      && h != NULL
      && (h->root.type == bfd_link_hash_defined
	  || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
    {
      struct got_entry *g;

      for (g = h->got.glist; g != NULL; g = g->next)
	if (g->addend == 0 && g->ovl == 0)
	  {
	    sym->st_shndx = (_bfd_elf_section_from_bfd_section
			     (htab->stub_sec[0]->output_section->owner,
			      htab->stub_sec[0]->output_section));
	    sym->st_value = g->stub_addr;
	    break;
	  }
    }

  return TRUE;
}
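
/* _SPUEAR_ symbols name additional entry points into the SPU image
   (see allocate_spuear_stubs), and such an entry may live in an
   overlay, so the output symbol must carry the address of the
   always-resident stub rather than that of the overlaid code.  */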

static int spu_plugin = 0;

void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}

/* Set ELF header e_type for plugins.  */

static void
spu_elf_post_process_headers (bfd *abfd,
			      struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  if (spu_plugin)
    {
      Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);

      i_ehdrp->e_type = ET_DYN;
    }
}

/* We may add an extra PT_LOAD segment for .toe.  We also need extra
   segments for overlays.  */

static int
spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
{
  int extra = 0;
  asection *sec;

  if (info != NULL)
    {
      struct spu_link_hash_table *htab = spu_hash_table (info);
      extra = htab->num_overlays;
    }

  if (extra)
    ++extra;

  sec = bfd_get_section_by_name (abfd, ".toe");
  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
    ++extra;

  return extra;
}
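
/* That is one segment per overlay, plus (presumably) one more for
   the non-overlay remainder created when spu_elf_modify_segment_map
   below splits overlay sections out of an existing PT_LOAD, plus one
   for a loadable .toe.  */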

/* Remove .toe section from other PT_LOAD segments and put it in
   a segment of its own.  Put overlays in separate segments too.  */

static bfd_boolean
spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
{
  asection *toe, *s;
  struct elf_segment_map *m;
  unsigned int i;

  if (info == NULL)
    return TRUE;

  toe = bfd_get_section_by_name (abfd, ".toe");
  for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
    if (m->p_type == PT_LOAD && m->count > 1)
      for (i = 0; i < m->count; i++)
	if ((s = m->sections[i]) == toe
	    || spu_elf_section_data (s)->u.o.ovl_index != 0)
	  {
	    struct elf_segment_map *m2;
	    bfd_vma amt;

	    if (i + 1 < m->count)
	      {
		amt = sizeof (struct elf_segment_map);
		amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->count = m->count - (i + 1);
		memcpy (m2->sections, m->sections + i + 1,
			m2->count * sizeof (m->sections[0]));
		m2->p_type = PT_LOAD;
		m2->next = m->next;
		m->next = m2;
	      }
	    m->count = 1;
	    if (i != 0)
	      {
		m->count = i;
		amt = sizeof (struct elf_segment_map);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->p_type = PT_LOAD;
		m2->count = 1;
		m2->sections[0] = s;
		m2->next = m->next;
		m->next = m2;
	      }
	    break;
	  }

  return TRUE;
}
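
/* For example, a PT_LOAD map of [text, ovl1, data] is split into
   [text], [ovl1] and [data]: the trailing sections are first moved
   to a new map entry, then the overlay (or .toe) section itself if
   it did not start the segment.  The outer loop then revisits the
   new entries, so every overlay ends up in a segment of its own.  */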

/* Tweak the section type of .note.spu_name.  */

static bfd_boolean
spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
		       Elf_Internal_Shdr *hdr,
		       asection *sec)
{
  if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
    hdr->sh_type = SHT_NOTE;
  return TRUE;
}

/* Tweak phdrs before writing them out.  */

static int
spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
{
  const struct elf_backend_data *bed;
  struct elf_obj_tdata *tdata;
  Elf_Internal_Phdr *phdr, *last;
  struct spu_link_hash_table *htab;
  unsigned int count;
  unsigned int i;

  if (info == NULL)
    return TRUE;

  bed = get_elf_backend_data (abfd);
  tdata = elf_tdata (abfd);
  phdr = tdata->phdr;
  count = tdata->program_header_size / bed->s->sizeof_phdr;
  htab = spu_hash_table (info);
  if (htab->num_overlays != 0)
    {
      struct elf_segment_map *m;
      unsigned int o;

      for (i = 0, m = elf_tdata (abfd)->segment_map; m; ++i, m = m->next)
	if (m->count != 0
	    && (o = spu_elf_section_data (m->sections[0])->u.o.ovl_index) != 0)
	  {
	    /* Mark this as an overlay header.  */
	    phdr[i].p_flags |= PF_OVERLAY;

	    if (htab->ovtab != NULL && htab->ovtab->size != 0)
	      {
		bfd_byte *p = htab->ovtab->contents;
		unsigned int off = o * 16 + 8;
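
		/* Each _ovly_table entry is 16 bytes, one 32-bit word
		   each for vma, size, file_off and buf (laid out by
		   spu_elf_build_stubs), so entry O's file_off field
		   sits at byte 8.  */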

		/* Write file_off into _ovly_table.  */
		bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
	      }
	  }
    }

  /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
     of 16.  This should always be possible when using the standard
     linker scripts, but don't create overlapping segments if
     someone is playing games with linker scripts.  */
  last = NULL;
  for (i = count; i-- != 0; )
    if (phdr[i].p_type == PT_LOAD)
      {
	unsigned adjust;

	adjust = -phdr[i].p_filesz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
	  break;

	adjust = -phdr[i].p_memsz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_filesz != 0
	    && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
	    && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
	  break;

	if (phdr[i].p_filesz != 0)
	  last = &phdr[i];
      }

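  /* If the scan above fell off the start of the array, i has wrapped
     to (unsigned int) -1 and no rounding would make a segment overlap
     its successor, so the adjustments can be applied for real.  */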
  if (i == (unsigned int) -1)
    for (i = count; i-- != 0; )
      if (phdr[i].p_type == PT_LOAD)
	{
	  unsigned adjust;

	  adjust = -phdr[i].p_filesz & 15;
	  phdr[i].p_filesz += adjust;

	  adjust = -phdr[i].p_memsz & 15;
	  phdr[i].p_memsz += adjust;
	}

  return TRUE;
}

#define TARGET_BIG_SYM		bfd_elf32_spu_vec
#define TARGET_BIG_NAME		"elf32-spu"
#define ELF_ARCH		bfd_arch_spu
#define ELF_MACHINE_CODE	EM_SPU
/* This matches the alignment needed for DMA.  */
#define ELF_MAXPAGESIZE		0x80
#define elf_backend_rela_normal		1
#define elf_backend_can_gc_sections	1

#define bfd_elf32_bfd_reloc_type_lookup		spu_elf_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup		spu_elf_reloc_name_lookup
#define elf_info_to_howto			spu_elf_info_to_howto
#define elf_backend_count_relocs		spu_elf_count_relocs
#define elf_backend_relocate_section		spu_elf_relocate_section
#define elf_backend_symbol_processing		spu_elf_backend_symbol_processing
#define elf_backend_link_output_symbol_hook	spu_elf_output_symbol_hook
#define elf_backend_object_p			spu_elf_object_p
#define bfd_elf32_new_section_hook		spu_elf_new_section_hook
#define bfd_elf32_bfd_link_hash_table_create	spu_elf_link_hash_table_create

#define elf_backend_additional_program_headers	spu_elf_additional_program_headers
#define elf_backend_modify_segment_map		spu_elf_modify_segment_map
#define elf_backend_modify_program_headers	spu_elf_modify_program_headers
#define elf_backend_post_process_headers	spu_elf_post_process_headers
#define elf_backend_fake_sections		spu_elf_fake_sections
#define elf_backend_special_sections		spu_elf_special_sections
#define bfd_elf32_bfd_final_link		spu_elf_final_link

#include "elf32-target.h"