/* SPU specific support for 32-bit ELF

   Copyright 2006, 2007, 2008 Free Software Foundation, Inc.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License along
   with this program; if not, write to the Free Software Foundation, Inc.,
   51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */

#include "sysdep.h"
#include "libiberty.h"
#include "bfd.h"
#include "bfdlink.h"
#include "libbfd.h"
#include "elf-bfd.h"
#include "elf/spu.h"
#include "elf32-spu.h"

/* We use RELA style relocs.  Don't define USE_REL.  */

static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
                                           void *, asection *,
                                           bfd *, char **);

/* Values of type 'enum elf_spu_reloc_type' are used to index this
   array, so it must be declared in the order of that type.  */

static reloc_howto_type elf_howto_table[] = {
  HOWTO (R_SPU_NONE,       0, 0,  0, FALSE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_NONE",
         FALSE, 0, 0x00000000, FALSE),
  HOWTO (R_SPU_ADDR10,     4, 2, 10, FALSE, 14, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR10",
         FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16,     2, 2, 16, FALSE,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR16",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR16_HI",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_LO,  0, 2, 16, FALSE,  7, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_ADDR16_LO",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR18,     0, 2, 18, FALSE,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR18",
         FALSE, 0, 0x01ffff80, FALSE),
  HOWTO (R_SPU_ADDR32,     0, 2, 32, FALSE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_ADDR32",
         FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_REL16,      2, 2, 16,  TRUE,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_REL16",
         FALSE, 0, 0x007fff80, TRUE),
  HOWTO (R_SPU_ADDR7,      0, 2,  7, FALSE, 14, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_ADDR7",
         FALSE, 0, 0x001fc000, FALSE),
  HOWTO (R_SPU_REL9,       2, 2,  9,  TRUE,  0, complain_overflow_signed,
         spu_elf_rel9,          "SPU_REL9",
         FALSE, 0, 0x0180007f, TRUE),
  HOWTO (R_SPU_REL9I,      2, 2,  9,  TRUE,  0, complain_overflow_signed,
         spu_elf_rel9,          "SPU_REL9I",
         FALSE, 0, 0x0000c07f, TRUE),
  HOWTO (R_SPU_ADDR10I,    0, 2, 10, FALSE, 14, complain_overflow_signed,
         bfd_elf_generic_reloc, "SPU_ADDR10I",
         FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16I,    0, 2, 16, FALSE,  7, complain_overflow_signed,
         bfd_elf_generic_reloc, "SPU_ADDR16I",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_REL32,      0, 2, 32,  TRUE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_REL32",
         FALSE, 0, 0xffffffff, TRUE),
  HOWTO (R_SPU_ADDR16X,    0, 2, 16, FALSE,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR16X",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_PPU32,      0, 2, 32, FALSE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_PPU32",
         FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_PPU64,      0, 4, 64, FALSE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_PPU64",
         FALSE, 0, -1, FALSE),
};

static struct bfd_elf_special_section const spu_elf_special_sections[] = {
  { "._ea", 4, 0, SHT_PROGBITS, SHF_WRITE },
  { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
  { NULL, 0, 0, 0, 0 }
};

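/* Return the SPU ELF reloc type corresponding to BFD reloc CODE.  */
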
static enum elf_spu_reloc_type
spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
{
  switch (code)
    {
    default:
      return R_SPU_NONE;
    case BFD_RELOC_SPU_IMM10W:
      return R_SPU_ADDR10;
    case BFD_RELOC_SPU_IMM16W:
      return R_SPU_ADDR16;
    case BFD_RELOC_SPU_LO16:
      return R_SPU_ADDR16_LO;
    case BFD_RELOC_SPU_HI16:
      return R_SPU_ADDR16_HI;
    case BFD_RELOC_SPU_IMM18:
      return R_SPU_ADDR18;
    case BFD_RELOC_SPU_PCREL16:
      return R_SPU_REL16;
    case BFD_RELOC_SPU_IMM7:
      return R_SPU_ADDR7;
    case BFD_RELOC_SPU_IMM8:
      return R_SPU_NONE;
    case BFD_RELOC_SPU_PCREL9a:
      return R_SPU_REL9;
    case BFD_RELOC_SPU_PCREL9b:
      return R_SPU_REL9I;
    case BFD_RELOC_SPU_IMM10:
      return R_SPU_ADDR10I;
    case BFD_RELOC_SPU_IMM16:
      return R_SPU_ADDR16I;
    case BFD_RELOC_32:
      return R_SPU_ADDR32;
    case BFD_RELOC_32_PCREL:
      return R_SPU_REL32;
    case BFD_RELOC_SPU_PPU32:
      return R_SPU_PPU32;
    case BFD_RELOC_SPU_PPU64:
      return R_SPU_PPU64;
    }
}

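/* Set the howto pointer for an SPU ELF reloc based on its type.  */
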
static void
spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
                       arelent *cache_ptr,
                       Elf_Internal_Rela *dst)
{
  enum elf_spu_reloc_type r_type;

  r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
  BFD_ASSERT (r_type < R_SPU_max);
  cache_ptr->howto = &elf_howto_table[(int) r_type];
}

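/* Look up the howto entry for BFD reloc CODE, or return NULL if the
   reloc is not supported.  */
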
static reloc_howto_type *
spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
                           bfd_reloc_code_real_type code)
{
  enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);

  if (r_type == R_SPU_NONE)
    return NULL;

  return elf_howto_table + r_type;
}

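/* Look up a howto entry by reloc name.  */
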
static reloc_howto_type *
spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
                           const char *r_name)
{
  unsigned int i;

  for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
    if (elf_howto_table[i].name != NULL
        && strcasecmp (elf_howto_table[i].name, r_name) == 0)
      return &elf_howto_table[i];

  return NULL;
}

/* Apply R_SPU_REL9 and R_SPU_REL9I relocs.  */

static bfd_reloc_status_type
spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
              void *data, asection *input_section,
              bfd *output_bfd, char **error_message)
{
  bfd_size_type octets;
  bfd_vma val;
  long insn;

  /* If this is a relocatable link (output_bfd test tells us), just
     call the generic function.  Any adjustment will be done at final
     link time.  */
  if (output_bfd != NULL)
    return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
                                  input_section, output_bfd, error_message);

  if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
    return bfd_reloc_outofrange;
  octets = reloc_entry->address * bfd_octets_per_byte (abfd);

  /* Get symbol value.  */
  val = 0;
  if (!bfd_is_com_section (symbol->section))
    val = symbol->value;
  if (symbol->section->output_section)
    val += symbol->section->output_section->vma;

  val += reloc_entry->addend;

  /* Make it pc-relative.  */
  val -= input_section->output_section->vma + input_section->output_offset;

  val >>= 2;
  if (val + 256 >= 512)
    return bfd_reloc_overflow;

  insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);

  /* Move two high bits of value to REL9I and REL9 position.
     The mask will take care of selecting the right field.  */
  val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
  insn &= ~reloc_entry->howto->dst_mask;
  insn |= val & reloc_entry->howto->dst_mask;
  bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
  return bfd_reloc_ok;
}

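/* Attach the SPU-specific section data to each newly created section.  */
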
static bfd_boolean
spu_elf_new_section_hook (bfd *abfd, asection *sec)
{
  if (!sec->used_by_bfd)
    {
      struct _spu_elf_section_data *sdata;

      sdata = bfd_zalloc (abfd, sizeof (*sdata));
      if (sdata == NULL)
        return FALSE;
      sec->used_by_bfd = sdata;
    }

  return _bfd_elf_new_section_hook (abfd, sec);
}

/* Set up overlay info for executables.  */

static bfd_boolean
spu_elf_object_p (bfd *abfd)
{
  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
    {
      unsigned int i, num_ovl, num_buf;
      Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
      Elf_Internal_Ehdr *ehdr = elf_elfheader (abfd);
      Elf_Internal_Phdr *last_phdr = NULL;

      for (num_buf = 0, num_ovl = 0, i = 0; i < ehdr->e_phnum; i++, phdr++)
        if (phdr->p_type == PT_LOAD && (phdr->p_flags & PF_OVERLAY) != 0)
          {
            unsigned int j;

            ++num_ovl;
            if (last_phdr == NULL
                || ((last_phdr->p_vaddr ^ phdr->p_vaddr) & 0x3ffff) != 0)
              ++num_buf;
            last_phdr = phdr;
            for (j = 1; j < elf_numsections (abfd); j++)
              {
                Elf_Internal_Shdr *shdr = elf_elfsections (abfd)[j];

                if (ELF_IS_SECTION_IN_SEGMENT_MEMORY (shdr, phdr))
                  {
                    asection *sec = shdr->bfd_section;
                    spu_elf_section_data (sec)->u.o.ovl_index = num_ovl;
                    spu_elf_section_data (sec)->u.o.ovl_buf = num_buf;
                  }
              }
          }
    }
  return TRUE;
}

/* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
   strip --strip-unneeded will not remove them.  */

static void
spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
{
  if (sym->name != NULL
      && sym->section != bfd_abs_section_ptr
      && strncmp (sym->name, "_EAR_", 5) == 0)
    sym->flags |= BSF_KEEP;
}

/* SPU ELF linker hash table.  */

struct spu_link_hash_table
{
  struct elf_link_hash_table elf;

  /* Shortcuts to overlay sections.  */
  asection *ovtab;
  asection *toe;
  asection **ovl_sec;

  /* Count of stubs in each overlay section.  */
  unsigned int *stub_count;

  /* The stub section for each overlay section.  */
  asection **stub_sec;

  struct elf_link_hash_entry *ovly_load;
  struct elf_link_hash_entry *ovly_return;
  unsigned long ovly_load_r_symndx;

  /* Number of overlay buffers.  */
  unsigned int num_buf;

  /* Total number of overlays.  */
  unsigned int num_overlays;

  /* How much memory we have.  */
  unsigned int local_store;
  /* Local store --auto-overlay should reserve for non-overlay
     functions and data.  */
  unsigned int overlay_fixed;
  /* Local store --auto-overlay should reserve for stack and heap.  */
  unsigned int reserved;
  /* If reserved is not specified, stack analysis will calculate a value
     for the stack.  This parameter adjusts that value to allow for
     negative sp access (the ABI says 2000 bytes below sp are valid,
     and the overlay manager uses some of this area).  */
  int extra_stack_space;
  /* Count of overlay stubs needed in non-overlay area.  */
  unsigned int non_ovly_stub;

  /* Stash various callbacks for --auto-overlay.  */
  void (*spu_elf_load_ovl_mgr) (void);
  FILE *(*spu_elf_open_overlay_script) (void);
  void (*spu_elf_relink) (void);

  /* Bit 0 set if --auto-overlay.
     Bit 1 set if --auto-relink.
     Bit 2 set if --overlay-rodata.  */
  unsigned int auto_overlay : 3;
#define AUTO_OVERLAY 1
#define AUTO_RELINK 2
#define OVERLAY_RODATA 4

  /* Set if we should emit symbols for stubs.  */
  unsigned int emit_stub_syms : 1;

  /* Set if we want stubs on calls out of overlay regions to
     non-overlay regions.  */
  unsigned int non_overlay_stubs : 1;

  /* Set on error.  */
  unsigned int stub_err : 1;

  /* Set if stack size analysis should be done.  */
  unsigned int stack_analysis : 1;

  /* Set if __stack_* syms will be emitted.  */
  unsigned int emit_stack_syms : 1;
};

/* Hijack the generic got fields for overlay stub accounting.  */

struct got_entry
{
  struct got_entry *next;
  unsigned int ovl;
  bfd_vma addend;
  bfd_vma stub_addr;
};

#define spu_hash_table(p) \
  ((struct spu_link_hash_table *) ((p)->hash))

/* Create a spu ELF linker hash table.  */

static struct bfd_link_hash_table *
spu_elf_link_hash_table_create (bfd *abfd)
{
  struct spu_link_hash_table *htab;

  htab = bfd_malloc (sizeof (*htab));
  if (htab == NULL)
    return NULL;

  if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
                                      _bfd_elf_link_hash_newfunc,
                                      sizeof (struct elf_link_hash_entry)))
    {
      free (htab);
      return NULL;
    }

  memset (&htab->ovtab, 0,
          sizeof (*htab) - offsetof (struct spu_link_hash_table, ovtab));

  htab->elf.init_got_refcount.refcount = 0;
  htab->elf.init_got_refcount.glist = NULL;
  htab->elf.init_got_offset.offset = 0;
  htab->elf.init_got_offset.glist = NULL;
  return &htab->elf.root;
}

/* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
   to (hash, NULL) for global symbols, and (NULL, sym) for locals.  Set
   *SYMSECP to the symbol's section.  *LOCSYMSP caches local syms.  */

static bfd_boolean
get_sym_h (struct elf_link_hash_entry **hp,
           Elf_Internal_Sym **symp,
           asection **symsecp,
           Elf_Internal_Sym **locsymsp,
           unsigned long r_symndx,
           bfd *ibfd)
{
  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;

  if (r_symndx >= symtab_hdr->sh_info)
    {
      struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
      struct elf_link_hash_entry *h;

      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
      while (h->root.type == bfd_link_hash_indirect
             || h->root.type == bfd_link_hash_warning)
        h = (struct elf_link_hash_entry *) h->root.u.i.link;

      if (hp != NULL)
        *hp = h;

      if (symp != NULL)
        *symp = NULL;

      if (symsecp != NULL)
        {
          asection *symsec = NULL;
          if (h->root.type == bfd_link_hash_defined
              || h->root.type == bfd_link_hash_defweak)
            symsec = h->root.u.def.section;
          *symsecp = symsec;
        }
    }
  else
    {
      Elf_Internal_Sym *sym;
      Elf_Internal_Sym *locsyms = *locsymsp;

      if (locsyms == NULL)
        {
          locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
          if (locsyms == NULL)
            locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr,
                                            symtab_hdr->sh_info,
                                            0, NULL, NULL, NULL);
          if (locsyms == NULL)
            return FALSE;
          *locsymsp = locsyms;
        }
      sym = locsyms + r_symndx;

      if (hp != NULL)
        *hp = NULL;

      if (symp != NULL)
        *symp = sym;

      if (symsecp != NULL)
        *symsecp = bfd_section_from_elf_index (ibfd, sym->st_shndx);
    }

  return TRUE;
}

/* Create the note section if not already present.  This is done early so
   that the linker maps the sections to the right place in the output.  */

bfd_boolean
spu_elf_create_sections (struct bfd_link_info *info,
                         int stack_analysis,
                         int emit_stack_syms)
{
  bfd *ibfd;
  struct spu_link_hash_table *htab = spu_hash_table (info);

  /* Stash some options away where we can get at them later.  */
  htab->stack_analysis = stack_analysis;
  htab->emit_stack_syms = emit_stack_syms;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
      break;

  if (ibfd == NULL)
    {
      /* Make SPU_PTNOTE_SPUNAME section.  */
      asection *s;
      size_t name_len;
      size_t size;
      bfd_byte *data;
      flagword flags;

      ibfd = info->input_bfds;
      flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
      if (s == NULL
          || !bfd_set_section_alignment (ibfd, s, 4))
        return FALSE;

      name_len = strlen (bfd_get_filename (info->output_bfd)) + 1;
      size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
      size += (name_len + 3) & -4;

      if (!bfd_set_section_size (ibfd, s, size))
        return FALSE;

      data = bfd_zalloc (ibfd, size);
      if (data == NULL)
        return FALSE;

      bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
      bfd_put_32 (ibfd, name_len, data + 4);
      bfd_put_32 (ibfd, 1, data + 8);
      memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
      memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
              bfd_get_filename (info->output_bfd), name_len);
      s->contents = data;
    }

  return TRUE;
}

/* qsort predicate to sort sections by vma.  */

static int
sort_sections (const void *a, const void *b)
{
  const asection *const *s1 = a;
  const asection *const *s2 = b;
  bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;

  if (delta != 0)
    return delta < 0 ? -1 : 1;

  return (*s1)->index - (*s2)->index;
}

/* Identify overlays in the output bfd, and number them.  */

bfd_boolean
spu_elf_find_overlays (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection **alloc_sec;
  unsigned int i, n, ovl_index, num_buf;
  asection *s;
  bfd_vma ovl_end;

  if (info->output_bfd->section_count < 2)
    return FALSE;

  alloc_sec
    = bfd_malloc (info->output_bfd->section_count * sizeof (*alloc_sec));
  if (alloc_sec == NULL)
    return FALSE;

  /* Pick out all the alloced sections.  */
  for (n = 0, s = info->output_bfd->sections; s != NULL; s = s->next)
    if ((s->flags & SEC_ALLOC) != 0
        && (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
        && s->size != 0)
      alloc_sec[n++] = s;

  if (n == 0)
    {
      free (alloc_sec);
      return FALSE;
    }

  /* Sort them by vma.  */
  qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);

  /* Look for overlapping vmas.  Any with overlap must be overlays.
     Count them.  Also count the number of overlay regions.  */
  ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
  for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
    {
      s = alloc_sec[i];
      if (s->vma < ovl_end)
        {
          asection *s0 = alloc_sec[i - 1];

          if (spu_elf_section_data (s0)->u.o.ovl_index == 0)
            {
              alloc_sec[ovl_index] = s0;
              spu_elf_section_data (s0)->u.o.ovl_index = ++ovl_index;
              spu_elf_section_data (s0)->u.o.ovl_buf = ++num_buf;
            }
          alloc_sec[ovl_index] = s;
          spu_elf_section_data (s)->u.o.ovl_index = ++ovl_index;
          spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
          if (s0->vma != s->vma)
            {
              info->callbacks->einfo (_("%X%P: overlay sections %A and %A "
                                        "do not start at the same address.\n"),
                                      s0, s);
              return FALSE;
            }
          if (ovl_end < s->vma + s->size)
            ovl_end = s->vma + s->size;
        }
      else
        ovl_end = s->vma + s->size;
    }

  htab->num_overlays = ovl_index;
  htab->num_buf = num_buf;
  htab->ovl_sec = alloc_sec;
  htab->ovly_load = elf_link_hash_lookup (&htab->elf, "__ovly_load",
                                          FALSE, FALSE, FALSE);
  htab->ovly_return = elf_link_hash_lookup (&htab->elf, "__ovly_return",
                                            FALSE, FALSE, FALSE);
  return ovl_index != 0;
}

/* Support two sizes of overlay stubs, a slower more compact stub of two
   instructions, and a faster stub of four instructions.  */
#ifndef OVL_STUB_SIZE
/* Default to faster.  */
#define OVL_STUB_SIZE 16
/* #define OVL_STUB_SIZE 8 */
#endif
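/* SPU instruction opcodes used when emitting overlay stubs.  */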
#define BRSL	0x33000000
#define BR	0x32000000
#define NOP	0x40200000
#define LNOP	0x00200000
#define ILA	0x42000000

/* Return true for all relative and absolute branch instructions.
   bra   00110000 0..
   brasl 00110001 0..
   br    00110010 0..
   brsl  00110011 0..
   brz   00100000 0..
   brnz  00100001 0..
   brhz  00100010 0..
   brhnz 00100011 0..  */

static bfd_boolean
is_branch (const unsigned char *insn)
{
  return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
}

/* Return true for all indirect branch instructions.
   bi     00110101 000
   bisl   00110101 001
   iret   00110101 010
   bisled 00110101 011
   biz    00100101 000
   binz   00100101 001
   bihz   00100101 010
   bihnz  00100101 011  */

static bfd_boolean
is_indirect_branch (const unsigned char *insn)
{
  return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
}

/* Return true for branch hint instructions.
   hbra  0001000..
   hbrr  0001001..  */

static bfd_boolean
is_hint (const unsigned char *insn)
{
  return (insn[0] & 0xfc) == 0x10;
}

/* True if INPUT_SECTION might need overlay stubs.  */

static bfd_boolean
maybe_needs_stubs (asection *input_section, bfd *output_bfd)
{
  /* No stubs for debug sections and suchlike.  */
  if ((input_section->flags & SEC_ALLOC) == 0)
    return FALSE;

  /* No stubs for link-once sections that will be discarded.  */
  if (input_section->output_section == NULL
      || input_section->output_section->owner != output_bfd)
    return FALSE;

  /* Don't create stubs for .eh_frame references.  */
  if (strcmp (input_section->name, ".eh_frame") == 0)
    return FALSE;

  return TRUE;
}

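/* Result of deciding whether a reloc needs an overlay stub.  */
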
enum _stub_type
{
  no_stub,
  ovl_stub,
  nonovl_stub,
  stub_error
};

/* Return non-zero if this reloc symbol should go via an overlay stub.
   Return 2 if the stub must be in non-overlay area.  */

static enum _stub_type
needs_ovl_stub (struct elf_link_hash_entry *h,
                Elf_Internal_Sym *sym,
                asection *sym_sec,
                asection *input_section,
                Elf_Internal_Rela *irela,
                bfd_byte *contents,
                struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  enum elf_spu_reloc_type r_type;
  unsigned int sym_type;
  bfd_boolean branch;
  enum _stub_type ret = no_stub;

  if (sym_sec == NULL
      || sym_sec->output_section == NULL
      || sym_sec->output_section->owner != info->output_bfd
      || spu_elf_section_data (sym_sec->output_section) == NULL)
    return ret;

  if (h != NULL)
    {
      /* Ensure no stubs for user supplied overlay manager syms.  */
      if (h == htab->ovly_load || h == htab->ovly_return)
        return ret;

      /* setjmp always goes via an overlay stub, because then the return
         and hence the longjmp goes via __ovly_return.  That magically
         makes setjmp/longjmp between overlays work.  */
      if (strncmp (h->root.root.string, "setjmp", 6) == 0
          && (h->root.root.string[6] == '\0' || h->root.root.string[6] == '@'))
        ret = ovl_stub;
    }

  /* Usually, symbols in non-overlay sections don't need stubs.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index == 0
      && !htab->non_overlay_stubs)
    return ret;

  if (h != NULL)
    sym_type = h->type;
  else
    sym_type = ELF_ST_TYPE (sym->st_info);

  r_type = ELF32_R_TYPE (irela->r_info);
  branch = FALSE;
  if (r_type == R_SPU_REL16 || r_type == R_SPU_ADDR16)
    {
      bfd_byte insn[4];

      if (contents == NULL)
        {
          contents = insn;
          if (!bfd_get_section_contents (input_section->owner,
                                         input_section,
                                         contents,
                                         irela->r_offset, 4))
            return stub_error;
        }
      else
        contents += irela->r_offset;

      if (is_branch (contents) || is_hint (contents))
        {
          branch = TRUE;
          if ((contents[0] & 0xfd) == 0x31
              && sym_type != STT_FUNC
              && contents != insn)
            {
              /* It's common for people to write assembly and forget
                 to give function symbols the right type.  Handle
                 calls to such symbols, but warn so that (hopefully)
                 people will fix their code.  We need the symbol
                 type to be correct to distinguish function pointer
                 initialisation from other pointer initialisations.  */
              const char *sym_name;

              if (h != NULL)
                sym_name = h->root.root.string;
              else
                {
                  Elf_Internal_Shdr *symtab_hdr;
                  symtab_hdr = &elf_tdata (input_section->owner)->symtab_hdr;
                  sym_name = bfd_elf_sym_name (input_section->owner,
                                               symtab_hdr,
                                               sym,
                                               sym_sec);
                }
              (*_bfd_error_handler) (_("warning: call to non-function"
                                       " symbol %s defined in %B"),
                                     sym_sec->owner, sym_name);
            }
        }
    }

  if (sym_type != STT_FUNC
      && !branch
      && (sym_sec->flags & SEC_CODE) == 0)
    return ret;

  /* A reference from some other section to a symbol in an overlay
     section needs a stub.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index
      != spu_elf_section_data (input_section->output_section)->u.o.ovl_index)
    ret = ovl_stub;

  /* If this insn isn't a branch then we are possibly taking the
     address of a function and passing it out somehow.  */
  return !branch && sym_type == STT_FUNC ? nonovl_stub : ret;
}

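/* Register the need for a stub, either in the overlay containing ISEC
   or in the non-overlay area, keyed by target symbol and addend.  */
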
static bfd_boolean
count_stub (struct spu_link_hash_table *htab,
            bfd *ibfd,
            asection *isec,
            enum _stub_type stub_type,
            struct elf_link_hash_entry *h,
            const Elf_Internal_Rela *irela)
{
  unsigned int ovl = 0;
  struct got_entry *g, **head;
  bfd_vma addend;

  /* If this instruction is a branch or call, we need a stub
     for it.  One stub per function per overlay.
     If it isn't a branch, then we are taking the address of
     this function so need a stub in the non-overlay area
     for it.  One stub per function.  */
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

  if (h != NULL)
    head = &h->got.glist;
  else
    {
      if (elf_local_got_ents (ibfd) == NULL)
        {
          bfd_size_type amt = (elf_tdata (ibfd)->symtab_hdr.sh_info
                               * sizeof (*elf_local_got_ents (ibfd)));
          elf_local_got_ents (ibfd) = bfd_zmalloc (amt);
          if (elf_local_got_ents (ibfd) == NULL)
            return FALSE;
        }
      head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
    }

  addend = 0;
  if (irela != NULL)
    addend = irela->r_addend;

  if (ovl == 0)
    {
      struct got_entry *gnext;

      for (g = *head; g != NULL; g = g->next)
        if (g->addend == addend && g->ovl == 0)
          break;

      if (g == NULL)
        {
          /* Need a new non-overlay area stub.  Zap other stubs.  */
          for (g = *head; g != NULL; g = gnext)
            {
              gnext = g->next;
              if (g->addend == addend)
                {
                  htab->stub_count[g->ovl] -= 1;
                  free (g);
                }
            }
        }
    }
  else
    {
      for (g = *head; g != NULL; g = g->next)
        if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
          break;
    }

  if (g == NULL)
    {
      g = bfd_malloc (sizeof *g);
      if (g == NULL)
        return FALSE;
      g->ovl = ovl;
      g->addend = addend;
      g->stub_addr = (bfd_vma) -1;
      g->next = *head;
      *head = g;

      htab->stub_count[ovl] += 1;
    }

  return TRUE;
}

/* Two instruction overlay stubs look like:

   brsl $75,__ovly_load
   .word target_ovl_and_address

   ovl_and_address is a word with the overlay number in the top 14 bits
   and local store address in the bottom 18 bits.

   Four instruction overlay stubs look like:

   ila $78,ovl_number
   lnop
   ila $79,target_address
   br __ovly_load  */

static bfd_boolean
build_stub (struct spu_link_hash_table *htab,
            bfd *ibfd,
            asection *isec,
            enum _stub_type stub_type,
            struct elf_link_hash_entry *h,
            const Elf_Internal_Rela *irela,
            bfd_vma dest,
            asection *dest_sec)
{
  unsigned int ovl;
  struct got_entry *g, **head;
  asection *sec;
  bfd_vma addend, val, from, to;

  ovl = 0;
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

  if (h != NULL)
    head = &h->got.glist;
  else
    head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);

  addend = 0;
  if (irela != NULL)
    addend = irela->r_addend;

  for (g = *head; g != NULL; g = g->next)
    if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
      break;
  if (g == NULL)
    abort ();

  if (g->ovl == 0 && ovl != 0)
    return TRUE;

  if (g->stub_addr != (bfd_vma) -1)
    return TRUE;

  sec = htab->stub_sec[ovl];
  dest += dest_sec->output_offset + dest_sec->output_section->vma;
  from = sec->size + sec->output_offset + sec->output_section->vma;
  g->stub_addr = from;
  to = (htab->ovly_load->root.u.def.value
        + htab->ovly_load->root.u.def.section->output_offset
        + htab->ovly_load->root.u.def.section->output_section->vma);
  val = to - from;
  if (OVL_STUB_SIZE == 16)
    val -= 12;
  if (((dest | to | from) & 3) != 0
      || val + 0x40000 >= 0x80000)
    {
      htab->stub_err = 1;
      return FALSE;
    }
  ovl = spu_elf_section_data (dest_sec->output_section)->u.o.ovl_index;

  if (OVL_STUB_SIZE == 16)
    {
      bfd_put_32 (sec->owner, ILA + ((ovl << 7) & 0x01ffff80) + 78,
                  sec->contents + sec->size);
      bfd_put_32 (sec->owner, LNOP,
                  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, ILA + ((dest << 7) & 0x01ffff80) + 79,
                  sec->contents + sec->size + 8);
      bfd_put_32 (sec->owner, BR + ((val << 5) & 0x007fff80),
                  sec->contents + sec->size + 12);
    }
  else if (OVL_STUB_SIZE == 8)
    {
      bfd_put_32 (sec->owner, BRSL + ((val << 5) & 0x007fff80) + 75,
                  sec->contents + sec->size);

      val = (dest & 0x3ffff) | (ovl << 18);
      bfd_put_32 (sec->owner, val,
                  sec->contents + sec->size + 4);
    }
  else
    abort ();
  sec->size += OVL_STUB_SIZE;

  if (htab->emit_stub_syms)
    {
      size_t len;
      char *name;
      int add;

      len = 8 + sizeof (".ovl_call.") - 1;
      if (h != NULL)
        len += strlen (h->root.root.string);
      else
        len += 8 + 1 + 8;
      add = 0;
      if (irela != NULL)
        add = (int) irela->r_addend & 0xffffffff;
      if (add != 0)
        len += 1 + 8;
      name = bfd_malloc (len);
      if (name == NULL)
        return FALSE;

      sprintf (name, "%08x.ovl_call.", g->ovl);
      if (h != NULL)
        strcpy (name + 8 + sizeof (".ovl_call.") - 1, h->root.root.string);
      else
        sprintf (name + 8 + sizeof (".ovl_call.") - 1, "%x:%x",
                 dest_sec->id & 0xffffffff,
                 (int) ELF32_R_SYM (irela->r_info) & 0xffffffff);
      if (add != 0)
        sprintf (name + len - 9, "+%x", add);

      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
      free (name);
      if (h == NULL)
        return FALSE;
      if (h->root.type == bfd_link_hash_new)
        {
          h->root.type = bfd_link_hash_defined;
          h->root.u.def.section = sec;
          h->root.u.def.value = sec->size - OVL_STUB_SIZE;
          h->size = OVL_STUB_SIZE;
          h->type = STT_FUNC;
          h->ref_regular = 1;
          h->def_regular = 1;
          h->ref_regular_nonweak = 1;
          h->forced_local = 1;
          h->non_elf = 0;
        }
    }

  return TRUE;
}

/* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
   symbols.  */

static bfd_boolean
allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
{
  /* Symbols starting with _SPUEAR_ need a stub because they may be
     invoked by the PPU.  */
  struct bfd_link_info *info = inf;
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection *sym_sec;

  if ((h->root.type == bfd_link_hash_defined
       || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
      && (sym_sec = h->root.u.def.section) != NULL
      && sym_sec->output_section != NULL
      && sym_sec->output_section->owner == info->output_bfd
      && spu_elf_section_data (sym_sec->output_section) != NULL
      && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
          || htab->non_overlay_stubs))
    {
      return count_stub (htab, NULL, NULL, nonovl_stub, h, NULL);
    }

  return TRUE;
}

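/* Called via elf_link_hash_traverse to build stubs for any _SPUEAR_
   symbols.  */
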
static bfd_boolean
build_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
{
  /* Symbols starting with _SPUEAR_ need a stub because they may be
     invoked by the PPU.  */
  struct bfd_link_info *info = inf;
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection *sym_sec;

  if ((h->root.type == bfd_link_hash_defined
       || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
      && (sym_sec = h->root.u.def.section) != NULL
      && sym_sec->output_section != NULL
      && sym_sec->output_section->owner == info->output_bfd
      && spu_elf_section_data (sym_sec->output_section) != NULL
      && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
          || htab->non_overlay_stubs))
    {
      return build_stub (htab, NULL, NULL, nonovl_stub, h, NULL,
                         h->root.u.def.value, sym_sec);
    }

  return TRUE;
}

/* Size or build stubs.  */

static bfd_boolean
process_stubs (struct bfd_link_info *info, bfd_boolean build)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      Elf_Internal_Shdr *symtab_hdr;
      asection *isec;
      Elf_Internal_Sym *local_syms = NULL;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
        continue;

      /* We'll need the symbol table in a second.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      if (symtab_hdr->sh_info == 0)
        continue;

      /* Walk over each section attached to the input bfd.  */
      for (isec = ibfd->sections; isec != NULL; isec = isec->next)
        {
          Elf_Internal_Rela *internal_relocs, *irelaend, *irela;

          /* If there aren't any relocs, then there's nothing more to do.  */
          if ((isec->flags & SEC_RELOC) == 0
              || isec->reloc_count == 0)
            continue;

          if (!maybe_needs_stubs (isec, info->output_bfd))
            continue;

          /* Get the relocs.  */
          internal_relocs = _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
                                                       info->keep_memory);
          if (internal_relocs == NULL)
            goto error_ret_free_local;

          /* Now examine each relocation.  */
          irela = internal_relocs;
          irelaend = irela + isec->reloc_count;
          for (; irela < irelaend; irela++)
            {
              enum elf_spu_reloc_type r_type;
              unsigned int r_indx;
              asection *sym_sec;
              Elf_Internal_Sym *sym;
              struct elf_link_hash_entry *h;
              enum _stub_type stub_type;

              r_type = ELF32_R_TYPE (irela->r_info);
              r_indx = ELF32_R_SYM (irela->r_info);

              if (r_type >= R_SPU_max)
                {
                  bfd_set_error (bfd_error_bad_value);
                error_ret_free_internal:
                  if (elf_section_data (isec)->relocs != internal_relocs)
                    free (internal_relocs);
                error_ret_free_local:
                  if (local_syms != NULL
                      && (symtab_hdr->contents
                          != (unsigned char *) local_syms))
                    free (local_syms);
                  return FALSE;
                }

              /* Determine the reloc target section.  */
              if (!get_sym_h (&h, &sym, &sym_sec, &local_syms, r_indx, ibfd))
                goto error_ret_free_internal;

              stub_type = needs_ovl_stub (h, sym, sym_sec, isec, irela,
                                          NULL, info);
              if (stub_type == no_stub)
                continue;
              else if (stub_type == stub_error)
                goto error_ret_free_internal;

              if (htab->stub_count == NULL)
                {
                  bfd_size_type amt;
                  amt = (htab->num_overlays + 1) * sizeof (*htab->stub_count);
                  htab->stub_count = bfd_zmalloc (amt);
                  if (htab->stub_count == NULL)
                    goto error_ret_free_internal;
                }

              if (!build)
                {
                  if (!count_stub (htab, ibfd, isec, stub_type, h, irela))
                    goto error_ret_free_internal;
                }
              else
                {
                  bfd_vma dest;

                  if (h != NULL)
                    dest = h->root.u.def.value;
                  else
                    dest = sym->st_value;
                  dest += irela->r_addend;
                  if (!build_stub (htab, ibfd, isec, stub_type, h, irela,
                                   dest, sym_sec))
                    goto error_ret_free_internal;
                }
            }

          /* We're done with the internal relocs, free them.  */
          if (elf_section_data (isec)->relocs != internal_relocs)
            free (internal_relocs);
        }

      if (local_syms != NULL
          && symtab_hdr->contents != (unsigned char *) local_syms)
        {
          if (!info->keep_memory)
            free (local_syms);
          else
            symtab_hdr->contents = (unsigned char *) local_syms;
        }
    }

  return TRUE;
}

/* Allocate space for overlay call and return stubs.  */

int
spu_elf_size_stubs (struct bfd_link_info *info,
                    void (*place_spu_section) (asection *, asection *,
                                               const char *),
                    int non_overlay_stubs)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;
  bfd_size_type amt;
  flagword flags;
  unsigned int i;
  asection *stub;

  htab->non_overlay_stubs = non_overlay_stubs;
  if (!process_stubs (info, FALSE))
    return 0;

  elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, info);
  if (htab->stub_err)
    return 0;

  if (htab->stub_count == NULL)
    return 1;

  ibfd = info->input_bfds;
  amt = (htab->num_overlays + 1) * sizeof (*htab->stub_sec);
  htab->stub_sec = bfd_zmalloc (amt);
  if (htab->stub_sec == NULL)
    return 0;

  flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
           | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
  stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
  htab->stub_sec[0] = stub;
  if (stub == NULL
      || !bfd_set_section_alignment (ibfd, stub, 3 + (OVL_STUB_SIZE > 8)))
    return 0;
  stub->size = htab->stub_count[0] * OVL_STUB_SIZE;
  (*place_spu_section) (stub, NULL, ".text");

  for (i = 0; i < htab->num_overlays; ++i)
    {
      asection *osec = htab->ovl_sec[i];
      unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
      stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
      htab->stub_sec[ovl] = stub;
      if (stub == NULL
          || !bfd_set_section_alignment (ibfd, stub, 3 + (OVL_STUB_SIZE > 8)))
        return 0;
      stub->size = htab->stub_count[ovl] * OVL_STUB_SIZE;
      (*place_spu_section) (stub, osec, NULL);
    }

  /* htab->ovtab consists of two arrays.
     .	struct {
     .	  u32 vma;
     .	  u32 size;
     .	  u32 file_off;
     .	  u32 buf;
     .	} _ovly_table[];
     .
     .	struct {
     .	  u32 mapped;
     .	} _ovly_buf_table[];
     .  */

  flags = (SEC_ALLOC | SEC_LOAD
           | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
  htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
  if (htab->ovtab == NULL
      || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
    return 0;

  htab->ovtab->size = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
  (*place_spu_section) (htab->ovtab, NULL, ".data");

  htab->toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
  if (htab->toe == NULL
      || !bfd_set_section_alignment (ibfd, htab->toe, 4))
    return 0;
  htab->toe->size = 16;
  (*place_spu_section) (htab->toe, NULL, ".toe");

  return 2;
}

/* Functions to handle embedded spu_ovl.o object.  */

static void *
ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
{
  return stream;
}

static file_ptr
ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
               void *stream,
               void *buf,
               file_ptr nbytes,
               file_ptr offset)
{
  struct _ovl_stream *os;
  size_t count;
  size_t max;

  os = (struct _ovl_stream *) stream;
  max = (const char *) os->end - (const char *) os->start;

  if ((ufile_ptr) offset >= max)
    return 0;

  count = nbytes;
  if (count > max - offset)
    count = max - offset;

  memcpy (buf, (const char *) os->start + offset, count);
  return count;
}

bfd_boolean
spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
{
  *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
                              "elf32-spu",
                              ovl_mgr_open,
                              (void *) stream,
                              ovl_mgr_pread,
                              NULL,
                              NULL);
  return *ovl_bfd != NULL;
}

/* Define an STT_OBJECT symbol.  */

static struct elf_link_hash_entry *
define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
{
  struct elf_link_hash_entry *h;

  h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
  if (h == NULL)
    return NULL;

  if (h->root.type != bfd_link_hash_defined
      || !h->def_regular)
    {
      h->root.type = bfd_link_hash_defined;
      h->root.u.def.section = htab->ovtab;
      h->type = STT_OBJECT;
      h->ref_regular = 1;
      h->def_regular = 1;
      h->ref_regular_nonweak = 1;
      h->non_elf = 0;
    }
  else if (h->root.u.def.section->owner != NULL)
    {
      (*_bfd_error_handler) (_("%B is not allowed to define %s"),
                             h->root.u.def.section->owner,
                             h->root.root.string);
      bfd_set_error (bfd_error_bad_value);
      return NULL;
    }
  else
    {
      (*_bfd_error_handler) (_("you are not allowed to define %s in a script"),
                             h->root.root.string);
      bfd_set_error (bfd_error_bad_value);
      return NULL;
    }

  return h;
}

/* Fill in all stubs and the overlay tables.  */

bfd_boolean
spu_elf_build_stubs (struct bfd_link_info *info, int emit_syms)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  struct elf_link_hash_entry *h;
  bfd_byte *p;
  asection *s;
  bfd *obfd;
  unsigned int i;

  htab->emit_stub_syms = emit_syms;
  if (htab->stub_count == NULL)
    return TRUE;

  for (i = 0; i <= htab->num_overlays; i++)
    if (htab->stub_sec[i]->size != 0)
      {
        htab->stub_sec[i]->contents = bfd_zalloc (htab->stub_sec[i]->owner,
                                                  htab->stub_sec[i]->size);
        if (htab->stub_sec[i]->contents == NULL)
          return FALSE;
        htab->stub_sec[i]->rawsize = htab->stub_sec[i]->size;
        htab->stub_sec[i]->size = 0;
      }

  h = elf_link_hash_lookup (&htab->elf, "__ovly_load", FALSE, FALSE, FALSE);
  htab->ovly_load = h;
  BFD_ASSERT (h != NULL
              && (h->root.type == bfd_link_hash_defined
                  || h->root.type == bfd_link_hash_defweak)
              && h->def_regular);

  s = h->root.u.def.section->output_section;
  if (spu_elf_section_data (s)->u.o.ovl_index)
    {
      (*_bfd_error_handler) (_("%s in overlay section"),
                             h->root.root.string);
      bfd_set_error (bfd_error_bad_value);
      return FALSE;
    }

  h = elf_link_hash_lookup (&htab->elf, "__ovly_return", FALSE, FALSE, FALSE);
  htab->ovly_return = h;

  /* Fill in all the stubs.  */
  process_stubs (info, TRUE);
  if (!htab->stub_err)
    elf_link_hash_traverse (&htab->elf, build_spuear_stubs, info);

  if (htab->stub_err)
    {
      (*_bfd_error_handler) (_("overlay stub relocation overflow"));
      bfd_set_error (bfd_error_bad_value);
      return FALSE;
    }

  for (i = 0; i <= htab->num_overlays; i++)
    {
      if (htab->stub_sec[i]->size != htab->stub_sec[i]->rawsize)
        {
          (*_bfd_error_handler) (_("stubs don't match calculated size"));
          bfd_set_error (bfd_error_bad_value);
          return FALSE;
        }
      htab->stub_sec[i]->rawsize = 0;
    }

  htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
  if (htab->ovtab->contents == NULL)
    return FALSE;

  /* Write out _ovly_table.  */
  p = htab->ovtab->contents;
  /* Set low bit of .size to mark non-overlay area as present.  */
  p[7] = 1;
  obfd = htab->ovtab->output_section->owner;
  for (s = obfd->sections; s != NULL; s = s->next)
    {
      unsigned int ovl_index = spu_elf_section_data (s)->u.o.ovl_index;

      if (ovl_index != 0)
        {
          unsigned long off = ovl_index * 16;
          unsigned int ovl_buf = spu_elf_section_data (s)->u.o.ovl_buf;

          bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
          bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16, p + off + 4);
          /* file_off written later in spu_elf_modify_program_headers.  */
          bfd_put_32 (htab->ovtab->owner, ovl_buf, p + off + 12);
        }
    }

  h = define_ovtab_symbol (htab, "_ovly_table");
  if (h == NULL)
    return FALSE;
  h->root.u.def.value = 16;
  h->size = htab->num_overlays * 16;

  h = define_ovtab_symbol (htab, "_ovly_table_end");
  if (h == NULL)
    return FALSE;
  h->root.u.def.value = htab->num_overlays * 16 + 16;
  h->size = 0;

  h = define_ovtab_symbol (htab, "_ovly_buf_table");
  if (h == NULL)
    return FALSE;
  h->root.u.def.value = htab->num_overlays * 16 + 16;
  h->size = htab->num_buf * 4;

  h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
  if (h == NULL)
    return FALSE;
  h->root.u.def.value = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
  h->size = 0;

  h = define_ovtab_symbol (htab, "_EAR_");
  if (h == NULL)
    return FALSE;
  h->root.u.def.section = htab->toe;
  h->root.u.def.value = 0;
  h->size = 16;

  return TRUE;
}

/* Check that all loadable section VMAs lie in the range
   LO .. HI inclusive, and stash some parameters for --auto-overlay.  */

asection *
spu_elf_check_vma (struct bfd_link_info *info,
                   int auto_overlay,
                   unsigned int lo,
                   unsigned int hi,
                   unsigned int overlay_fixed,
                   unsigned int reserved,
                   int extra_stack_space,
                   void (*spu_elf_load_ovl_mgr) (void),
                   FILE *(*spu_elf_open_overlay_script) (void),
                   void (*spu_elf_relink) (void))
{
  struct elf_segment_map *m;
  unsigned int i;
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *abfd = info->output_bfd;

  if (auto_overlay & AUTO_OVERLAY)
    htab->auto_overlay = auto_overlay;
  htab->local_store = hi + 1 - lo;
  htab->overlay_fixed = overlay_fixed;
  htab->reserved = reserved;
  htab->extra_stack_space = extra_stack_space;
  htab->spu_elf_load_ovl_mgr = spu_elf_load_ovl_mgr;
  htab->spu_elf_open_overlay_script = spu_elf_open_overlay_script;
  htab->spu_elf_relink = spu_elf_relink;

  for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
    if (m->p_type == PT_LOAD)
      for (i = 0; i < m->count; i++)
        if (m->sections[i]->size != 0
            && (m->sections[i]->vma < lo
                || m->sections[i]->vma > hi
                || m->sections[i]->vma + m->sections[i]->size - 1 > hi))
          return m->sections[i];

  /* No need for overlays if it all fits.  */
  htab->auto_overlay = 0;
  return NULL;
}

/* OFFSET in SEC (presumably) is the beginning of a function prologue.
   Search for stack adjusting insns, and return the sp delta.  */

static int
find_function_stack_adjust (asection *sec, bfd_vma offset)
{
  int reg[128];

  memset (reg, 0, sizeof (reg));
  for ( ; offset + 4 <= sec->size; offset += 4)
    {
      unsigned char buf[4];
      int rt, ra;
      int imm;

      /* Assume no relocs on stack adjusting insns.  */
      if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
        break;

      if (buf[0] == 0x24 /* stqd */)
        continue;

      rt = buf[3] & 0x7f;
      ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);
      /* Partly decoded immediate field.  */
      imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);

      if (buf[0] == 0x1c /* ai */)
        {
          imm >>= 7;
          imm = (imm ^ 0x200) - 0x200;
          reg[rt] = reg[ra] + imm;

          if (rt == 1 /* sp */)
            {
              if (reg[rt] > 0)
                break;
              return reg[rt];
            }
        }
      else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
        {
          int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);

          reg[rt] = reg[ra] + reg[rb];
          if (rt == 1)
            {
              if (reg[rt] > 0)
                break;
              return reg[rt];
            }
        }
      else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
        {
          if (buf[0] >= 0x42 /* ila */)
            imm |= (buf[0] & 1) << 17;
          else
            {
              imm &= 0xffff;

              if (buf[0] == 0x40 /* il */)
                {
                  if ((buf[1] & 0x80) == 0)
                    continue;
                  imm = (imm ^ 0x8000) - 0x8000;
                }
              else if ((buf[1] & 0x80) == 0 /* ilhu */)
                imm <<= 16;
            }
          reg[rt] = imm;
          continue;
        }
      else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
        {
          reg[rt] |= imm & 0xffff;
          continue;
        }
      else if (buf[0] == 0x04 /* ori */)
        {
          imm >>= 7;
          imm = (imm ^ 0x200) - 0x200;
          reg[rt] = reg[ra] | imm;
          continue;
        }
      else if (buf[0] == 0x32 && (buf[1] & 0x80) != 0 /* fsmbi */)
        {
          reg[rt] = (  ((imm & 0x8000) ? 0xff000000 : 0)
                     | ((imm & 0x4000) ? 0x00ff0000 : 0)
                     | ((imm & 0x2000) ? 0x0000ff00 : 0)
                     | ((imm & 0x1000) ? 0x000000ff : 0));
          continue;
        }
      else if (buf[0] == 0x16 /* andbi */)
        {
          imm >>= 7;
          imm &= 0xff;
          imm |= imm << 8;
          imm |= imm << 16;
          reg[rt] = reg[ra] & imm;
          continue;
        }
      else if (buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
        {
          /* Used in pic reg load.  Say rt is trashed.  Won't be used
             in stack adjust, but we need to continue past this branch.  */
          reg[rt] = 0;
          continue;
        }
      else if (is_branch (buf) || is_indirect_branch (buf))
        /* If we hit a branch then we must be out of the prologue.  */
        break;
    }

  return 0;
}

/* qsort predicate to sort symbols by section and value.  */

static Elf_Internal_Sym *sort_syms_syms;
static asection **sort_syms_psecs;

static int
sort_syms (const void *a, const void *b)
{
  Elf_Internal_Sym *const *s1 = a;
  Elf_Internal_Sym *const *s2 = b;
  asection *sec1, *sec2;
  bfd_signed_vma delta;

  sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
  sec2 = sort_syms_psecs[*s2 - sort_syms_syms];

  if (sec1 != sec2)
    return sec1->index - sec2->index;

  delta = (*s1)->st_value - (*s2)->st_value;
  if (delta != 0)
    return delta < 0 ? -1 : 1;

  delta = (*s2)->st_size - (*s1)->st_size;
  if (delta != 0)
    return delta < 0 ? -1 : 1;

  return *s1 < *s2 ? -1 : 1;
}

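/* An edge in the call graph built for stack analysis: one call (or
   tail call, or hot/cold branch) from one function to another.  */
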
struct call_info
{
  struct function_info *fun;
  struct call_info *next;
  unsigned int count;
  unsigned int max_depth;
  unsigned int is_tail : 1;
  unsigned int is_pasted : 1;
};

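/* Information about a function, or a contiguous part of one, gathered
   for stack analysis.  */
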
struct function_info
{
  /* List of functions called.  Also branches to hot/cold part of
     function.  */
  struct call_info *call_list;
  /* For hot/cold part of function, point to owner.  */
  struct function_info *start;
  /* Symbol at start of function.  */
  union {
    Elf_Internal_Sym *sym;
    struct elf_link_hash_entry *h;
  } u;
  /* Function section.  */
  asection *sec;
  asection *rodata;
  /* Where last called from, and number of sections called from.  */
  asection *last_caller;
  unsigned int call_count;
  /* Address range of (this part of) function.  */
  bfd_vma lo, hi;
  /* Stack usage.  */
  int stack;
  /* Distance from root of call tree.  Tail and hot/cold branches
     count as one deeper.  We aren't counting stack frames here.  */
  unsigned int depth;
  /* Set if global symbol.  */
  unsigned int global : 1;
  /* Set if known to be start of function (as distinct from a hunk
     in hot/cold section).  */
  unsigned int is_func : 1;
  /* Set if not a root node.  */
  unsigned int non_root : 1;
  /* Flags used during call tree traversal.  It's cheaper to replicate
     the visit flags than have one which needs clearing after a traversal.  */
  unsigned int visit1 : 1;
  unsigned int visit2 : 1;
  unsigned int marking : 1;
  unsigned int visit3 : 1;
  unsigned int visit4 : 1;
  unsigned int visit5 : 1;
  unsigned int visit6 : 1;
  unsigned int visit7 : 1;
};

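/* Stack analysis info attached to each input section.  */
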
struct spu_elf_stack_info
{
  int num_fun;
  int max_fun;
  /* Variable size array describing functions, one per contiguous
     address range belonging to a function.  */
  struct function_info fun[1];
};

/* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
   entries for section SEC.  */

static struct spu_elf_stack_info *
alloc_stack_info (asection *sec, int max_fun)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  bfd_size_type amt;

  amt = sizeof (struct spu_elf_stack_info);
  amt += (max_fun - 1) * sizeof (struct function_info);
  sec_data->u.i.stack_info = bfd_zmalloc (amt);
  if (sec_data->u.i.stack_info != NULL)
    sec_data->u.i.stack_info->max_fun = max_fun;
  return sec_data->u.i.stack_info;
}

1821/* Add a new struct function_info describing a (part of a) function
1822 starting at SYM_H. Keep the array sorted by address. */
1823
1824static struct function_info *
1825maybe_insert_function (asection *sec,
1826 void *sym_h,
1827 bfd_boolean global,
1828 bfd_boolean is_func)
1829{
1830 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
47f6dab9 1831 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
1832 int i;
1833 bfd_vma off, size;
1834
1835 if (sinfo == NULL)
1836 {
1837 sinfo = alloc_stack_info (sec, 20);
1838 if (sinfo == NULL)
1839 return NULL;
1840 }
1841
1842 if (!global)
1843 {
1844 Elf_Internal_Sym *sym = sym_h;
1845 off = sym->st_value;
1846 size = sym->st_size;
1847 }
1848 else
1849 {
1850 struct elf_link_hash_entry *h = sym_h;
1851 off = h->root.u.def.value;
1852 size = h->size;
1853 }
1854
1855 for (i = sinfo->num_fun; --i >= 0; )
1856 if (sinfo->fun[i].lo <= off)
1857 break;
1858
1859 if (i >= 0)
1860 {
1861 /* Don't add another entry for an alias, but do update some
1862 info. */
1863 if (sinfo->fun[i].lo == off)
1864 {
1865 /* Prefer globals over local syms. */
1866 if (global && !sinfo->fun[i].global)
1867 {
1868 sinfo->fun[i].global = TRUE;
1869 sinfo->fun[i].u.h = sym_h;
1870 }
1871 if (is_func)
1872 sinfo->fun[i].is_func = TRUE;
1873 return &sinfo->fun[i];
1874 }
1875 /* Ignore a zero-size symbol inside an existing function. */
1876 else if (sinfo->fun[i].hi > off && size == 0)
1877 return &sinfo->fun[i];
1878 }
1879
1f27ab8d 1880 if (sinfo->num_fun >= sinfo->max_fun)
1881 {
1882 bfd_size_type amt = sizeof (struct spu_elf_stack_info);
1883 bfd_size_type old = amt;
1884
1885 old += (sinfo->max_fun - 1) * sizeof (struct function_info);
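/* Grow by half the current size plus 20 entries; the newly added tail
   is zeroed below.  */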
1886 sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
1887 amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
1888 sinfo = bfd_realloc (sinfo, amt);
1889 if (sinfo == NULL)
1890 return NULL;
1891 memset ((char *) sinfo + old, 0, amt - old);
47f6dab9 1892 sec_data->u.i.stack_info = sinfo;
49fa1e15 1893 }
1894
1895 if (++i < sinfo->num_fun)
1896 memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
1897 (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
1898 sinfo->fun[i].is_func = is_func;
1899 sinfo->fun[i].global = global;
1900 sinfo->fun[i].sec = sec;
1901 if (global)
1902 sinfo->fun[i].u.h = sym_h;
1903 else
1904 sinfo->fun[i].u.sym = sym_h;
1905 sinfo->fun[i].lo = off;
1906 sinfo->fun[i].hi = off + size;
1907 sinfo->fun[i].stack = -find_function_stack_adjust (sec, off);
1908 sinfo->num_fun += 1;
1909 return &sinfo->fun[i];
1910}
1911
1912/* Return the name of FUN. */
1913
1914static const char *
1915func_name (struct function_info *fun)
1916{
1917 asection *sec;
1918 bfd *ibfd;
1919 Elf_Internal_Shdr *symtab_hdr;
1920
1921 while (fun->start != NULL)
1922 fun = fun->start;
1923
1924 if (fun->global)
1925 return fun->u.h->root.root.string;
1926
1927 sec = fun->sec;
1928 if (fun->u.sym->st_name == 0)
1929 {
1930 size_t len = strlen (sec->name);
1931 char *name = bfd_malloc (len + 10);
1932 if (name == NULL)
1933 return "(null)";
1934 sprintf (name, "%s+%lx", sec->name,
1935 (unsigned long) fun->u.sym->st_value & 0xffffffff);
1936 return name;
1937 }
1938 ibfd = sec->owner;
1939 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
1940 return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
1941}
1942
1943/* Read the instruction at OFF in SEC. Return true iff the instruction
1944 is a nop, lnop, or stop 0 (all zero insn). */
1945
1946static bfd_boolean
1947is_nop (asection *sec, bfd_vma off)
1948{
1949 unsigned char insn[4];
1950
1951 if (off + 4 > sec->size
1952 || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
1953 return FALSE;
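/* nop is 0x40200000 and lnop is 0x00200000; the masked compare below
   accepts either encoding.  */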
1954 if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
1955 return TRUE;
1956 if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
1957 return TRUE;
1958 return FALSE;
1959}
1960
1961/* Extend the range of FUN to cover nop padding up to LIMIT.
1962 Return TRUE iff some instruction other than a NOP was found. */
1963
1964static bfd_boolean
1965insns_at_end (struct function_info *fun, bfd_vma limit)
1966{
1967 bfd_vma off = (fun->hi + 3) & -4;
1968
1969 while (off < limit && is_nop (fun->sec, off))
1970 off += 4;
1971 if (off < limit)
1972 {
1973 fun->hi = off;
1974 return TRUE;
1975 }
1976 fun->hi = limit;
1977 return FALSE;
1978}
1979
1980/* Check and fix overlapping function ranges. Return TRUE iff there
1981 are gaps in the current info we have about functions in SEC. */
1982
1983static bfd_boolean
1984check_function_ranges (asection *sec, struct bfd_link_info *info)
1985{
1986 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
47f6dab9 1987 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
1988 int i;
1989 bfd_boolean gaps = FALSE;
1990
1991 if (sinfo == NULL)
1992 return FALSE;
1993
1994 for (i = 1; i < sinfo->num_fun; i++)
1995 if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
1996 {
1997 /* Fix overlapping symbols. */
1998 const char *f1 = func_name (&sinfo->fun[i - 1]);
1999 const char *f2 = func_name (&sinfo->fun[i]);
2000
2001 info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
2002 sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
2003 }
2004 else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
2005 gaps = TRUE;
2006
2007 if (sinfo->num_fun == 0)
2008 gaps = TRUE;
2009 else
2010 {
2011 if (sinfo->fun[0].lo != 0)
2012 gaps = TRUE;
2013 if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
2014 {
2015 const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);
2016
2017 info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
2018 sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
2019 }
2020 else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
2021 gaps = TRUE;
2022 }
2023 return gaps;
2024}
2025
2026/* Search current function info for a function that contains address
2027 OFFSET in section SEC. */
2028
2029static struct function_info *
2030find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
2031{
2032 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
47f6dab9 2033 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2034 int lo, hi, mid;
2035
2036 lo = 0;
2037 hi = sinfo->num_fun;
2038 while (lo < hi)
2039 {
2040 mid = (lo + hi) / 2;
2041 if (offset < sinfo->fun[mid].lo)
2042 hi = mid;
2043 else if (offset >= sinfo->fun[mid].hi)
2044 lo = mid + 1;
2045 else
2046 return &sinfo->fun[mid];
2047 }
2048 info->callbacks->einfo (_("%A:0x%v not found in function table\n"),
2049 sec, offset);
2050 return NULL;
2051}
2052
2053/* Add CALLEE to CALLER call list if not already present. Return TRUE
2054 if CALLEE was new. If this function returns FALSE, CALLEE should
2055 be freed. */
2056
2057static bfd_boolean
2058insert_callee (struct function_info *caller, struct call_info *callee)
2059{
2060 struct call_info **pp, *p;
2061
2062 for (pp = &caller->call_list; (p = *pp) != NULL; pp = &p->next)
2063 if (p->fun == callee->fun)
2064 {
2065 /* Tail calls use less stack than normal calls. Retain entry
2066 for normal call over one for tail call. */
2067 p->is_tail &= callee->is_tail;
2068 if (!p->is_tail)
2069 {
2070 p->fun->start = NULL;
2071 p->fun->is_func = TRUE;
2072 }
9dcc4794 2073 p->count += 1;
2074 /* Reorder list so most recent call is first. */
2075 *pp = p->next;
2076 p->next = caller->call_list;
2077 caller->call_list = p;
2078 return FALSE;
2079 }
2080 callee->next = caller->call_list;
9dcc4794 2081 callee->count += 1;
2082 caller->call_list = callee;
2083 return TRUE;
2084}
2085
2086/* Copy CALL and insert the copy into CALLER. */
2087
2088static bfd_boolean
2089copy_callee (struct function_info *caller, const struct call_info *call)
2090{
2091 struct call_info *callee;
2092 callee = bfd_malloc (sizeof (*callee));
2093 if (callee == NULL)
2094 return FALSE;
2095 *callee = *call;
2096 if (!insert_callee (caller, callee))
2097 free (callee);
2098 return TRUE;
2099}
2100
2101/* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2102 overlay stub sections. */
2103
2104static bfd_boolean
2105interesting_section (asection *s, bfd *obfd)
2106{
2107 return (s->output_section != NULL
2108 && s->output_section->owner == obfd
2109 && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_IN_MEMORY))
2110 == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2111 && s->size != 0);
2112}
2113
2114/* Rummage through the relocs for SEC, looking for function calls.
2115 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
2116 mark destination symbols on calls as being functions. Also
2117 look at branches, which may be tail calls or go to hot/cold
2118 section part of same function. */
2119
2120static bfd_boolean
2121mark_functions_via_relocs (asection *sec,
2122 struct bfd_link_info *info,
2123 int call_tree)
2124{
2125 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
2ec9638b 2126 Elf_Internal_Shdr *symtab_hdr;
d0249648 2127 void *psyms;
2128 static bfd_boolean warned;
2129
2130 if (!interesting_section (sec, info->output_bfd)
2131 || sec->reloc_count == 0)
2132 return TRUE;
2133
2134 internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
2135 info->keep_memory);
2136 if (internal_relocs == NULL)
2137 return FALSE;
2138
2139 symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
d0249648 2140 psyms = &symtab_hdr->contents;
2141 irela = internal_relocs;
2142 irelaend = irela + sec->reloc_count;
2143 for (; irela < irelaend; irela++)
2144 {
2145 enum elf_spu_reloc_type r_type;
2146 unsigned int r_indx;
2147 asection *sym_sec;
2148 Elf_Internal_Sym *sym;
2149 struct elf_link_hash_entry *h;
2150 bfd_vma val;
9dcc4794 2151 bfd_boolean reject, is_call;
2152 struct function_info *caller;
2153 struct call_info *callee;
2154
9dcc4794 2155 reject = FALSE;
2156 r_type = ELF32_R_TYPE (irela->r_info);
2157 if (r_type != R_SPU_REL16
2158 && r_type != R_SPU_ADDR16)
2159 {
2160 reject = TRUE;
2161 if (!(call_tree && spu_hash_table (info)->auto_overlay))
2162 continue;
2163 }
2164
2165 r_indx = ELF32_R_SYM (irela->r_info);
2166 if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
2167 return FALSE;
2168
2169 if (sym_sec == NULL
2170 || sym_sec->output_section == NULL
055ed83b 2171 || sym_sec->output_section->owner != info->output_bfd)
2172 continue;
2173
2174 is_call = FALSE;
2175 if (!reject)
2176 {
2177 unsigned char insn[4];
2178
2179 if (!bfd_get_section_contents (sec->owner, sec, insn,
2180 irela->r_offset, 4))
2181 return FALSE;
2182 if (is_branch (insn))
2183 {
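/* The branch-and-set-link forms (brsl/brasl) indicate a call rather
   than a plain branch or tail call.  */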
2184 is_call = (insn[0] & 0xfd) == 0x31;
2185 if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2186 != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2187 {
2188 if (!warned)
2189 info->callbacks->einfo
2190 (_("%B(%A+0x%v): call to non-code section"
2191 " %B(%A), analysis incomplete\n"),
2192 sec->owner, sec, irela->r_offset,
2193 sym_sec->owner, sym_sec);
2194 warned = TRUE;
2195 continue;
2196 }
2197 }
2198 else
2199 {
2200 reject = TRUE;
2201 if (!(call_tree && spu_hash_table (info)->auto_overlay)
2202 || is_hint (insn))
2203 continue;
2204 }
2205 }
49fa1e15 2206
9dcc4794 2207 if (reject)
49fa1e15 2208 {
2209 /* For --auto-overlay, count possible stubs we need for
2210 function pointer references. */
2211 unsigned int sym_type;
2212 if (h)
2213 sym_type = h->type;
2214 else
2215 sym_type = ELF_ST_TYPE (sym->st_info);
2216 if (sym_type == STT_FUNC)
2217 spu_hash_table (info)->non_ovly_stub += 1;
2218 continue;
2219 }
2220
2221 if (h)
2222 val = h->root.u.def.value;
2223 else
2224 val = sym->st_value;
2225 val += irela->r_addend;
2226
2227 if (!call_tree)
2228 {
2229 struct function_info *fun;
2230
2231 if (irela->r_addend != 0)
2232 {
2233 Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
2234 if (fake == NULL)
2235 return FALSE;
2236 fake->st_value = val;
2237 fake->st_shndx
2238 = _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
2239 sym = fake;
2240 }
2241 if (sym)
2242 fun = maybe_insert_function (sym_sec, sym, FALSE, is_call);
2243 else
2244 fun = maybe_insert_function (sym_sec, h, TRUE, is_call);
2245 if (fun == NULL)
2246 return FALSE;
2247 if (irela->r_addend != 0
2248 && fun->u.sym != sym)
2249 free (sym);
2250 continue;
2251 }
2252
2253 caller = find_function (sec, irela->r_offset, info);
2254 if (caller == NULL)
2255 return FALSE;
2256 callee = bfd_malloc (sizeof *callee);
2257 if (callee == NULL)
2258 return FALSE;
2259
2260 callee->fun = find_function (sym_sec, val, info);
2261 if (callee->fun == NULL)
2262 return FALSE;
2263 callee->is_tail = !is_call;
2264 callee->is_pasted = FALSE;
2265 callee->count = 0;
2266 if (callee->fun->last_caller != sec)
2267 {
2268 callee->fun->last_caller = sec;
2269 callee->fun->call_count += 1;
2270 }
2271 if (!insert_callee (caller, callee))
2272 free (callee);
2273 else if (!is_call
2274 && !callee->fun->is_func
2275 && callee->fun->stack == 0)
2276 {
2277 /* This is either a tail call or a branch from one part of
2278 the function to another, ie. hot/cold section. If the
2279 destination has been called by some other function then
2280 it is a separate function. We also assume that functions
2281 are not split across input files. */
911f096e 2282 if (sec->owner != sym_sec->owner)
2283 {
2284 callee->fun->start = NULL;
2285 callee->fun->is_func = TRUE;
2286 }
911f096e 2287 else if (callee->fun->start == NULL)
49fa1e15 2288 callee->fun->start = caller;
2289 else
2290 {
2291 struct function_info *callee_start;
2292 struct function_info *caller_start;
2293 callee_start = callee->fun;
2294 while (callee_start->start)
2295 callee_start = callee_start->start;
2296 caller_start = caller;
2297 while (caller_start->start)
2298 caller_start = caller_start->start;
2299 if (caller_start != callee_start)
2300 {
2301 callee->fun->start = NULL;
2302 callee->fun->is_func = TRUE;
2303 }
2304 }
2305 }
2306 }
2307
2308 return TRUE;
2309}
2310
2311/* Handle something like .init or .fini, which has a piece of a function.
2312 These sections are pasted together to form a single function. */
2313
2314static bfd_boolean
2315pasted_function (asection *sec, struct bfd_link_info *info)
2316{
2317 struct bfd_link_order *l;
2318 struct _spu_elf_section_data *sec_data;
2319 struct spu_elf_stack_info *sinfo;
2320 Elf_Internal_Sym *fake;
2321 struct function_info *fun, *fun_start;
2322
2323 fake = bfd_zmalloc (sizeof (*fake));
2324 if (fake == NULL)
2325 return FALSE;
2326 fake->st_value = 0;
2327 fake->st_size = sec->size;
2328 fake->st_shndx
2329 = _bfd_elf_section_from_bfd_section (sec->owner, sec);
2330 fun = maybe_insert_function (sec, fake, FALSE, FALSE);
2331 if (!fun)
2332 return FALSE;
2333
2334 /* Find a function immediately preceding this section. */
2335 fun_start = NULL;
2336 for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
2337 {
2338 if (l->u.indirect.section == sec)
2339 {
2340 if (fun_start != NULL)
2341 {
2342 struct call_info *callee = bfd_malloc (sizeof *callee);
2343 if (callee == NULL)
2344 return FALSE;
2345
2346 fun->start = fun_start;
2347 callee->fun = fun;
2348 callee->is_tail = TRUE;
2349 callee->is_pasted = TRUE;
2350 callee->count = 0;
2351 if (!insert_callee (fun_start, callee))
2352 free (callee);
2353 return TRUE;
2354 }
2355 break;
2356 }
2357 if (l->type == bfd_indirect_link_order
2358 && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
47f6dab9 2359 && (sinfo = sec_data->u.i.stack_info) != NULL
2360 && sinfo->num_fun != 0)
2361 fun_start = &sinfo->fun[sinfo->num_fun - 1];
2362 }
2363
2364 info->callbacks->einfo (_("%A link_order not found\n"), sec);
2365 return FALSE;
2366}
2367
2368/* Map address ranges in code sections to functions. */
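/* This is done in several passes: install properly typed and sized
   function symbols first; if gaps remain, mark call and branch targets
   found via relocations and install global symbols; finally extend
   zero-size functions and treat symbol-less code sections such as
   .init/.fini pieces as pasted functions.  */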
2369
2370static bfd_boolean
c65be8d7 2371discover_functions (struct bfd_link_info *info)
49fa1e15 2372{
2373 bfd *ibfd;
2374 int bfd_idx;
2375 Elf_Internal_Sym ***psym_arr;
2376 asection ***sec_arr;
2377 bfd_boolean gaps = FALSE;
2378
2379 bfd_idx = 0;
2380 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2381 bfd_idx++;
2382
2383 psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
2384 if (psym_arr == NULL)
2385 return FALSE;
2386 sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
2387 if (sec_arr == NULL)
2388 return FALSE;
2389
2390
2391 for (ibfd = info->input_bfds, bfd_idx = 0;
2392 ibfd != NULL;
2393 ibfd = ibfd->link_next, bfd_idx++)
2394 {
2395 extern const bfd_target bfd_elf32_spu_vec;
2396 Elf_Internal_Shdr *symtab_hdr;
2397 asection *sec;
2398 size_t symcount;
2399 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2400 asection **psecs, **p;
2401
2402 if (ibfd->xvec != &bfd_elf32_spu_vec)
2403 continue;
2404
2405 /* Read all the symbols. */
2406 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2407 symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
2408 if (symcount == 0)
2409 {
2410 if (!gaps)
2411 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2412 if (interesting_section (sec, info->output_bfd))
2413 {
2414 gaps = TRUE;
2415 break;
2416 }
2417 continue;
2418 }
49fa1e15 2419
1f27ab8d 2420 if (symtab_hdr->contents != NULL)
49fa1e15 2421 {
2422 /* Don't use cached symbols since the generic ELF linker
2423 code only reads local symbols, and we need globals too. */
2424 free (symtab_hdr->contents);
2425 symtab_hdr->contents = NULL;
49fa1e15 2426 }
2427 syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
2428 NULL, NULL, NULL);
2429 symtab_hdr->contents = (void *) syms;
2430 if (syms == NULL)
2431 return FALSE;
2432
2433 /* Select defined function symbols that are going to be output. */
2434 psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
2435 if (psyms == NULL)
2436 return FALSE;
2437 psym_arr[bfd_idx] = psyms;
2438 psecs = bfd_malloc (symcount * sizeof (*psecs));
2439 if (psecs == NULL)
2440 return FALSE;
2441 sec_arr[bfd_idx] = psecs;
2442 for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
2443 if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
2444 || ELF_ST_TYPE (sy->st_info) == STT_FUNC
2445 || ELF_ST_TYPE (sy->st_info) == STT_SECTION)
2446 {
2447 asection *s;
2448
2449 *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
c65be8d7 2450 if (s != NULL && interesting_section (s, info->output_bfd))
2451 *psy++ = sy;
2452 }
2453 symcount = psy - psyms;
2454 *psy = NULL;
2455
2456 /* Sort them by section and offset within section. */
2457 sort_syms_syms = syms;
2458 sort_syms_psecs = psecs;
2459 qsort (psyms, symcount, sizeof (*psyms), sort_syms);
2460
2461 /* Now inspect the function symbols. */
2462 for (psy = psyms; psy < psyms + symcount; )
2463 {
2464 asection *s = psecs[*psy - syms];
2465 Elf_Internal_Sym **psy2;
2466
2467 for (psy2 = psy; ++psy2 < psyms + symcount; )
2468 if (psecs[*psy2 - syms] != s)
2469 break;
2470
2471 if (!alloc_stack_info (s, psy2 - psy))
2472 return FALSE;
2473 psy = psy2;
2474 }
2475
2476 /* First install info about properly typed and sized functions.
2477 In an ideal world this will cover all code sections, except
2478 when partitioning functions into hot and cold sections,
2479 and the horrible pasted together .init and .fini functions. */
2480 for (psy = psyms; psy < psyms + symcount; ++psy)
2481 {
2482 sy = *psy;
2483 if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
2484 {
2485 asection *s = psecs[sy - syms];
2486 if (!maybe_insert_function (s, sy, FALSE, TRUE))
2487 return FALSE;
2488 }
2489 }
2490
2491 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
c65be8d7 2492 if (interesting_section (sec, info->output_bfd))
2493 gaps |= check_function_ranges (sec, info);
2494 }
2495
2496 if (gaps)
2497 {
2498 /* See if we can discover more function symbols by looking at
2499 relocations. */
2500 for (ibfd = info->input_bfds, bfd_idx = 0;
2501 ibfd != NULL;
2502 ibfd = ibfd->link_next, bfd_idx++)
2503 {
2504 asection *sec;
2505
2506 if (psym_arr[bfd_idx] == NULL)
2507 continue;
2508
2509 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2510 if (!mark_functions_via_relocs (sec, info, FALSE))
2511 return FALSE;
2512 }
2513
2514 for (ibfd = info->input_bfds, bfd_idx = 0;
2515 ibfd != NULL;
2516 ibfd = ibfd->link_next, bfd_idx++)
2517 {
2518 Elf_Internal_Shdr *symtab_hdr;
2519 asection *sec;
2520 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2521 asection **psecs;
2522
2523 if ((psyms = psym_arr[bfd_idx]) == NULL)
2524 continue;
2525
2526 psecs = sec_arr[bfd_idx];
2527
2528 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2529 syms = (Elf_Internal_Sym *) symtab_hdr->contents;
2530
2531 gaps = FALSE;
2532 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
c65be8d7 2533 if (interesting_section (sec, info->output_bfd))
2534 gaps |= check_function_ranges (sec, info);
2535 if (!gaps)
2536 continue;
2537
2538 /* Finally, install all globals. */
2539 for (psy = psyms; (sy = *psy) != NULL; ++psy)
2540 {
2541 asection *s;
2542
2543 s = psecs[sy - syms];
2544
2545 /* Global syms might be improperly typed functions. */
2546 if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
2547 && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
2548 {
2549 if (!maybe_insert_function (s, sy, FALSE, FALSE))
2550 return FALSE;
2551 }
2552 }
2553 }
2554
2555 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2556 {
2557 extern const bfd_target bfd_elf32_spu_vec;
2558 asection *sec;
2559
2560 if (ibfd->xvec != &bfd_elf32_spu_vec)
2561 continue;
2562
2563 /* Some of the symbols we've installed as marking the
2564 beginning of functions may have a size of zero. Extend
2565 the range of such functions to the beginning of the
2566 next symbol of interest. */
2567 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
c65be8d7 2568 if (interesting_section (sec, info->output_bfd))
2569 {
2570 struct _spu_elf_section_data *sec_data;
2571 struct spu_elf_stack_info *sinfo;
2572
2573 sec_data = spu_elf_section_data (sec);
47f6dab9 2574 sinfo = sec_data->u.i.stack_info;
2575 if (sinfo != NULL)
2576 {
2577 int fun_idx;
2578 bfd_vma hi = sec->size;
2579
2580 for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
2581 {
2582 sinfo->fun[fun_idx].hi = hi;
2583 hi = sinfo->fun[fun_idx].lo;
2584 }
2585 }
2586 /* No symbols in this section. Must be .init or .fini
2587 or something similar. */
2588 else if (!pasted_function (sec, info))
2589 return FALSE;
2590 }
2591 }
2592 }
2593
2594 for (ibfd = info->input_bfds, bfd_idx = 0;
2595 ibfd != NULL;
2596 ibfd = ibfd->link_next, bfd_idx++)
2597 {
2598 if (psym_arr[bfd_idx] == NULL)
2599 continue;
2600
2601 free (psym_arr[bfd_idx]);
2602 free (sec_arr[bfd_idx]);
2603 }
2604
2605 free (psym_arr);
2606 free (sec_arr);
2607
2608 return TRUE;
2609}
2610
2611/* Iterate over all function_info we have collected, calling DOIT on
2612 each node if ROOT_ONLY is false. Only call DOIT on root nodes
2613 if ROOT_ONLY. */
2614
2615static bfd_boolean
2616for_each_node (bfd_boolean (*doit) (struct function_info *,
2617 struct bfd_link_info *,
2618 void *),
2619 struct bfd_link_info *info,
2620 void *param,
2621 int root_only)
2622{
2623 bfd *ibfd;
2624
2625 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2626 {
2627 extern const bfd_target bfd_elf32_spu_vec;
2628 asection *sec;
2629
2630 if (ibfd->xvec != &bfd_elf32_spu_vec)
2631 continue;
2632
2633 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2634 {
2635 struct _spu_elf_section_data *sec_data;
2636 struct spu_elf_stack_info *sinfo;
2637
2638 if ((sec_data = spu_elf_section_data (sec)) != NULL
2639 && (sinfo = sec_data->u.i.stack_info) != NULL)
2640 {
2641 int i;
2642 for (i = 0; i < sinfo->num_fun; ++i)
2643 if (!root_only || !sinfo->fun[i].non_root)
2644 if (!doit (&sinfo->fun[i], info, param))
2645 return FALSE;
2646 }
2647 }
2648 }
2649 return TRUE;
2650}
2651
2652/* Transfer call info attached to struct function_info entries for
2653 all of a given function's sections to the first entry. */
2654
2655static bfd_boolean
2656transfer_calls (struct function_info *fun,
2657 struct bfd_link_info *info ATTRIBUTE_UNUSED,
2658 void *param ATTRIBUTE_UNUSED)
2659{
2660 struct function_info *start = fun->start;
2661
2662 if (start != NULL)
2663 {
2664 struct call_info *call, *call_next;
2665
2666 while (start->start != NULL)
2667 start = start->start;
2668 for (call = fun->call_list; call != NULL; call = call_next)
2669 {
2670 call_next = call->next;
2671 if (!insert_callee (start, call))
2672 free (call);
2673 }
2674 fun->call_list = NULL;
2675 }
2676 return TRUE;
2677}
2678
2679/* Mark nodes in the call graph that are called by some other node. */
2680
2681static bfd_boolean
2682mark_non_root (struct function_info *fun,
2683 struct bfd_link_info *info ATTRIBUTE_UNUSED,
2684 void *param ATTRIBUTE_UNUSED)
2685{
2686 struct call_info *call;
2687
2688 if (fun->visit1)
2689 return TRUE;
2690 fun->visit1 = TRUE;
2691 for (call = fun->call_list; call; call = call->next)
2692 {
2693 call->fun->non_root = TRUE;
055ed83b 2694 mark_non_root (call->fun, 0, 0);
49fa1e15 2695 }
055ed83b 2696 return TRUE;
2697}
2698
9dcc4794 2699/* Remove cycles from the call graph. Set depth of nodes. */
49fa1e15 2700
2701static bfd_boolean
2702remove_cycles (struct function_info *fun,
2703 struct bfd_link_info *info,
9dcc4794 2704 void *param)
2705{
2706 struct call_info **callp, *call;
2707 unsigned int depth = *(unsigned int *) param;
2708 unsigned int max_depth = depth;
49fa1e15 2709
9dcc4794 2710 fun->depth = depth;
2711 fun->visit2 = TRUE;
2712 fun->marking = TRUE;
2713
2714 callp = &fun->call_list;
2715 while ((call = *callp) != NULL)
2716 {
2717 if (!call->fun->visit2)
055ed83b 2718 {
2719 call->max_depth = depth + !call->is_pasted;
2720 if (!remove_cycles (call->fun, info, &call->max_depth))
055ed83b 2721 return FALSE;
2722 if (max_depth < call->max_depth)
2723 max_depth = call->max_depth;
055ed83b 2724 }
2725 else if (call->fun->marking)
2726 {
2727 if (!spu_hash_table (info)->auto_overlay)
2728 {
2729 const char *f1 = func_name (fun);
2730 const char *f2 = func_name (call->fun);
49fa1e15 2731
2732 info->callbacks->info (_("Stack analysis will ignore the call "
2733 "from %s to %s\n"),
2734 f1, f2);
2735 }
49fa1e15 2736 *callp = call->next;
055ed83b 2737 free (call);
2738 continue;
2739 }
2740 callp = &call->next;
2741 }
2742 fun->marking = FALSE;
9dcc4794 2743 *(unsigned int *) param = max_depth;
055ed83b 2744 return TRUE;
2745}
2746
2747/* Check that we actually visited all nodes in remove_cycles. If we
2748 didn't, then there is some cycle in the call graph not attached to
2749 any root node. Arbitrarily choose a node in the cycle as a new
2750 root and break the cycle. */
2751
2752static bfd_boolean
2753mark_detached_root (struct function_info *fun,
2754 struct bfd_link_info *info,
2755 void *param)
2756{
2757 if (fun->visit2)
2758 return TRUE;
2759 fun->non_root = FALSE;
2760 *(unsigned int *) param = 0;
2761 return remove_cycles (fun, info, param);
2762}
2763
2764/* Populate call_list for each function. */
2765
2766static bfd_boolean
c65be8d7 2767build_call_tree (struct bfd_link_info *info)
49fa1e15 2768{
49fa1e15 2769 bfd *ibfd;
9dcc4794 2770 unsigned int depth;
2771
2772 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2773 {
2774 extern const bfd_target bfd_elf32_spu_vec;
2775 asection *sec;
2776
2777 if (ibfd->xvec != &bfd_elf32_spu_vec)
2778 continue;
2779
2780 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2781 if (!mark_functions_via_relocs (sec, info, TRUE))
2782 return FALSE;
2783 }
2784
2785 /* Transfer call info from hot/cold section part of function
2786 to main entry. */
2787 if (!spu_hash_table (info)->auto_overlay
2788 && !for_each_node (transfer_calls, info, 0, FALSE))
055ed83b 2789 return FALSE;
49fa1e15 2790
2791 /* Find the call graph root(s). */
2792 if (!for_each_node (mark_non_root, info, 0, FALSE))
2793 return FALSE;
2794
2795 /* Remove cycles from the call graph. We start from the root node(s)
2796 so that we break cycles in a reasonable place. */
9dcc4794 2797 depth = 0;
2798 if (!for_each_node (remove_cycles, info, &depth, TRUE))
2799 return FALSE;
2800
2801 return for_each_node (mark_detached_root, info, &depth, FALSE);
2802}
2803
2804/* qsort predicate to sort calls by max_depth then count. */
2805
2806static int
2807sort_calls (const void *a, const void *b)
2808{
2809 struct call_info *const *c1 = a;
2810 struct call_info *const *c2 = b;
2811 int delta;
2812
2813 delta = (*c2)->max_depth - (*c1)->max_depth;
2814 if (delta != 0)
2815 return delta;
2816
2817 delta = (*c2)->count - (*c1)->count;
2818 if (delta != 0)
2819 return delta;
2820
667f3338 2821 return (char *) c1 - (char *) c2;
2822}
2823
2824struct _mos_param {
2825 unsigned int max_overlay_size;
2826};
2827
2828/* Set linker_mark and gc_mark on any sections that we will put in
2829 overlays. These flags are used by the generic ELF linker, but we
2830 won't be continuing on to bfd_elf_final_link so it is OK to use
2831 them. linker_mark is clear before we get here. Set segment_mark
2832 on sections that are part of a pasted function (excluding the last
2833 section).
2834
2835 Set up function rodata section if --overlay-rodata. We don't
2836 currently include merged string constant rodata sections, since such sections may be shared by more than one function.
2837
2838 Sort the call graph so that the deepest nodes will be visited
2839 first. */
2840
2841static bfd_boolean
2842mark_overlay_section (struct function_info *fun,
2843 struct bfd_link_info *info,
2844 void *param)
2845{
2846 struct call_info *call;
2847 unsigned int count;
2848 struct _mos_param *mos_param = param;
2849
2850 if (fun->visit4)
2851 return TRUE;
2852
2853 fun->visit4 = TRUE;
2854 if (!fun->sec->linker_mark)
2855 {
2856 unsigned int size;
2857
2858 fun->sec->linker_mark = 1;
2859 fun->sec->gc_mark = 1;
2860 fun->sec->segment_mark = 0;
2861 /* Ensure SEC_CODE is set on this text section (it ought to
2862 be!), and SEC_CODE is clear on rodata sections. We use
2863 this flag to differentiate the two overlay section types. */
2864 fun->sec->flags |= SEC_CODE;
4f0d75be 2865
2866 if (spu_hash_table (info)->auto_overlay & OVERLAY_RODATA)
2867 {
2868 char *name = NULL;
2869
2870 /* Find the rodata section corresponding to this function's
2871 text section. */
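/* For example, .text maps to .rodata, .text.foo to .rodata.foo, and
   .gnu.linkonce.t.foo to .gnu.linkonce.r.foo.  */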
2872 if (strcmp (fun->sec->name, ".text") == 0)
2873 {
2874 name = bfd_malloc (sizeof (".rodata"));
2875 if (name == NULL)
2876 return FALSE;
2877 memcpy (name, ".rodata", sizeof (".rodata"));
2878 }
2879 else if (strncmp (fun->sec->name, ".text.", 6) == 0)
2880 {
2881 size_t len = strlen (fun->sec->name);
2882 name = bfd_malloc (len + 3);
2883 if (name == NULL)
2884 return FALSE;
2885 memcpy (name, ".rodata", sizeof (".rodata"));
2886 memcpy (name + 7, fun->sec->name + 5, len - 4);
2887 }
2888 else if (strncmp (fun->sec->name, ".gnu.linkonce.t.", 16) == 0)
2889 {
2890 size_t len = strlen (fun->sec->name) + 1;
2891 name = bfd_malloc (len);
2892 if (name == NULL)
2893 return FALSE;
2894 memcpy (name, fun->sec->name, len);
2895 name[14] = 'r';
2896 }
2897
2898 if (name != NULL)
2899 {
2900 asection *rodata = NULL;
2901 asection *group_sec = elf_section_data (fun->sec)->next_in_group;
2902 if (group_sec == NULL)
2903 rodata = bfd_get_section_by_name (fun->sec->owner, name);
2904 else
2905 while (group_sec != NULL && group_sec != fun->sec)
2906 {
2907 if (strcmp (group_sec->name, name) == 0)
2908 {
2909 rodata = group_sec;
2910 break;
2911 }
2912 group_sec = elf_section_data (group_sec)->next_in_group;
2913 }
2914 fun->rodata = rodata;
2915 if (fun->rodata)
2916 {
2917 fun->rodata->linker_mark = 1;
2918 fun->rodata->gc_mark = 1;
2919 fun->rodata->flags &= ~SEC_CODE;
2920 }
2921 free (name);
2922 }
9dcc4794 2923 }
2924 size = fun->sec->size;
2925 if (fun->rodata)
2926 size += fun->rodata->size;
2927 if (mos_param->max_overlay_size < size)
2928 mos_param->max_overlay_size = size;
2929 }
2930
2931 for (count = 0, call = fun->call_list; call != NULL; call = call->next)
2932 count += 1;
2933
2934 if (count > 1)
2935 {
2936 struct call_info **calls = bfd_malloc (count * sizeof (*calls));
2937 if (calls == NULL)
2938 return FALSE;
2939
2940 for (count = 0, call = fun->call_list; call != NULL; call = call->next)
2941 calls[count++] = call;
2942
2943 qsort (calls, count, sizeof (*calls), sort_calls);
2944
2945 fun->call_list = NULL;
2946 while (count != 0)
2947 {
2948 --count;
2949 calls[count]->next = fun->call_list;
2950 fun->call_list = calls[count];
2951 }
2952 free (calls);
2953 }
2954
2955 for (call = fun->call_list; call != NULL; call = call->next)
2956 {
2957 if (call->is_pasted)
2958 {
2959 /* There can only be one is_pasted call per function_info. */
2960 BFD_ASSERT (!fun->sec->segment_mark);
2961 fun->sec->segment_mark = 1;
2962 }
2963 if (!mark_overlay_section (call->fun, info, param))
2964 return FALSE;
2965 }
2966
2967 /* Don't put entry code into an overlay. The overlay manager needs
2968 a stack! */
2969 if (fun->lo + fun->sec->output_offset + fun->sec->output_section->vma
2970 == info->output_bfd->start_address)
2971 {
2972 fun->sec->linker_mark = 0;
2973 if (fun->rodata != NULL)
2974 fun->rodata->linker_mark = 0;
2975 }
2976 return TRUE;
2977}
2978
2979/* If non-zero then unmark functions called from those within sections
2980 that we need to unmark. Unfortunately this isn't reliable since the
2981 call graph cannot know the destination of function pointer calls. */
2982#define RECURSE_UNMARK 0
2983
2984struct _uos_param {
2985 asection *exclude_input_section;
2986 asection *exclude_output_section;
2987 unsigned long clearing;
2988};
2989
2990/* Undo some of mark_overlay_section's work. */
2991
2992static bfd_boolean
2993unmark_overlay_section (struct function_info *fun,
2994 struct bfd_link_info *info,
2995 void *param)
2996{
2997 struct call_info *call;
2998 struct _uos_param *uos_param = param;
2999 unsigned int excluded = 0;
3000
3001 if (fun->visit5)
3002 return TRUE;
3003
3004 fun->visit5 = TRUE;
3005
3006 excluded = 0;
3007 if (fun->sec == uos_param->exclude_input_section
3008 || fun->sec->output_section == uos_param->exclude_output_section)
3009 excluded = 1;
3010
3011 if (RECURSE_UNMARK)
3012 uos_param->clearing += excluded;
9dcc4794 3013
99302af9 3014 if (RECURSE_UNMARK ? uos_param->clearing : excluded)
3015 {
3016 fun->sec->linker_mark = 0;
3017 if (fun->rodata)
3018 fun->rodata->linker_mark = 0;
3019 }
3020
3021 for (call = fun->call_list; call != NULL; call = call->next)
3022 if (!unmark_overlay_section (call->fun, info, param))
3023 return FALSE;
3024
3025 if (RECURSE_UNMARK)
3026 uos_param->clearing -= excluded;
3027 return TRUE;
3028}
3029
3030struct _cl_param {
3031 unsigned int lib_size;
3032 asection **lib_sections;
3033};
3034
3035/* Add sections we have marked as belonging to overlays to an array
3036 for consideration as non-overlay sections. The array consists of
3037 pairs of sections, (text,rodata), for functions in the call graph. */
3038
3039static bfd_boolean
3040collect_lib_sections (struct function_info *fun,
3041 struct bfd_link_info *info,
3042 void *param)
3043{
3044 struct _cl_param *lib_param = param;
3045 struct call_info *call;
3046 unsigned int size;
3047
3048 if (fun->visit6)
3049 return TRUE;
3050
3051 fun->visit6 = TRUE;
3052 if (!fun->sec->linker_mark || !fun->sec->gc_mark || fun->sec->segment_mark)
3053 return TRUE;
3054
3055 size = fun->sec->size;
3056 if (fun->rodata)
3057 size += fun->rodata->size;
b0c41709 3058 if (size <= lib_param->lib_size)
9dcc4794 3059 {
3060 *lib_param->lib_sections++ = fun->sec;
3061 fun->sec->gc_mark = 0;
3062 if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
3063 {
3064 *lib_param->lib_sections++ = fun->rodata;
3065 fun->rodata->gc_mark = 0;
3066 }
3067 else
3068 *lib_param->lib_sections++ = NULL;
9dcc4794 3069 }
3070
3071 for (call = fun->call_list; call != NULL; call = call->next)
3072 collect_lib_sections (call->fun, info, param);
3073
3074 return TRUE;
3075}
3076
3077/* qsort predicate to sort sections by call count. */
3078
3079static int
3080sort_lib (const void *a, const void *b)
3081{
3082 asection *const *s1 = a;
3083 asection *const *s2 = b;
3084 struct _spu_elf_section_data *sec_data;
3085 struct spu_elf_stack_info *sinfo;
3086 int delta;
3087
3088 delta = 0;
3089 if ((sec_data = spu_elf_section_data (*s1)) != NULL
3090 && (sinfo = sec_data->u.i.stack_info) != NULL)
3091 {
3092 int i;
3093 for (i = 0; i < sinfo->num_fun; ++i)
3094 delta -= sinfo->fun[i].call_count;
3095 }
3096
3097 if ((sec_data = spu_elf_section_data (*s2)) != NULL
3098 && (sinfo = sec_data->u.i.stack_info) != NULL)
3099 {
3100 int i;
3101 for (i = 0; i < sinfo->num_fun; ++i)
3102 delta += sinfo->fun[i].call_count;
3103 }
3104
3105 if (delta != 0)
3106 return delta;
3107
3108 return s1 - s2;
3109}
3110
3111/* Remove some sections from those marked to be in overlays. Choose
3112 those that are called from many places, likely library functions. */
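/* Candidate sections (paired with any associated rodata) are gathered
   by collect_lib_sections, sorted so that the most-called come first,
   then greedily moved out of the overlay set while their size plus any
   call stubs they still require fits within LIB_SIZE.  */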
3113
3114static unsigned int
3115auto_ovl_lib_functions (struct bfd_link_info *info, unsigned int lib_size)
3116{
3117 bfd *ibfd;
3118 asection **lib_sections;
3119 unsigned int i, lib_count;
3120 struct _cl_param collect_lib_param;
3121 struct function_info dummy_caller;
3122
3123 memset (&dummy_caller, 0, sizeof (dummy_caller));
3124 lib_count = 0;
3125 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3126 {
3127 extern const bfd_target bfd_elf32_spu_vec;
3128 asection *sec;
3129
3130 if (ibfd->xvec != &bfd_elf32_spu_vec)
3131 continue;
3132
3133 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3134 if (sec->linker_mark
3135 && sec->size < lib_size
3136 && (sec->flags & SEC_CODE) != 0)
3137 lib_count += 1;
3138 }
3139 lib_sections = bfd_malloc (lib_count * 2 * sizeof (*lib_sections));
3140 if (lib_sections == NULL)
3141 return (unsigned int) -1;
3142 collect_lib_param.lib_size = lib_size;
3143 collect_lib_param.lib_sections = lib_sections;
3144 if (!for_each_node (collect_lib_sections, info, &collect_lib_param,
3145 TRUE))
3146 return (unsigned int) -1;
3147 lib_count = (collect_lib_param.lib_sections - lib_sections) / 2;
3148
3149 /* Sort sections so that those with the most calls are first. */
3150 if (lib_count > 1)
3151 qsort (lib_sections, lib_count, 2 * sizeof (*lib_sections), sort_lib);
3152
3153 for (i = 0; i < lib_count; i++)
3154 {
3155 unsigned int tmp, stub_size;
3156 asection *sec;
3157 struct _spu_elf_section_data *sec_data;
3158 struct spu_elf_stack_info *sinfo;
3159
3160 sec = lib_sections[2 * i];
3161 /* If this section is OK, its size must be less than lib_size. */
3162 tmp = sec->size;
3163 /* If it has a rodata section, then add that too. */
3164 if (lib_sections[2 * i + 1])
3165 tmp += lib_sections[2 * i + 1]->size;
3166 /* Add any new overlay call stubs needed by the section. */
3167 stub_size = 0;
3168 if (tmp < lib_size
3169 && (sec_data = spu_elf_section_data (sec)) != NULL
3170 && (sinfo = sec_data->u.i.stack_info) != NULL)
3171 {
3172 int k;
3173 struct call_info *call;
3174
3175 for (k = 0; k < sinfo->num_fun; ++k)
3176 for (call = sinfo->fun[k].call_list; call; call = call->next)
3177 if (call->fun->sec->linker_mark)
3178 {
3179 struct call_info *p;
3180 for (p = dummy_caller.call_list; p; p = p->next)
3181 if (p->fun == call->fun)
3182 break;
3183 if (!p)
3184 stub_size += OVL_STUB_SIZE;
3185 }
3186 }
3187 if (tmp + stub_size < lib_size)
3188 {
3189 struct call_info **pp, *p;
3190
3191 /* This section fits. Mark it as non-overlay. */
3192 lib_sections[2 * i]->linker_mark = 0;
3193 if (lib_sections[2 * i + 1])
3194 lib_sections[2 * i + 1]->linker_mark = 0;
3195 lib_size -= tmp + stub_size;
3196 /* Call stubs to the section we just added are no longer
3197 needed. */
3198 pp = &dummy_caller.call_list;
3199 while ((p = *pp) != NULL)
3200 if (!p->fun->sec->linker_mark)
3201 {
3202 lib_size += OVL_STUB_SIZE;
3203 *pp = p->next;
3204 free (p);
3205 }
3206 else
3207 pp = &p->next;
3208 /* Add new call stubs to dummy_caller. */
3209 if ((sec_data = spu_elf_section_data (sec)) != NULL
3210 && (sinfo = sec_data->u.i.stack_info) != NULL)
3211 {
3212 int k;
3213 struct call_info *call;
3214
3215 for (k = 0; k < sinfo->num_fun; ++k)
3216 for (call = sinfo->fun[k].call_list;
3217 call;
3218 call = call->next)
3219 if (call->fun->sec->linker_mark)
3220 {
3221 struct call_info *callee;
3222 callee = bfd_malloc (sizeof (*callee));
3223 if (callee == NULL)
3224 return (unsigned int) -1;
3225 *callee = *call;
3226 if (!insert_callee (&dummy_caller, callee))
3227 free (callee);
3228 }
3229 }
3230 }
3231 }
3232 while (dummy_caller.call_list != NULL)
3233 {
3234 struct call_info *call = dummy_caller.call_list;
3235 dummy_caller.call_list = call->next;
3236 free (call);
3237 }
3238 for (i = 0; i < 2 * lib_count; i++)
3239 if (lib_sections[i])
3240 lib_sections[i]->gc_mark = 1;
3241 free (lib_sections);
3242 return lib_size;
3243}
3244
3245/* Build an array of overlay sections. The deepest node's section is
2ec9638b 3246 added first, then its parent node's section, then everything called
3247 from the parent section. The idea is to group sections so as to
3248 minimise calls between different overlays. */
3249
3250static bfd_boolean
3251collect_overlays (struct function_info *fun,
3252 struct bfd_link_info *info,
3253 void *param)
3254{
3255 struct call_info *call;
3256 bfd_boolean added_fun;
3257 asection ***ovly_sections = param;
3258
3259 if (fun->visit7)
3260 return TRUE;
3261
3262 fun->visit7 = TRUE;
3263 for (call = fun->call_list; call != NULL; call = call->next)
3264 if (!call->is_pasted)
3265 {
3266 if (!collect_overlays (call->fun, info, ovly_sections))
3267 return FALSE;
3268 break;
3269 }
3270
3271 added_fun = FALSE;
3272 if (fun->sec->linker_mark && fun->sec->gc_mark)
3273 {
3274 fun->sec->gc_mark = 0;
3275 *(*ovly_sections)++ = fun->sec;
3276 if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
3277 {
3278 fun->rodata->gc_mark = 0;
3279 *(*ovly_sections)++ = fun->rodata;
3280 }
3281 else
3282 *(*ovly_sections)++ = NULL;
3283 added_fun = TRUE;
3284
3285 /* Pasted sections must stay with the first section. We don't
3286 put pasted sections in the array, just the first section.
3287 Mark subsequent sections as already considered. */
3288 if (fun->sec->segment_mark)
3289 {
3290 struct function_info *call_fun = fun;
3291 do
3292 {
3293 for (call = call_fun->call_list; call != NULL; call = call->next)
3294 if (call->is_pasted)
3295 {
3296 call_fun = call->fun;
3297 call_fun->sec->gc_mark = 0;
3298 if (call_fun->rodata)
3299 call_fun->rodata->gc_mark = 0;
3300 break;
3301 }
3302 if (call == NULL)
3303 abort ();
3304 }
3305 while (call_fun->sec->segment_mark);
3306 }
3307 }
3308
3309 for (call = fun->call_list; call != NULL; call = call->next)
3310 if (!collect_overlays (call->fun, info, ovly_sections))
3311 return FALSE;
3312
3313 if (added_fun)
3314 {
3315 struct _spu_elf_section_data *sec_data;
3316 struct spu_elf_stack_info *sinfo;
3317
3318 if ((sec_data = spu_elf_section_data (fun->sec)) != NULL
3319 && (sinfo = sec_data->u.i.stack_info) != NULL)
3320 {
3321 int i;
3322 for (i = 0; i < sinfo->num_fun; ++i)
3323 if (!collect_overlays (&sinfo->fun[i], info, ovly_sections))
3324 return FALSE;
3325 }
3326 }
3327
3328 return TRUE;
3329}
3330
3331struct _sum_stack_param {
3332 size_t cum_stack;
3333 size_t overall_stack;
3334 bfd_boolean emit_stack_syms;
3335};
3336
3337/* Descend the call graph for FUN, accumulating total stack required. */
3338
055ed83b 3339static bfd_boolean
3340sum_stack (struct function_info *fun,
3341 struct bfd_link_info *info,
055ed83b 3342 void *param)
3343{
3344 struct call_info *call;
3345 struct function_info *max;
3346 size_t stack, cum_stack;
49fa1e15 3347 const char *f1;
9dcc4794 3348 bfd_boolean has_call;
055ed83b 3349 struct _sum_stack_param *sum_stack_param = param;
9dcc4794 3350 struct spu_link_hash_table *htab;
49fa1e15 3351
3352 cum_stack = fun->stack;
3353 sum_stack_param->cum_stack = cum_stack;
49fa1e15 3354 if (fun->visit3)
055ed83b 3355 return TRUE;
49fa1e15 3356
9dcc4794 3357 has_call = FALSE;
055ed83b 3358 max = NULL;
49fa1e15
AM
3359 for (call = fun->call_list; call; call = call->next)
3360 {
9dcc4794
AM
3361 if (!call->is_pasted)
3362 has_call = TRUE;
055ed83b
AM
3363 if (!sum_stack (call->fun, info, sum_stack_param))
3364 return FALSE;
3365 stack = sum_stack_param->cum_stack;
49fa1e15
AM
3366 /* Include caller stack for normal calls, don't do so for
3367 tail calls. fun->stack here is local stack usage for
3368 this function. */
9dcc4794 3369 if (!call->is_tail || call->is_pasted || call->fun->start != NULL)
49fa1e15 3370 stack += fun->stack;
055ed83b 3371 if (cum_stack < stack)
49fa1e15 3372 {
055ed83b 3373 cum_stack = stack;
49fa1e15
AM
3374 max = call->fun;
3375 }
3376 }
3377
055ed83b
AM
3378 sum_stack_param->cum_stack = cum_stack;
3379 stack = fun->stack;
3380 /* Now fun->stack holds cumulative stack. */
3381 fun->stack = cum_stack;
3382 fun->visit3 = TRUE;
3383
3384 if (!fun->non_root
3385 && sum_stack_param->overall_stack < cum_stack)
3386 sum_stack_param->overall_stack = cum_stack;
3387
9dcc4794
AM
3388 htab = spu_hash_table (info);
3389 if (htab->auto_overlay)
3390 return TRUE;
3391
49fa1e15 3392 f1 = func_name (fun);
055ed83b
AM
3393 if (!fun->non_root)
3394 info->callbacks->info (_(" %s: 0x%v\n"), f1, (bfd_vma) cum_stack);
fad9eaf0 3395 info->callbacks->minfo (_("%s: 0x%v 0x%v\n"),
055ed83b 3396 f1, (bfd_vma) stack, (bfd_vma) cum_stack);
49fa1e15 3397
9dcc4794 3398 if (has_call)
49fa1e15
AM
3399 {
3400 info->callbacks->minfo (_(" calls:\n"));
3401 for (call = fun->call_list; call; call = call->next)
9dcc4794
AM
3402 if (!call->is_pasted)
3403 {
3404 const char *f2 = func_name (call->fun);
3405 const char *ann1 = call->fun == max ? "*" : " ";
3406 const char *ann2 = call->is_tail ? "t" : " ";
49fa1e15 3407
9dcc4794
AM
3408 info->callbacks->minfo (_(" %s%s %s\n"), ann1, ann2, f2);
3409 }
49fa1e15
AM
3410 }
3411
055ed83b 3412 if (sum_stack_param->emit_stack_syms)
49fa1e15 3413 {
49fa1e15
AM
3414 char *name = bfd_malloc (18 + strlen (f1));
3415 struct elf_link_hash_entry *h;
3416
055ed83b
AM
3417 if (name == NULL)
3418 return FALSE;
3419
3420 if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
3421 sprintf (name, "__stack_%s", f1);
3422 else
3423 sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);
3424
3425 h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
3426 free (name);
3427 if (h != NULL
3428 && (h->root.type == bfd_link_hash_new
3429 || h->root.type == bfd_link_hash_undefined
3430 || h->root.type == bfd_link_hash_undefweak))
49fa1e15 3431 {
055ed83b
AM
3432 h->root.type = bfd_link_hash_defined;
3433 h->root.u.def.section = bfd_abs_section_ptr;
3434 h->root.u.def.value = cum_stack;
3435 h->size = 0;
3436 h->type = 0;
3437 h->ref_regular = 1;
3438 h->def_regular = 1;
3439 h->ref_regular_nonweak = 1;
3440 h->forced_local = 1;
3441 h->non_elf = 0;
49fa1e15
AM
3442 }
3443 }
3444
055ed83b 3445 return TRUE;
49fa1e15
AM
3446}
3447
9dcc4794
AM
3448/* SEC is part of a pasted function. Return the call_info for the
3449 next section of this function. */
3450
3451static struct call_info *
3452find_pasted_call (asection *sec)
3453{
3454 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
3455 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
3456 struct call_info *call;
3457 int k;
3458
3459 for (k = 0; k < sinfo->num_fun; ++k)
3460 for (call = sinfo->fun[k].call_list; call != NULL; call = call->next)
3461 if (call->is_pasted)
3462 return call;
3463 abort ();
3464 return 0;
3465}
3466
3467/* qsort predicate to sort bfds by file name. */
3468
3469static int
3470sort_bfds (const void *a, const void *b)
3471{
3472 bfd *const *abfd1 = a;
3473 bfd *const *abfd2 = b;
3474
3475 return strcmp ((*abfd1)->filename, (*abfd2)->filename);
3476}
3477
3478/* Handle --auto-overlay. */
3479
3480static void spu_elf_auto_overlay (struct bfd_link_info *, void (*) (void))
3481 ATTRIBUTE_NORETURN;
3482
3483static void
3484spu_elf_auto_overlay (struct bfd_link_info *info,
3485 void (*spu_elf_load_ovl_mgr) (void))
3486{
3487 bfd *ibfd;
3488 bfd **bfd_arr;
3489 struct elf_segment_map *m;
3490 unsigned int fixed_size, lo, hi;
3491 struct spu_link_hash_table *htab;
3492 unsigned int base, i, count, bfd_count;
3493 int ovlynum;
3494 asection **ovly_sections, **ovly_p;
3495 FILE *script;
3496 unsigned int total_overlay_size, overlay_size;
3497 struct elf_link_hash_entry *h;
3498 struct _mos_param mos_param;
3499 struct _uos_param uos_param;
3500 struct function_info dummy_caller;
3501
3502 /* Find the extents of our loadable image. */
3503 lo = (unsigned int) -1;
3504 hi = 0;
3505 for (m = elf_tdata (info->output_bfd)->segment_map; m != NULL; m = m->next)
3506 if (m->p_type == PT_LOAD)
3507 for (i = 0; i < m->count; i++)
3508 if (m->sections[i]->size != 0)
3509 {
3510 if (m->sections[i]->vma < lo)
3511 lo = m->sections[i]->vma;
3512 if (m->sections[i]->vma + m->sections[i]->size - 1 > hi)
3513 hi = m->sections[i]->vma + m->sections[i]->size - 1;
3514 }
3515 fixed_size = hi + 1 - lo;
3516
3517 if (!discover_functions (info))
3518 goto err_exit;
3519
3520 if (!build_call_tree (info))
3521 goto err_exit;
3522
3523 uos_param.exclude_input_section = 0;
3524 uos_param.exclude_output_section
3525 = bfd_get_section_by_name (info->output_bfd, ".interrupt");
3526
3527 htab = spu_hash_table (info);
3528 h = elf_link_hash_lookup (&htab->elf, "__ovly_load",
3529 FALSE, FALSE, FALSE);
3530 if (h != NULL
3531 && (h->root.type == bfd_link_hash_defined
3532 || h->root.type == bfd_link_hash_defweak)
3533 && h->def_regular)
3534 {
3535 /* We have a user supplied overlay manager. */
3536 uos_param.exclude_input_section = h->root.u.def.section;
3537 }
3538 else
3539 {
3540 /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
3541 builtin version to .text, and will adjust .text size. */
3542 asection *text = bfd_get_section_by_name (info->output_bfd, ".text");
3543 if (text != NULL)
3544 fixed_size -= text->size;
3545 spu_elf_load_ovl_mgr ();
3546 text = bfd_get_section_by_name (info->output_bfd, ".text");
3547 if (text != NULL)
3548 fixed_size += text->size;
3549 }
3550
3551 /* Mark overlay sections, and find max overlay section size. */
3552 mos_param.max_overlay_size = 0;
3553 if (!for_each_node (mark_overlay_section, info, &mos_param, TRUE))
3554 goto err_exit;
3555
3556 /* We can't put the overlay manager or interrupt routines in
3557 overlays. */
3558 uos_param.clearing = 0;
3559 if ((uos_param.exclude_input_section
3560 || uos_param.exclude_output_section)
3561 && !for_each_node (unmark_overlay_section, info, &uos_param, TRUE))
3562 goto err_exit;
3563
3564 bfd_count = 0;
3565 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3566 ++bfd_count;
3567 bfd_arr = bfd_malloc (bfd_count * sizeof (*bfd_arr));
3568 if (bfd_arr == NULL)
3569 goto err_exit;
3570
3571 /* Count overlay sections, and subtract their sizes from "fixed_size". */
3572 count = 0;
3573 bfd_count = 0;
3574 total_overlay_size = 0;
3575 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3576 {
3577 extern const bfd_target bfd_elf32_spu_vec;
3578 asection *sec;
3579 unsigned int old_count;
3580
3581 if (ibfd->xvec != &bfd_elf32_spu_vec)
3582 continue;
3583
3584 old_count = count;
3585 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3586 if (sec->linker_mark)
3587 {
3588 if ((sec->flags & SEC_CODE) != 0)
3589 count += 1;
3590 fixed_size -= sec->size;
3591 total_overlay_size += sec->size;
3592 }
3593 if (count != old_count)
3594 bfd_arr[bfd_count++] = ibfd;
3595 }
3596
3597 /* Since the overlay link script selects sections by file name and
3598 section name, ensure that file names are unique. */
3599 if (bfd_count > 1)
3600 {
3601 bfd_boolean ok = TRUE;
3602
3603 qsort (bfd_arr, bfd_count, sizeof (*bfd_arr), sort_bfds);
3604 for (i = 1; i < bfd_count; ++i)
3605 if (strcmp (bfd_arr[i - 1]->filename, bfd_arr[i]->filename) == 0)
3606 {
97407faf 3607 if (bfd_arr[i - 1]->my_archive == bfd_arr[i]->my_archive)
9dcc4794 3608 {
97407faf 3609 if (bfd_arr[i - 1]->my_archive && bfd_arr[i]->my_archive)
9dcc4794 3610 info->callbacks->einfo (_("%s duplicated in %s\n"),
97407faf 3611 bfd_arr[i]->filename,
9dcc4794 3612 bfd_arr[i]->my_archive->filename);
3613 else
3614 info->callbacks->einfo (_("%s duplicated\n"),
3615 bfd_arr[i]->filename);
3616 ok = FALSE;
9dcc4794 3617 }
3618 }
3619 if (!ok)
3620 {
3621 info->callbacks->einfo (_("sorry, no support for duplicate "
3622 "object files in auto-overlay script\n"));
3623 bfd_set_error (bfd_error_bad_value);
3624 goto err_exit;
3625 }
3626 }
3627 free (bfd_arr);
3628
3629 if (htab->reserved == 0)
3630 {
3631 struct _sum_stack_param sum_stack_param;
3632
3633 sum_stack_param.emit_stack_syms = 0;
3634 sum_stack_param.overall_stack = 0;
3635 if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
3636 goto err_exit;
99302af9 3637 htab->reserved = sum_stack_param.overall_stack + htab->extra_stack_space;
3638 }
3639 fixed_size += htab->reserved;
3640 fixed_size += htab->non_ovly_stub * OVL_STUB_SIZE;
3641 if (fixed_size + mos_param.max_overlay_size <= htab->local_store)
3642 {
3643 /* Guess number of overlays. Assuming overlay buffer is on
3644 average only half full should be conservative. */
3645 ovlynum = total_overlay_size * 2 / (htab->local_store - fixed_size);
3646 /* Space for _ovly_table[], _ovly_buf_table[] and toe. */
3647 fixed_size += ovlynum * 16 + 16 + 4 + 16;
3648 }
3649
3650 if (fixed_size + mos_param.max_overlay_size > htab->local_store)
3651 info->callbacks->einfo (_("non-overlay size of 0x%v plus maximum overlay "
3652 "size of 0x%v exceeds local store\n"),
3653 (bfd_vma) fixed_size,
3654 (bfd_vma) mos_param.max_overlay_size);
3655
3656 /* Now see if we should put some functions in the non-overlay area. */
e5e6a5ff 3657 else if (fixed_size < htab->overlay_fixed)
9dcc4794 3658 {
3659 unsigned int max_fixed, lib_size;
3660
3661 max_fixed = htab->local_store - mos_param.max_overlay_size;
3662 if (max_fixed > htab->overlay_fixed)
3663 max_fixed = htab->overlay_fixed;
3664 lib_size = max_fixed - fixed_size;
3665 lib_size = auto_ovl_lib_functions (info, lib_size);
3666 if (lib_size == (unsigned int) -1)
3667 goto err_exit;
e5e6a5ff 3668 fixed_size = max_fixed - lib_size;
3669 }
3670
3671 /* Build an array of sections, suitably sorted to place into
3672 overlays. */
3673 ovly_sections = bfd_malloc (2 * count * sizeof (*ovly_sections));
3674 if (ovly_sections == NULL)
3675 goto err_exit;
3676 ovly_p = ovly_sections;
3677 if (!for_each_node (collect_overlays, info, &ovly_p, TRUE))
3678 goto err_exit;
3679 count = (size_t) (ovly_p - ovly_sections) / 2;
3680
3681 script = htab->spu_elf_open_overlay_script ();
3682
3683 if (fprintf (script, "SECTIONS\n{\n OVERLAY :\n {\n") <= 0)
3684 goto file_err;
3685
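/* The loop below emits a script of roughly this shape, one
   'archive<sep>object (section)' line per member (where <sep> is
   info->path_separator), with any rodata sections listed after the
   text sections of each overlay:
     SECTIONS
     {
      OVERLAY :
      {
       .ovly1 { file1.o (.text.f1) ... }
       .ovly2 { ... }
      }
     }
   */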
3686 memset (&dummy_caller, 0, sizeof (dummy_caller));
3687 overlay_size = htab->local_store - fixed_size;
3688 base = 0;
3689 ovlynum = 0;
3690 while (base < count)
3691 {
3692 unsigned int size = 0;
3693 unsigned int j;
3694
3695 for (i = base; i < count; i++)
3696 {
3697 asection *sec;
3698 unsigned int tmp;
3699 unsigned int stub_size;
3700 struct call_info *call, *pasty;
3701 struct _spu_elf_section_data *sec_data;
3702 struct spu_elf_stack_info *sinfo;
3703 int k;
3704
3705 /* See whether we can add this section to the current
3706 overlay without overflowing our overlay buffer. */
3707 sec = ovly_sections[2 * i];
3708 tmp = size + sec->size;
3709 if (ovly_sections[2 * i + 1])
3710 tmp += ovly_sections[2 * i + 1]->size;
3711 if (tmp > overlay_size)
3712 break;
3713 if (sec->segment_mark)
3714 {
3715 /* Pasted sections must stay together, so add their
3716 sizes too. */
3717 struct call_info *pasty = find_pasted_call (sec);
3718 while (pasty != NULL)
3719 {
3720 struct function_info *call_fun = pasty->fun;
3721 tmp += call_fun->sec->size;
3722 if (call_fun->rodata)
3723 tmp += call_fun->rodata->size;
3724 for (pasty = call_fun->call_list; pasty; pasty = pasty->next)
3725 if (pasty->is_pasted)
3726 break;
3727 }
3728 }
3729 if (tmp > overlay_size)
3730 break;
3731
3732 	  /* If we add this section, we might need new overlay call
3733 	     stubs.  Add any overlay section calls to dummy_caller.  */
3734 pasty = NULL;
3735 sec_data = spu_elf_section_data (sec);
3736 sinfo = sec_data->u.i.stack_info;
3737 for (k = 0; k < sinfo->num_fun; ++k)
3738 for (call = sinfo->fun[k].call_list; call; call = call->next)
3739 if (call->is_pasted)
3740 {
3741 BFD_ASSERT (pasty == NULL);
3742 pasty = call;
3743 }
3744 else if (call->fun->sec->linker_mark)
3745 {
3746 if (!copy_callee (&dummy_caller, call))
3747 goto err_exit;
3748 }
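	      /* Also collect calls made from pasted continuation sections,
		 following the is_pasted chain, since those sections are
		 pulled into the overlay together with this one.  */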
3749 while (pasty != NULL)
3750 {
3751 struct function_info *call_fun = pasty->fun;
3752 pasty = NULL;
3753 for (call = call_fun->call_list; call; call = call->next)
3754 if (call->is_pasted)
3755 {
3756 BFD_ASSERT (pasty == NULL);
3757 pasty = call;
3758 }
3759 else if (!copy_callee (&dummy_caller, call))
3760 goto err_exit;
3761 }
3762
3763 /* Calculate call stub size. */
3764 stub_size = 0;
3765 for (call = dummy_caller.call_list; call; call = call->next)
3766 {
3767 unsigned int k;
3768
3769 stub_size += OVL_STUB_SIZE;
3770 /* If the call is within this overlay, we won't need a
3771 stub. */
3772 for (k = base; k < i + 1; k++)
3773 if (call->fun->sec == ovly_sections[2 * k])
3774 {
3775 stub_size -= OVL_STUB_SIZE;
3776 break;
3777 }
3778 }
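	  /* stub_size now charges one OVL_STUB_SIZE per callee recorded in
	     dummy_caller, minus those whose section already lies in this
	     overlay region, i.e. the extra stub space this section would
	     require.  */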
3779 if (tmp + stub_size > overlay_size)
3780 break;
3781
3782 size = tmp;
3783 }
3784
3785 if (i == base)
3786 {
3787 info->callbacks->einfo (_("%B:%A%s exceeds overlay size\n"),
3788 ovly_sections[2 * i]->owner,
3789 ovly_sections[2 * i],
3790 ovly_sections[2 * i + 1] ? " + rodata" : "");
3791 bfd_set_error (bfd_error_bad_value);
3792 goto err_exit;
3793 }
3794
3795 if (fprintf (script, " .ovly%d {\n", ++ovlynum) <= 0)
3796 goto file_err;
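      /* Each overlay member is written as archive<path_separator>object
	 (section), with any pasted continuation sections listed right
	 after it so the script keeps them together.  */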
3797 for (j = base; j < i; j++)
3798 {
3799 asection *sec = ovly_sections[2 * j];
3800
3801 if (fprintf (script, " %s%c%s (%s)\n",
3802 (sec->owner->my_archive != NULL
3803 ? sec->owner->my_archive->filename : ""),
3804 info->path_separator,
3805 sec->owner->filename,
3806 sec->name) <= 0)
3807 goto file_err;
3808 if (sec->segment_mark)
3809 {
3810 struct call_info *call = find_pasted_call (sec);
3811 while (call != NULL)
3812 {
3813 struct function_info *call_fun = call->fun;
3814 sec = call_fun->sec;
3815 if (fprintf (script, " %s%c%s (%s)\n",
3816 (sec->owner->my_archive != NULL
3817 ? sec->owner->my_archive->filename : ""),
3818 info->path_separator,
3819 sec->owner->filename,
3820 sec->name) <= 0)
3821 goto file_err;
3822 for (call = call_fun->call_list; call; call = call->next)
3823 if (call->is_pasted)
3824 break;
3825 }
3826 }
3827 }
3828
3829 for (j = base; j < i; j++)
3830 {
3831 asection *sec = ovly_sections[2 * j + 1];
3832 if (sec != NULL
3833 && fprintf (script, " %s%c%s (%s)\n",
3834 (sec->owner->my_archive != NULL
3835 ? sec->owner->my_archive->filename : ""),
3836 info->path_separator,
3837 sec->owner->filename,
3838 sec->name) <= 0)
3839 goto file_err;
3840
3841 sec = ovly_sections[2 * j];
3842 if (sec->segment_mark)
3843 {
3844 struct call_info *call = find_pasted_call (sec);
3845 while (call != NULL)
3846 {
3847 struct function_info *call_fun = call->fun;
3848 sec = call_fun->rodata;
3849 if (sec != NULL
3850 && fprintf (script, " %s%c%s (%s)\n",
3851 (sec->owner->my_archive != NULL
3852 ? sec->owner->my_archive->filename : ""),
3853 info->path_separator,
3854 sec->owner->filename,
3855 sec->name) <= 0)
3856 goto file_err;
3857 for (call = call_fun->call_list; call; call = call->next)
3858 if (call->is_pasted)
3859 break;
3860 }
3861 }
3862 }
3863
3864 if (fprintf (script, " }\n") <= 0)
3865 goto file_err;
3866
3867 while (dummy_caller.call_list != NULL)
3868 {
3869 struct call_info *call = dummy_caller.call_list;
3870 dummy_caller.call_list = call->next;
3871 free (call);
3872 }
3873
3874 base = i;
3875 }
3876 free (ovly_sections);
3877
3878 if (fprintf (script, " }\n}\nINSERT AFTER .text;\n") <= 0)
3879 goto file_err;
3880 if (fclose (script) != 0)
3881 goto file_err;
3882
3883 if (htab->auto_overlay & AUTO_RELINK)
3884 htab->spu_elf_relink ();
3885
3886 xexit (0);
3887
3888 file_err:
3889 bfd_set_error (bfd_error_system_call);
3890 err_exit:
3891 info->callbacks->einfo ("%F%P: auto overlay error: %E\n");
3892 xexit (1);
3893}
3894
3895/* Provide an estimate of total stack required. */
3896
3897static bfd_boolean
3898 spu_elf_stack_analysis (struct bfd_link_info *info, int emit_stack_syms)
3899 {
3900   struct _sum_stack_param sum_stack_param;
3901 
3902   if (!discover_functions (info))
3903 return FALSE;
3904
3905   if (!build_call_tree (info))
3906 return FALSE;
3907
3908 info->callbacks->info (_("Stack size for call graph root nodes.\n"));
3909 info->callbacks->minfo (_("\nStack size for functions. "
3910 "Annotations: '*' max stack, 't' tail call\n"));
3911 
3912 sum_stack_param.emit_stack_syms = emit_stack_syms;
3913 sum_stack_param.overall_stack = 0;
3914 if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
3915 return FALSE;
3916 
3917 info->callbacks->info (_("Maximum stack required is 0x%v\n"),
3918 (bfd_vma) sum_stack_param.overall_stack);
3919 return TRUE;
3920}
3921
3922/* Perform a final link. */
3923
3924static bfd_boolean
3925spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
3926{
3927 struct spu_link_hash_table *htab = spu_hash_table (info);
3928
3929 if (htab->auto_overlay)
3930 spu_elf_auto_overlay (info, htab->spu_elf_load_ovl_mgr);
3931
3932   if (htab->stack_analysis
3933       && !spu_elf_stack_analysis (info, htab->emit_stack_syms))
3934 info->callbacks->einfo ("%X%P: stack analysis error: %E\n");
3935
3936 return bfd_elf_final_link (output_bfd, info);
3937}
3938
3939/* Called when not normally emitting relocs, i.e. !info->relocatable
3940 and !info->emitrelocations. Returns a count of special relocs
3941 that need to be emitted. */
3942
3943static unsigned int
3944 spu_elf_count_relocs (struct bfd_link_info *info, asection *sec)
3945 {
3946   Elf_Internal_Rela *relocs;
3947   unsigned int count = 0;
3948 
3949 relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
3950 info->keep_memory);
3951 if (relocs != NULL)
3952     {
3953 Elf_Internal_Rela *rel;
3954 Elf_Internal_Rela *relend = relocs + sec->reloc_count;
3955
3956 for (rel = relocs; rel < relend; rel++)
3957 {
3958 int r_type = ELF32_R_TYPE (rel->r_info);
3959 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
3960 ++count;
3961 }
3962
3963 if (elf_section_data (sec)->relocs != relocs)
3964 free (relocs);
3965 }
3966
3967 return count;
3968}
3969
3970/* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
3971
3972 static int
3973spu_elf_relocate_section (bfd *output_bfd,
3974 struct bfd_link_info *info,
3975 bfd *input_bfd,
3976 asection *input_section,
3977 bfd_byte *contents,
3978 Elf_Internal_Rela *relocs,
3979 Elf_Internal_Sym *local_syms,
3980 asection **local_sections)
3981{
3982 Elf_Internal_Shdr *symtab_hdr;
3983 struct elf_link_hash_entry **sym_hashes;
3984 Elf_Internal_Rela *rel, *relend;
3985 struct spu_link_hash_table *htab;
3986   asection *ea = bfd_get_section_by_name (output_bfd, "._ea");
3987   int ret = TRUE;
3988   bfd_boolean emit_these_relocs = FALSE;
3989   bfd_boolean is_ea_sym;
3990   bfd_boolean stubs;
3991 
3992   htab = spu_hash_table (info);
3993 stubs = (htab->stub_sec != NULL
3994 && maybe_needs_stubs (input_section, output_bfd));
3995 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
3996 sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));
3997
3998 rel = relocs;
3999 relend = relocs + input_section->reloc_count;
4000 for (; rel < relend; rel++)
4001 {
4002 int r_type;
4003 reloc_howto_type *howto;
4004       unsigned int r_symndx;
4005 Elf_Internal_Sym *sym;
4006 asection *sec;
4007 struct elf_link_hash_entry *h;
4008 const char *sym_name;
4009 bfd_vma relocation;
4010 bfd_vma addend;
4011 bfd_reloc_status_type r;
4012 bfd_boolean unresolved_reloc;
4013 bfd_boolean warned;
4014       enum _stub_type stub_type;
4015
4016 r_symndx = ELF32_R_SYM (rel->r_info);
4017 r_type = ELF32_R_TYPE (rel->r_info);
4018 howto = elf_howto_table + r_type;
4019 unresolved_reloc = FALSE;
4020 warned = FALSE;
4021 h = NULL;
4022 sym = NULL;
4023 sec = NULL;
4024 if (r_symndx < symtab_hdr->sh_info)
4025 {
4026 sym = local_syms + r_symndx;
4027 sec = local_sections[r_symndx];
4028 sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
4029 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
4030 }
4031 else
4032 {
4033 if (sym_hashes == NULL)
4034 return FALSE;
4035
4036 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
4037
4038 while (h->root.type == bfd_link_hash_indirect
4039 || h->root.type == bfd_link_hash_warning)
4040 h = (struct elf_link_hash_entry *) h->root.u.i.link;
4041
4042 relocation = 0;
4043 if (h->root.type == bfd_link_hash_defined
4044 || h->root.type == bfd_link_hash_defweak)
4045 {
4046 sec = h->root.u.def.section;
4047 if (sec == NULL
4048 || sec->output_section == NULL)
4049 /* Set a flag that will be cleared later if we find a
4050 relocation value for this symbol. output_section
4051 is typically NULL for symbols satisfied by a shared
4052 library. */
4053 unresolved_reloc = TRUE;
4054 else
4055 relocation = (h->root.u.def.value
4056 + sec->output_section->vma
4057 + sec->output_offset);
4058 }
4059 else if (h->root.type == bfd_link_hash_undefweak)
4060 ;
4061 else if (info->unresolved_syms_in_objects == RM_IGNORE
4062 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
4063 ;
4064 else if (!info->relocatable
4065 && !(r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64))
4066 {
4067 bfd_boolean err;
4068 err = (info->unresolved_syms_in_objects == RM_GENERATE_ERROR
4069 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT);
4070 if (!info->callbacks->undefined_symbol (info,
4071 h->root.root.string,
4072 input_bfd,
4073 input_section,
4074 rel->r_offset, err))
4075 return FALSE;
4076 warned = TRUE;
4077 }
4078 sym_name = h->root.root.string;
4079 }
4080
4081 if (sec != NULL && elf_discarded_section (sec))
4082 {
4083 /* For relocs against symbols from removed linkonce sections,
4084 or sections discarded by a linker script, we just want the
4085 section contents zeroed. Avoid any special processing. */
4086 _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
4087 rel->r_info = 0;
4088 rel->r_addend = 0;
4089 continue;
4090 }
4091
4092 if (info->relocatable)
4093 continue;
4094
4095 is_ea_sym = (ea != NULL
4096 && sec != NULL
4097 && sec->output_section == ea);
4098
4099 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
4100 {
4101 	  if (is_ea_sym)
4102 {
4103 /* ._ea is a special section that isn't allocated in SPU
4104 memory, but rather occupies space in PPU memory as
4105 part of an embedded ELF image. If this reloc is
4106 against a symbol defined in ._ea, then transform the
4107 		 reloc into an equivalent one, without a symbol,
4108 		 relative to the start of the ELF image.  */
4109 rel->r_addend += (relocation
4110 - ea->vma
4111 + elf_section_data (ea)->this_hdr.sh_offset);
4112 rel->r_info = ELF32_R_INFO (0, r_type);
4113 }
4114 emit_these_relocs = TRUE;
4115 continue;
4116 }
4117
4118       if (is_ea_sym)
4119 unresolved_reloc = TRUE;
4120
4121 if (unresolved_reloc)
4122 {
4123 (*_bfd_error_handler)
4124 (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
4125 input_bfd,
4126 bfd_get_section_name (input_bfd, input_section),
4127 (long) rel->r_offset,
4128 howto->name,
4129 sym_name);
4130 ret = FALSE;
4131 }
4132
4133 /* If this symbol is in an overlay area, we may need to relocate
4134 to the overlay stub. */
4135 addend = rel->r_addend;
4136 if (stubs
4137 && (stub_type = needs_ovl_stub (h, sym, sec, input_section, rel,
4138 contents, info)) != no_stub)
4139 	{
4140 unsigned int ovl = 0;
4141 struct got_entry *g, **head;
4142 
4143 if (stub_type != nonovl_stub)
4144 ovl = (spu_elf_section_data (input_section->output_section)
4145 ->u.o.ovl_index);
4146 
4147 if (h != NULL)
4148 head = &h->got.glist;
4149 else
4150 head = elf_local_got_ents (input_bfd) + r_symndx;
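	  /* Stub addresses were recorded per (symbol, addend, overlay).
	     An entry with ovl == 0 is reachable from any overlay, so it
	     matches whatever this section's overlay index is.  */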
4151 
4152 for (g = *head; g != NULL; g = g->next)
4153 if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
4154 break;
4155 if (g == NULL)
4156 abort ();
4157 
4158 relocation = g->stub_addr;
4159 addend = 0;
4160 }
4161
4162 r = _bfd_final_link_relocate (howto,
4163 input_bfd,
4164 input_section,
4165 contents,
4166 rel->r_offset, relocation, addend);
4167
4168 if (r != bfd_reloc_ok)
4169 {
4170 const char *msg = (const char *) 0;
4171
4172 switch (r)
4173 {
4174 case bfd_reloc_overflow:
4175 if (!((*info->callbacks->reloc_overflow)
4176 (info, (h ? &h->root : NULL), sym_name, howto->name,
4177 (bfd_vma) 0, input_bfd, input_section, rel->r_offset)))
4178 return FALSE;
4179 break;
4180
4181 case bfd_reloc_undefined:
4182 if (!((*info->callbacks->undefined_symbol)
4183 (info, sym_name, input_bfd, input_section,
4184 rel->r_offset, TRUE)))
4185 return FALSE;
4186 break;
4187
4188 case bfd_reloc_outofrange:
4189 msg = _("internal error: out of range error");
4190 goto common_error;
4191
4192 case bfd_reloc_notsupported:
4193 msg = _("internal error: unsupported relocation error");
4194 goto common_error;
4195
4196 case bfd_reloc_dangerous:
4197 msg = _("internal error: dangerous error");
4198 goto common_error;
4199
4200 default:
4201 msg = _("internal error: unknown error");
4202 /* fall through */
4203
4204 common_error:
4205 	      ret = FALSE;
4206 if (!((*info->callbacks->warning)
4207 (info, msg, sym_name, input_bfd, input_section,
4208 rel->r_offset)))
4209 return FALSE;
4210 break;
4211 }
4212 }
4213 }
4214
4215 if (ret
4216 && emit_these_relocs
4217 && !info->emitrelocations)
4218 {
4219 Elf_Internal_Rela *wrel;
4220 Elf_Internal_Shdr *rel_hdr;
4221
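      /* Squash the reloc array in place so that only the R_SPU_PPU32 and
	 R_SPU_PPU64 entries noted above are passed on to be emitted.  */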
4222 wrel = rel = relocs;
4223 relend = relocs + input_section->reloc_count;
4224 for (; rel < relend; rel++)
4225 {
4226 int r_type;
4227
4228 r_type = ELF32_R_TYPE (rel->r_info);
4229 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
4230 *wrel++ = *rel;
4231 }
4232 input_section->reloc_count = wrel - relocs;
4233 /* Backflips for _bfd_elf_link_output_relocs. */
4234 rel_hdr = &elf_section_data (input_section)->rel_hdr;
4235 rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
4236 ret = 2;
4237 }
4238
4239 return ret;
4240}
4241
4242/* Adjust _SPUEAR_ syms to point at their overlay stubs. */
4243
4244static bfd_boolean
4245spu_elf_output_symbol_hook (struct bfd_link_info *info,
4246 const char *sym_name ATTRIBUTE_UNUSED,
4247 Elf_Internal_Sym *sym,
4248 asection *sym_sec ATTRIBUTE_UNUSED,
4249 struct elf_link_hash_entry *h)
4250{
4251 struct spu_link_hash_table *htab = spu_hash_table (info);
4252
4253 if (!info->relocatable
4254       && htab->stub_sec != NULL
4255 && h != NULL
4256 && (h->root.type == bfd_link_hash_defined
4257 || h->root.type == bfd_link_hash_defweak)
4258 && h->def_regular
4259 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
4260 {
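      /* Use the stub recorded with no addend and ovl == 0, i.e. the one
	 placed in the first stub section.  */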
4261       struct got_entry *g;
4262 
4263 for (g = h->got.glist; g != NULL; g = g->next)
4264 if (g->addend == 0 && g->ovl == 0)
4265 {
4266 sym->st_shndx = (_bfd_elf_section_from_bfd_section
4267 (htab->stub_sec[0]->output_section->owner,
4268 htab->stub_sec[0]->output_section));
4269 sym->st_value = g->stub_addr;
4270 break;
4271 }
4272 }
4273
4274 return TRUE;
4275}
4276
4277static int spu_plugin = 0;
4278
4279void
4280spu_elf_plugin (int val)
4281{
4282 spu_plugin = val;
4283}
4284
4285/* Set ELF header e_type for plugins. */
4286
4287static void
4288spu_elf_post_process_headers (bfd *abfd,
4289 struct bfd_link_info *info ATTRIBUTE_UNUSED)
4290{
4291 if (spu_plugin)
4292 {
4293 Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);
4294
4295 i_ehdrp->e_type = ET_DYN;
4296 }
4297}
4298
4299/* We may add an extra PT_LOAD segment for .toe. We also need extra
4300 segments for overlays. */
4301
4302static int
4303spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
4304{
4305   int extra = 0;
4306 asection *sec;
4307
4308 if (info != NULL)
4309 {
4310 struct spu_link_hash_table *htab = spu_hash_table (info);
4311 extra = htab->num_overlays;
4312 }
4313
4314 if (extra)
4315 ++extra;
4316
4317 sec = bfd_get_section_by_name (abfd, ".toe");
4318 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
4319 ++extra;
4320
4321 return extra;
4322}
4323
4324/* Remove .toe section from other PT_LOAD segments and put it in
4325 a segment of its own. Put overlays in separate segments too. */
4326
4327static bfd_boolean
4328spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
4329{
4330 asection *toe, *s;
4331 struct elf_segment_map *m;
4332 unsigned int i;
4333
4334 if (info == NULL)
4335 return TRUE;
4336
4337 toe = bfd_get_section_by_name (abfd, ".toe");
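  /* For any PT_LOAD mapping more than one section, find the first .toe or
     overlay section and split the map there: sections following it move to
     a new PT_LOAD, and if it is not already the first section it is given
     a single-section PT_LOAD of its own.  */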
4338 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
4339 if (m->p_type == PT_LOAD && m->count > 1)
4340 for (i = 0; i < m->count; i++)
4341 if ((s = m->sections[i]) == toe
4342 	    || spu_elf_section_data (s)->u.o.ovl_index != 0)
4343 {
4344 struct elf_segment_map *m2;
4345 bfd_vma amt;
4346
4347 if (i + 1 < m->count)
4348 {
4349 amt = sizeof (struct elf_segment_map);
4350 amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
4351 m2 = bfd_zalloc (abfd, amt);
4352 if (m2 == NULL)
4353 return FALSE;
4354 m2->count = m->count - (i + 1);
4355 memcpy (m2->sections, m->sections + i + 1,
4356 m2->count * sizeof (m->sections[0]));
4357 m2->p_type = PT_LOAD;
4358 m2->next = m->next;
4359 m->next = m2;
4360 }
4361 m->count = 1;
4362 if (i != 0)
4363 {
4364 m->count = i;
4365 amt = sizeof (struct elf_segment_map);
4366 m2 = bfd_zalloc (abfd, amt);
4367 if (m2 == NULL)
4368 return FALSE;
4369 m2->p_type = PT_LOAD;
4370 m2->count = 1;
4371 m2->sections[0] = s;
4372 m2->next = m->next;
4373 m->next = m2;
4374 }
4375 break;
4376 }
4377
4378 return TRUE;
4379}
4380
4381/* Tweak the section type of .note.spu_name. */
4382
4383static bfd_boolean
4384spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
4385 Elf_Internal_Shdr *hdr,
4386 asection *sec)
4387{
4388 if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
4389 hdr->sh_type = SHT_NOTE;
4390 return TRUE;
4391}
4392
4393/* Tweak phdrs before writing them out. */
4394
4395static int
4396spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
4397{
4398 const struct elf_backend_data *bed;
4399 struct elf_obj_tdata *tdata;
4400 Elf_Internal_Phdr *phdr, *last;
4401 struct spu_link_hash_table *htab;
4402 unsigned int count;
4403 unsigned int i;
4404
4405 if (info == NULL)
4406 return TRUE;
4407
4408 bed = get_elf_backend_data (abfd);
4409 tdata = elf_tdata (abfd);
4410 phdr = tdata->phdr;
4411 count = tdata->program_header_size / bed->s->sizeof_phdr;
4412 htab = spu_hash_table (info);
4413 if (htab->num_overlays != 0)
4414 {
4415 struct elf_segment_map *m;
4416 unsigned int o;
4417
4418 for (i = 0, m = elf_tdata (abfd)->segment_map; m; ++i, m = m->next)
4419 if (m->count != 0
4420 	    && (o = spu_elf_section_data (m->sections[0])->u.o.ovl_index) != 0)
4421 {
4422 /* Mark this as an overlay header. */
4423 phdr[i].p_flags |= PF_OVERLAY;
4424
4425 if (htab->ovtab != NULL && htab->ovtab->size != 0)
4426 {
4427 bfd_byte *p = htab->ovtab->contents;
4428 	      unsigned int off = o * 16 + 8;
4429
4430 /* Write file_off into _ovly_table. */
4431 bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
4432 }
4433 }
4434 }
4435
4436 /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
4437 of 16. This should always be possible when using the standard
4438 linker scripts, but don't create overlapping segments if
4439 someone is playing games with linker scripts. */
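  /* The first, backwards scan only checks that padding each PT_LOAD
     (-x & 15 is the distance from x to the next multiple of 16) cannot
     run into the following segment; the padding itself is applied in the
     second loop, and only if every segment passed the check.  */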
4440 last = NULL;
4441 for (i = count; i-- != 0; )
4442 if (phdr[i].p_type == PT_LOAD)
4443 {
4444 unsigned adjust;
4445
4446 adjust = -phdr[i].p_filesz & 15;
4447 if (adjust != 0
4448 && last != NULL
4449 && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
4450 break;
4451
4452 adjust = -phdr[i].p_memsz & 15;
4453 if (adjust != 0
4454 && last != NULL
4455 && phdr[i].p_filesz != 0
4456 && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
4457 && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
4458 break;
4459
4460 if (phdr[i].p_filesz != 0)
4461 last = &phdr[i];
4462 }
4463
4464 if (i == (unsigned int) -1)
4465 for (i = count; i-- != 0; )
4466 if (phdr[i].p_type == PT_LOAD)
4467 {
4468 unsigned adjust;
4469
4470 adjust = -phdr[i].p_filesz & 15;
4471 phdr[i].p_filesz += adjust;
4472
4473 adjust = -phdr[i].p_memsz & 15;
4474 phdr[i].p_memsz += adjust;
4475 }
4476
4477 return TRUE;
4478}
4479
4480#define TARGET_BIG_SYM bfd_elf32_spu_vec
4481#define TARGET_BIG_NAME "elf32-spu"
4482#define ELF_ARCH bfd_arch_spu
4483#define ELF_MACHINE_CODE EM_SPU
4484/* This matches the alignment need for DMA. */
4485#define ELF_MAXPAGESIZE 0x80
4486#define elf_backend_rela_normal 1
4487#define elf_backend_can_gc_sections 1
4488
4489#define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
4490 #define bfd_elf32_bfd_reloc_name_lookup	spu_elf_reloc_name_lookup
4491 #define elf_info_to_howto			spu_elf_info_to_howto
4492 #define elf_backend_count_relocs		spu_elf_count_relocs
4493#define elf_backend_relocate_section spu_elf_relocate_section
4494#define elf_backend_symbol_processing spu_elf_backend_symbol_processing
4495 #define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
4496 #define elf_backend_object_p			spu_elf_object_p
4497#define bfd_elf32_new_section_hook spu_elf_new_section_hook
4498#define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create
4499
4500#define elf_backend_additional_program_headers spu_elf_additional_program_headers
4501#define elf_backend_modify_segment_map spu_elf_modify_segment_map
4502#define elf_backend_modify_program_headers spu_elf_modify_program_headers
4503#define elf_backend_post_process_headers spu_elf_post_process_headers
4504 #define elf_backend_fake_sections		spu_elf_fake_sections
4505 #define elf_backend_special_sections		spu_elf_special_sections
4506 #define bfd_elf32_bfd_final_link		spu_elf_final_link
4507
4508#include "elf32-target.h"