1/* SPU specific support for 32-bit ELF
2
3 Copyright 2006, 2007, 2008 Free Software Foundation, Inc.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
20
21#include "sysdep.h"
22#include "libiberty.h"
23#include "bfd.h"
24#include "bfdlink.h"
25#include "libbfd.h"
26#include "elf-bfd.h"
27#include "elf/spu.h"
28#include "elf32-spu.h"
29
30/* We use RELA style relocs. Don't define USE_REL. */
31
32static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
33 void *, asection *,
34 bfd *, char **);
35
36/* Values of type 'enum elf_spu_reloc_type' are used to index this
37 array, so it must be declared in the order of that type. */
38
39static reloc_howto_type elf_howto_table[] = {
40 HOWTO (R_SPU_NONE, 0, 0, 0, FALSE, 0, complain_overflow_dont,
41 bfd_elf_generic_reloc, "SPU_NONE",
42 FALSE, 0, 0x00000000, FALSE),
43 HOWTO (R_SPU_ADDR10, 4, 2, 10, FALSE, 14, complain_overflow_bitfield,
44 bfd_elf_generic_reloc, "SPU_ADDR10",
45 FALSE, 0, 0x00ffc000, FALSE),
46 HOWTO (R_SPU_ADDR16, 2, 2, 16, FALSE, 7, complain_overflow_bitfield,
47 bfd_elf_generic_reloc, "SPU_ADDR16",
48 FALSE, 0, 0x007fff80, FALSE),
49 HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE, 7, complain_overflow_bitfield,
50 bfd_elf_generic_reloc, "SPU_ADDR16_HI",
51 FALSE, 0, 0x007fff80, FALSE),
52 HOWTO (R_SPU_ADDR16_LO, 0, 2, 16, FALSE, 7, complain_overflow_dont,
53 bfd_elf_generic_reloc, "SPU_ADDR16_LO",
54 FALSE, 0, 0x007fff80, FALSE),
55 HOWTO (R_SPU_ADDR18, 0, 2, 18, FALSE, 7, complain_overflow_bitfield,
56 bfd_elf_generic_reloc, "SPU_ADDR18",
57 FALSE, 0, 0x01ffff80, FALSE),
58 HOWTO (R_SPU_ADDR32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
59 bfd_elf_generic_reloc, "SPU_ADDR32",
60 FALSE, 0, 0xffffffff, FALSE),
61 HOWTO (R_SPU_REL16, 2, 2, 16, TRUE, 7, complain_overflow_bitfield,
62 bfd_elf_generic_reloc, "SPU_REL16",
63 FALSE, 0, 0x007fff80, TRUE),
64 HOWTO (R_SPU_ADDR7, 0, 2, 7, FALSE, 14, complain_overflow_dont,
65 bfd_elf_generic_reloc, "SPU_ADDR7",
66 FALSE, 0, 0x001fc000, FALSE),
67 HOWTO (R_SPU_REL9, 2, 2, 9, TRUE, 0, complain_overflow_signed,
68 spu_elf_rel9, "SPU_REL9",
69 FALSE, 0, 0x0180007f, TRUE),
70 HOWTO (R_SPU_REL9I, 2, 2, 9, TRUE, 0, complain_overflow_signed,
71 spu_elf_rel9, "SPU_REL9I",
72 FALSE, 0, 0x0000c07f, TRUE),
73 HOWTO (R_SPU_ADDR10I, 0, 2, 10, FALSE, 14, complain_overflow_signed,
74 bfd_elf_generic_reloc, "SPU_ADDR10I",
75 FALSE, 0, 0x00ffc000, FALSE),
76 HOWTO (R_SPU_ADDR16I, 0, 2, 16, FALSE, 7, complain_overflow_signed,
77 bfd_elf_generic_reloc, "SPU_ADDR16I",
78 FALSE, 0, 0x007fff80, FALSE),
79 HOWTO (R_SPU_REL32, 0, 2, 32, TRUE, 0, complain_overflow_dont,
80 bfd_elf_generic_reloc, "SPU_REL32",
81 FALSE, 0, 0xffffffff, TRUE),
82 HOWTO (R_SPU_ADDR16X, 0, 2, 16, FALSE, 7, complain_overflow_bitfield,
83 bfd_elf_generic_reloc, "SPU_ADDR16X",
84 FALSE, 0, 0x007fff80, FALSE),
85 HOWTO (R_SPU_PPU32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
86 bfd_elf_generic_reloc, "SPU_PPU32",
87 FALSE, 0, 0xffffffff, FALSE),
88 HOWTO (R_SPU_PPU64, 0, 4, 64, FALSE, 0, complain_overflow_dont,
89 bfd_elf_generic_reloc, "SPU_PPU64",
90 FALSE, 0, -1, FALSE),
91};
92
93static struct bfd_elf_special_section const spu_elf_special_sections[] = {
94 { "._ea", 4, 0, SHT_PROGBITS, SHF_WRITE },
95 { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
96 { NULL, 0, 0, 0, 0 }
97};
98
99static enum elf_spu_reloc_type
100spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
101{
102 switch (code)
103 {
104 default:
105 return R_SPU_NONE;
106 case BFD_RELOC_SPU_IMM10W:
107 return R_SPU_ADDR10;
108 case BFD_RELOC_SPU_IMM16W:
109 return R_SPU_ADDR16;
110 case BFD_RELOC_SPU_LO16:
111 return R_SPU_ADDR16_LO;
112 case BFD_RELOC_SPU_HI16:
113 return R_SPU_ADDR16_HI;
114 case BFD_RELOC_SPU_IMM18:
115 return R_SPU_ADDR18;
116 case BFD_RELOC_SPU_PCREL16:
117 return R_SPU_REL16;
118 case BFD_RELOC_SPU_IMM7:
119 return R_SPU_ADDR7;
120 case BFD_RELOC_SPU_IMM8:
121 return R_SPU_NONE;
122 case BFD_RELOC_SPU_PCREL9a:
123 return R_SPU_REL9;
124 case BFD_RELOC_SPU_PCREL9b:
125 return R_SPU_REL9I;
126 case BFD_RELOC_SPU_IMM10:
127 return R_SPU_ADDR10I;
128 case BFD_RELOC_SPU_IMM16:
129 return R_SPU_ADDR16I;
130 case BFD_RELOC_32:
131 return R_SPU_ADDR32;
132 case BFD_RELOC_32_PCREL:
133 return R_SPU_REL32;
134 case BFD_RELOC_SPU_PPU32:
135 return R_SPU_PPU32;
136 case BFD_RELOC_SPU_PPU64:
137 return R_SPU_PPU64;
138 }
139}
140
141static void
142spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
143 arelent *cache_ptr,
144 Elf_Internal_Rela *dst)
145{
146 enum elf_spu_reloc_type r_type;
147
148 r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
149 BFD_ASSERT (r_type < R_SPU_max);
150 cache_ptr->howto = &elf_howto_table[(int) r_type];
151}
152
153static reloc_howto_type *
154spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
155 bfd_reloc_code_real_type code)
156{
157 enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);
158
159 if (r_type == R_SPU_NONE)
160 return NULL;
161
162 return elf_howto_table + r_type;
163}
164
165static reloc_howto_type *
166spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
167 const char *r_name)
168{
169 unsigned int i;
170
171 for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
172 if (elf_howto_table[i].name != NULL
173 && strcasecmp (elf_howto_table[i].name, r_name) == 0)
174 return &elf_howto_table[i];
175
176 return NULL;
177}
178
179/* Apply R_SPU_REL9 and R_SPU_REL9I relocs. */
180
181static bfd_reloc_status_type
182spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
183 void *data, asection *input_section,
184 bfd *output_bfd, char **error_message)
185{
186 bfd_size_type octets;
187 bfd_vma val;
188 long insn;
189
190 /* If this is a relocatable link (output_bfd test tells us), just
191 call the generic function. Any adjustment will be done at final
192 link time. */
193 if (output_bfd != NULL)
194 return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
195 input_section, output_bfd, error_message);
196
197 if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
198 return bfd_reloc_outofrange;
199 octets = reloc_entry->address * bfd_octets_per_byte (abfd);
200
201 /* Get symbol value. */
202 val = 0;
203 if (!bfd_is_com_section (symbol->section))
204 val = symbol->value;
205 if (symbol->section->output_section)
206 val += symbol->section->output_section->vma;
207
208 val += reloc_entry->addend;
209
210 /* Make it pc-relative. */
211 val -= input_section->output_section->vma + input_section->output_offset;
212
213 val >>= 2;
214 if (val + 256 >= 512)
215 return bfd_reloc_overflow;
216
217 insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);
218
219 /* Move two high bits of value to REL9I and REL9 position.
220 The mask will take care of selecting the right field. */
221 val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
222 insn &= ~reloc_entry->howto->dst_mask;
223 insn |= val & reloc_entry->howto->dst_mask;
224 bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
225 return bfd_reloc_ok;
226}
227
228static bfd_boolean
229spu_elf_new_section_hook (bfd *abfd, asection *sec)
230{
231 if (!sec->used_by_bfd)
232 {
233 struct _spu_elf_section_data *sdata;
234
235 sdata = bfd_zalloc (abfd, sizeof (*sdata));
236 if (sdata == NULL)
237 return FALSE;
238 sec->used_by_bfd = sdata;
239 }
240
241 return _bfd_elf_new_section_hook (abfd, sec);
242}
243
244/* Set up overlay info for executables. */
245
246static bfd_boolean
247spu_elf_object_p (bfd *abfd)
248{
249 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
250 {
251 unsigned int i, num_ovl, num_buf;
252 Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
253 Elf_Internal_Ehdr *ehdr = elf_elfheader (abfd);
254 Elf_Internal_Phdr *last_phdr = NULL;
255
256 for (num_buf = 0, num_ovl = 0, i = 0; i < ehdr->e_phnum; i++, phdr++)
257 if (phdr->p_type == PT_LOAD && (phdr->p_flags & PF_OVERLAY) != 0)
258 {
259 unsigned int j;
260
261 ++num_ovl;
262 if (last_phdr == NULL
263 || ((last_phdr->p_vaddr ^ phdr->p_vaddr) & 0x3ffff) != 0)
264 ++num_buf;
265 last_phdr = phdr;
266 for (j = 1; j < elf_numsections (abfd); j++)
267 {
268 Elf_Internal_Shdr *shdr = elf_elfsections (abfd)[j];
269
270 if (ELF_IS_SECTION_IN_SEGMENT_MEMORY (shdr, phdr))
271 {
272 asection *sec = shdr->bfd_section;
273 spu_elf_section_data (sec)->u.o.ovl_index = num_ovl;
274 spu_elf_section_data (sec)->u.o.ovl_buf = num_buf;
275 }
276 }
277 }
278 }
279 return TRUE;
280}
281
282/* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
283 strip --strip-unneeded will not remove them. */
284
285static void
286spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
287{
288 if (sym->name != NULL
289 && sym->section != bfd_abs_section_ptr
290 && strncmp (sym->name, "_EAR_", 5) == 0)
291 sym->flags |= BSF_KEEP;
292}
293
294/* SPU ELF linker hash table. */
295
296struct spu_link_hash_table
297{
298 struct elf_link_hash_table elf;
299
300 /* Shortcuts to overlay sections. */
301 asection *ovtab;
302 asection *toe;
303 asection **ovl_sec;
304
305 /* Count of stubs in each overlay section. */
306 unsigned int *stub_count;
307
308 /* The stub section for each overlay section. */
309 asection **stub_sec;
310
311 struct elf_link_hash_entry *ovly_load;
312 struct elf_link_hash_entry *ovly_return;
313 unsigned long ovly_load_r_symndx;
314
315 /* Number of overlay buffers. */
316 unsigned int num_buf;
317
318 /* Total number of overlays. */
319 unsigned int num_overlays;
320
321 /* How much memory we have. */
322 unsigned int local_store;
323 /* Local store --auto-overlay should reserve for non-overlay
324 functions and data. */
325 unsigned int overlay_fixed;
326 /* Local store --auto-overlay should reserve for stack and heap. */
327 unsigned int reserved;
328 /* If reserved is not specified, stack analysis will calculate a value
329 for the stack. This parameter adjusts that value to allow for
330 negative sp access (the ABI says 2000 bytes below sp are valid,
331 and the overlay manager uses some of this area). */
332 int extra_stack_space;
333 /* Count of overlay stubs needed in non-overlay area. */
334 unsigned int non_ovly_stub;
335
336 /* Stash various callbacks for --auto-overlay. */
337 void (*spu_elf_load_ovl_mgr) (void);
338 FILE *(*spu_elf_open_overlay_script) (void);
339 void (*spu_elf_relink) (void);
340
341 /* Bit 0 set if --auto-overlay.
342 Bit 1 set if --auto-relink.
343 Bit 2 set if --overlay-rodata. */
344 unsigned int auto_overlay : 3;
345#define AUTO_OVERLAY 1
346#define AUTO_RELINK 2
347#define OVERLAY_RODATA 4
348
349 /* Set if we should emit symbols for stubs. */
350 unsigned int emit_stub_syms:1;
351
352 /* Set if we want stubs on calls out of overlay regions to
353 non-overlay regions. */
354 unsigned int non_overlay_stubs : 1;
355
356 /* Set on error. */
357 unsigned int stub_err : 1;
358
359 /* Set if stack size analysis should be done. */
360 unsigned int stack_analysis : 1;
361
362 /* Set if __stack_* syms will be emitted. */
363 unsigned int emit_stack_syms : 1;
364};
365
366/* Hijack the generic got fields for overlay stub accounting. */
367
368struct got_entry
369{
370 struct got_entry *next;
371 unsigned int ovl;
372 bfd_vma addend;
373 bfd_vma stub_addr;
374};
375
376#define spu_hash_table(p) \
377 ((struct spu_link_hash_table *) ((p)->hash))
378
379/* Create a spu ELF linker hash table. */
380
381static struct bfd_link_hash_table *
382spu_elf_link_hash_table_create (bfd *abfd)
383{
384 struct spu_link_hash_table *htab;
385
386 htab = bfd_malloc (sizeof (*htab));
387 if (htab == NULL)
388 return NULL;
389
390 if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
391 _bfd_elf_link_hash_newfunc,
392 sizeof (struct elf_link_hash_entry)))
393 {
394 free (htab);
395 return NULL;
396 }
397
398 memset (&htab->ovtab, 0,
399 sizeof (*htab) - offsetof (struct spu_link_hash_table, ovtab));
400
401 htab->elf.init_got_refcount.refcount = 0;
402 htab->elf.init_got_refcount.glist = NULL;
403 htab->elf.init_got_offset.offset = 0;
404 htab->elf.init_got_offset.glist = NULL;
405 return &htab->elf.root;
406}
407
408/* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
409 to (hash, NULL) for global symbols, and (NULL, sym) for locals. Set
410 *SYMSECP to the symbol's section. *LOCSYMSP caches local syms. */
411
412static bfd_boolean
413get_sym_h (struct elf_link_hash_entry **hp,
414 Elf_Internal_Sym **symp,
415 asection **symsecp,
416 Elf_Internal_Sym **locsymsp,
417 unsigned long r_symndx,
418 bfd *ibfd)
419{
420 Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
421
422 if (r_symndx >= symtab_hdr->sh_info)
423 {
424 struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
425 struct elf_link_hash_entry *h;
426
427 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
428 while (h->root.type == bfd_link_hash_indirect
429 || h->root.type == bfd_link_hash_warning)
430 h = (struct elf_link_hash_entry *) h->root.u.i.link;
431
432 if (hp != NULL)
433 *hp = h;
434
435 if (symp != NULL)
436 *symp = NULL;
437
438 if (symsecp != NULL)
439 {
440 asection *symsec = NULL;
441 if (h->root.type == bfd_link_hash_defined
442 || h->root.type == bfd_link_hash_defweak)
443 symsec = h->root.u.def.section;
444 *symsecp = symsec;
445 }
446 }
447 else
448 {
449 Elf_Internal_Sym *sym;
450 Elf_Internal_Sym *locsyms = *locsymsp;
451
452 if (locsyms == NULL)
453 {
454 locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
455 if (locsyms == NULL)
456 locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr,
457 symtab_hdr->sh_info,
458 0, NULL, NULL, NULL);
459 if (locsyms == NULL)
460 return FALSE;
461 *locsymsp = locsyms;
462 }
463 sym = locsyms + r_symndx;
464
465 if (hp != NULL)
466 *hp = NULL;
467
468 if (symp != NULL)
469 *symp = sym;
470
471 if (symsecp != NULL)
472 *symsecp = bfd_section_from_elf_index (ibfd, sym->st_shndx);
473 }
474
475 return TRUE;
476}
477
478/* Create the note section if not already present. This is done early so
479 that the linker maps the sections to the right place in the output. */
480
481bfd_boolean
482spu_elf_create_sections (struct bfd_link_info *info,
483 int stack_analysis,
484 int emit_stack_syms)
485{
486 bfd *ibfd;
487 struct spu_link_hash_table *htab = spu_hash_table (info);
488
489 /* Stash some options away where we can get at them later. */
490 htab->stack_analysis = stack_analysis;
491 htab->emit_stack_syms = emit_stack_syms;
492
493 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
494 if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
495 break;
496
497 if (ibfd == NULL)
498 {
499 /* Make SPU_PTNOTE_SPUNAME section. */
500 asection *s;
501 size_t name_len;
502 size_t size;
503 bfd_byte *data;
504 flagword flags;
505
506 ibfd = info->input_bfds;
507 flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
508 s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
509 if (s == NULL
510 || !bfd_set_section_alignment (ibfd, s, 4))
511 return FALSE;
512
513 name_len = strlen (bfd_get_filename (info->output_bfd)) + 1;
514 size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
515 size += (name_len + 3) & -4;
516
517 if (!bfd_set_section_size (ibfd, s, size))
518 return FALSE;
519
520 data = bfd_zalloc (ibfd, size);
521 if (data == NULL)
522 return FALSE;
523
524 bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
525 bfd_put_32 (ibfd, name_len, data + 4);
526 bfd_put_32 (ibfd, 1, data + 8);
527 memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
528 memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
529 bfd_get_filename (info->output_bfd), name_len);
530 s->contents = data;
531 }
532
533 return TRUE;
534}
535
536/* qsort predicate to sort sections by vma. */
537
538static int
539sort_sections (const void *a, const void *b)
540{
541 const asection *const *s1 = a;
542 const asection *const *s2 = b;
543 bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;
544
545 if (delta != 0)
546 return delta < 0 ? -1 : 1;
547
548 return (*s1)->index - (*s2)->index;
549}
550
551/* Identify overlays in the output bfd, and number them. */
552
553bfd_boolean
554spu_elf_find_overlays (struct bfd_link_info *info)
555{
556 struct spu_link_hash_table *htab = spu_hash_table (info);
557 asection **alloc_sec;
558 unsigned int i, n, ovl_index, num_buf;
559 asection *s;
560 bfd_vma ovl_end;
561
562 if (info->output_bfd->section_count < 2)
563 return FALSE;
564
565 alloc_sec
566 = bfd_malloc (info->output_bfd->section_count * sizeof (*alloc_sec));
567 if (alloc_sec == NULL)
568 return FALSE;
569
570 /* Pick out all the alloced sections. */
571 for (n = 0, s = info->output_bfd->sections; s != NULL; s = s->next)
572 if ((s->flags & SEC_ALLOC) != 0
573 && (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
574 && s->size != 0)
575 alloc_sec[n++] = s;
576
577 if (n == 0)
578 {
579 free (alloc_sec);
580 return FALSE;
581 }
582
583 /* Sort them by vma. */
584 qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);
585
586 /* Look for overlapping vmas. Any with overlap must be overlays.
587 Count them. Also count the number of overlay regions. */
588 ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
589 for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
590 {
591 s = alloc_sec[i];
592 if (s->vma < ovl_end)
593 {
594 asection *s0 = alloc_sec[i - 1];
595
596 if (spu_elf_section_data (s0)->u.o.ovl_index == 0)
597 {
598 alloc_sec[ovl_index] = s0;
599 spu_elf_section_data (s0)->u.o.ovl_index = ++ovl_index;
600 spu_elf_section_data (s0)->u.o.ovl_buf = ++num_buf;
601 }
602 alloc_sec[ovl_index] = s;
603 spu_elf_section_data (s)->u.o.ovl_index = ++ovl_index;
604 spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
605 if (s0->vma != s->vma)
606 {
607 info->callbacks->einfo (_("%X%P: overlay sections %A and %A "
608 "do not start at the same address.\n"),
609 s0, s);
610 return FALSE;
611 }
612 if (ovl_end < s->vma + s->size)
613 ovl_end = s->vma + s->size;
614 }
615 else
616 ovl_end = s->vma + s->size;
617 }
618
619 htab->num_overlays = ovl_index;
620 htab->num_buf = num_buf;
621 htab->ovl_sec = alloc_sec;
622 htab->ovly_load = elf_link_hash_lookup (&htab->elf, "__ovly_load",
623 FALSE, FALSE, FALSE);
624 htab->ovly_return = elf_link_hash_lookup (&htab->elf, "__ovly_return",
625 FALSE, FALSE, FALSE);
626 return ovl_index != 0;
627}
628
629/* Support two sizes of overlay stubs, a slower more compact stub of two
630 instructions, and a faster stub of four instructions. */
631#ifndef OVL_STUB_SIZE
632/* Default to faster. */
633#define OVL_STUB_SIZE 16
634/* #define OVL_STUB_SIZE 8 */
635#endif
636#define BRSL 0x33000000
637#define BR 0x32000000
638#define NOP 0x40200000
639#define LNOP 0x00200000
640#define ILA 0x42000000
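/* Reading note (derived from build_stub below): these constants are full
   32-bit instruction words with only the opcode bits set.  build_stub
   forms complete instructions by or'ing the operand fields in, e.g. the
   target register in the low 7 bits and an 18-bit immediate shifted left
   by 7, as in ILA + ((ovl << 7) & 0x01ffff80) + 78.  */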
641
642/* Return true for all relative and absolute branch instructions.
643 bra 00110000 0..
644 brasl 00110001 0..
645 br 00110010 0..
646 brsl 00110011 0..
647 brz 00100000 0..
648 brnz 00100001 0..
649 brhz 00100010 0..
650 brhnz 00100011 0.. */
651
652static bfd_boolean
653is_branch (const unsigned char *insn)
654{
655 return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
656}
657
658/* Return true for all indirect branch instructions.
659 bi 00110101 000
660 bisl 00110101 001
661 iret 00110101 010
662 bisled 00110101 011
663 biz 00100101 000
664 binz 00100101 001
665 bihz 00100101 010
666 bihnz 00100101 011 */
667
668static bfd_boolean
669is_indirect_branch (const unsigned char *insn)
670{
671 return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
672}
673
674/* Return true for branch hint instructions.
675 hbra 0001000..
676 hbrr 0001001.. */
677
678static bfd_boolean
679is_hint (const unsigned char *insn)
680{
681 return (insn[0] & 0xfc) == 0x10;
682}
683
684/* True if INPUT_SECTION might need overlay stubs. */
685
686static bfd_boolean
687maybe_needs_stubs (asection *input_section, bfd *output_bfd)
688{
689 /* No stubs for debug sections and suchlike. */
690 if ((input_section->flags & SEC_ALLOC) == 0)
691 return FALSE;
692
693 /* No stubs for link-once sections that will be discarded. */
694 if (input_section->output_section == NULL
695 || input_section->output_section->owner != output_bfd)
696 return FALSE;
697
698 /* Don't create stubs for .eh_frame references. */
699 if (strcmp (input_section->name, ".eh_frame") == 0)
700 return FALSE;
701
702 return TRUE;
703}
704
705enum _stub_type
706{
707 no_stub,
708 ovl_stub,
709 nonovl_stub,
710 stub_error
711};
712
713/* Return non-zero if this reloc symbol should go via an overlay stub.
714 Return 2 if the stub must be in non-overlay area. */
715
716static enum _stub_type
717needs_ovl_stub (struct elf_link_hash_entry *h,
718 Elf_Internal_Sym *sym,
719 asection *sym_sec,
720 asection *input_section,
721 Elf_Internal_Rela *irela,
722 bfd_byte *contents,
723 struct bfd_link_info *info)
724{
725 struct spu_link_hash_table *htab = spu_hash_table (info);
726 enum elf_spu_reloc_type r_type;
727 unsigned int sym_type;
728 bfd_boolean branch;
729 enum _stub_type ret = no_stub;
730
731 if (sym_sec == NULL
732 || sym_sec->output_section == NULL
733 || sym_sec->output_section->owner != info->output_bfd
734 || spu_elf_section_data (sym_sec->output_section) == NULL)
735 return ret;
736
737 if (h != NULL)
738 {
739 /* Ensure no stubs for user supplied overlay manager syms. */
740 if (h == htab->ovly_load || h == htab->ovly_return)
741 return ret;
742
743 /* setjmp always goes via an overlay stub, because then the return
744 and hence the longjmp goes via __ovly_return. That magically
745 makes setjmp/longjmp between overlays work. */
746 if (strncmp (h->root.root.string, "setjmp", 6) == 0
747 && (h->root.root.string[6] == '\0' || h->root.root.string[6] == '@'))
748 ret = ovl_stub;
749 }
750
751 /* Usually, symbols in non-overlay sections don't need stubs. */
752 if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index == 0
753 && !htab->non_overlay_stubs)
754 return ret;
755
756 if (h != NULL)
757 sym_type = h->type;
758 else
759 sym_type = ELF_ST_TYPE (sym->st_info);
760
761 r_type = ELF32_R_TYPE (irela->r_info);
762 branch = FALSE;
763 if (r_type == R_SPU_REL16 || r_type == R_SPU_ADDR16)
764 {
765 bfd_byte insn[4];
766
767 if (contents == NULL)
768 {
769 contents = insn;
770 if (!bfd_get_section_contents (input_section->owner,
771 input_section,
772 contents,
773 irela->r_offset, 4))
774 return stub_error;
775 }
776 else
777 contents += irela->r_offset;
778
779 if (is_branch (contents) || is_hint (contents))
780 {
781 branch = TRUE;
782 if ((contents[0] & 0xfd) == 0x31
783 && sym_type != STT_FUNC
784 && contents != insn)
785 {
786 /* It's common for people to write assembly and forget
787 to give function symbols the right type. Handle
788 calls to such symbols, but warn so that (hopefully)
789 people will fix their code. We need the symbol
790 type to be correct to distinguish function pointer
791 initialisation from other pointer initialisations. */
792 const char *sym_name;
793
794 if (h != NULL)
795 sym_name = h->root.root.string;
796 else
797 {
798 Elf_Internal_Shdr *symtab_hdr;
799 symtab_hdr = &elf_tdata (input_section->owner)->symtab_hdr;
800 sym_name = bfd_elf_sym_name (input_section->owner,
801 symtab_hdr,
802 sym,
803 sym_sec);
804 }
805 (*_bfd_error_handler) (_("warning: call to non-function"
806 " symbol %s defined in %B"),
807 sym_sec->owner, sym_name);
808
809 }
810 }
811 }
812
813 if (sym_type != STT_FUNC
814 && !branch
815 && (sym_sec->flags & SEC_CODE) == 0)
816 return ret;
817
818 /* A reference from some other section to a symbol in an overlay
819 section needs a stub. */
820 if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index
821 != spu_elf_section_data (input_section->output_section)->u.o.ovl_index)
822 return ovl_stub;
823
824 /* If this insn isn't a branch then we are possibly taking the
825 address of a function and passing it out somehow. */
826 return !branch && sym_type == STT_FUNC ? nonovl_stub : ret;
827}
828
829static bfd_boolean
830count_stub (struct spu_link_hash_table *htab,
831 bfd *ibfd,
832 asection *isec,
833 enum _stub_type stub_type,
834 struct elf_link_hash_entry *h,
835 const Elf_Internal_Rela *irela)
836{
837 unsigned int ovl = 0;
838 struct got_entry *g, **head;
839 bfd_vma addend;
840
841 /* If this instruction is a branch or call, we need a stub
842 for it. One stub per function per overlay.
843 If it isn't a branch, then we are taking the address of
844 this function so need a stub in the non-overlay area
845 for it. One stub per function. */
846 if (stub_type != nonovl_stub)
847 ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;
848
849 if (h != NULL)
850 head = &h->got.glist;
851 else
852 {
853 if (elf_local_got_ents (ibfd) == NULL)
854 {
855 bfd_size_type amt = (elf_tdata (ibfd)->symtab_hdr.sh_info
856 * sizeof (*elf_local_got_ents (ibfd)));
857 elf_local_got_ents (ibfd) = bfd_zmalloc (amt);
858 if (elf_local_got_ents (ibfd) == NULL)
859 return FALSE;
860 }
861 head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
862 }
863
864 addend = 0;
865 if (irela != NULL)
866 addend = irela->r_addend;
867
868 if (ovl == 0)
869 {
870 struct got_entry *gnext;
871
872 for (g = *head; g != NULL; g = g->next)
873 if (g->addend == addend && g->ovl == 0)
874 break;
875
876 if (g == NULL)
877 {
878 /* Need a new non-overlay area stub. Zap other stubs. */
879 for (g = *head; g != NULL; g = gnext)
880 {
881 gnext = g->next;
882 if (g->addend == addend)
883 {
884 htab->stub_count[g->ovl] -= 1;
885 free (g);
886 }
887 }
888 }
889 }
890 else
891 {
892 for (g = *head; g != NULL; g = g->next)
893 if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
894 break;
895 }
896
897 if (g == NULL)
898 {
899 g = bfd_malloc (sizeof *g);
900 if (g == NULL)
901 return FALSE;
902 g->ovl = ovl;
903 g->addend = addend;
904 g->stub_addr = (bfd_vma) -1;
905 g->next = *head;
906 *head = g;
907
908 htab->stub_count[ovl] += 1;
909 }
910
911 return TRUE;
912}
913
914/* Two instruction overlay stubs look like:
915
916 brsl $75,__ovly_load
917 .word target_ovl_and_address
918
919 ovl_and_address is a word with the overlay number in the top 14 bits
920 and local store address in the bottom 18 bits.
921
922 Four instruction overlay stubs look like:
923
924 ila $78,ovl_number
925 lnop
926 ila $79,target_address
927 br __ovly_load */
928
929static bfd_boolean
930build_stub (struct spu_link_hash_table *htab,
931 bfd *ibfd,
932 asection *isec,
933 enum _stub_type stub_type,
934 struct elf_link_hash_entry *h,
935 const Elf_Internal_Rela *irela,
936 bfd_vma dest,
937 asection *dest_sec)
938{
939 unsigned int ovl;
940 struct got_entry *g, **head;
941 asection *sec;
942 bfd_vma addend, val, from, to;
943
944 ovl = 0;
945 if (stub_type != nonovl_stub)
946 ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;
947
948 if (h != NULL)
949 head = &h->got.glist;
950 else
951 head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
952
953 addend = 0;
954 if (irela != NULL)
955 addend = irela->r_addend;
956
957 for (g = *head; g != NULL; g = g->next)
958 if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
959 break;
960 if (g == NULL)
961 abort ();
962
963 if (g->ovl == 0 && ovl != 0)
964 return TRUE;
965
966 if (g->stub_addr != (bfd_vma) -1)
967 return TRUE;
968
969 sec = htab->stub_sec[ovl];
970 dest += dest_sec->output_offset + dest_sec->output_section->vma;
971 from = sec->size + sec->output_offset + sec->output_section->vma;
972 g->stub_addr = from;
973 to = (htab->ovly_load->root.u.def.value
974 + htab->ovly_load->root.u.def.section->output_offset
975 + htab->ovly_load->root.u.def.section->output_section->vma);
976 val = to - from;
977 if (OVL_STUB_SIZE == 16)
978 val -= 12;
979 if (((dest | to | from) & 3) != 0
980 || val + 0x20000 >= 0x40000)
981 {
982 htab->stub_err = 1;
983 return FALSE;
984 }
985 ovl = spu_elf_section_data (dest_sec->output_section)->u.o.ovl_index;
986
987 if (OVL_STUB_SIZE == 16)
988 {
989 bfd_put_32 (sec->owner, ILA + ((ovl << 7) & 0x01ffff80) + 78,
990 sec->contents + sec->size);
991 bfd_put_32 (sec->owner, LNOP,
992 sec->contents + sec->size + 4);
993 bfd_put_32 (sec->owner, ILA + ((dest << 7) & 0x01ffff80) + 79,
994 sec->contents + sec->size + 8);
995 bfd_put_32 (sec->owner, BR + ((val << 5) & 0x007fff80),
996 sec->contents + sec->size + 12);
997 }
998 else if (OVL_STUB_SIZE == 8)
999 {
1000 bfd_put_32 (sec->owner, BRSL + ((val << 5) & 0x007fff80) + 75,
1001 sec->contents + sec->size);
1002
1003 val = (dest & 0x3ffff) | (ovl << 18);
1004 bfd_put_32 (sec->owner, val,
1005 sec->contents + sec->size + 4);
1006 }
1007 else
1008 abort ();
1009 sec->size += OVL_STUB_SIZE;
1010
1011 if (htab->emit_stub_syms)
1012 {
1013 size_t len;
1014 char *name;
1015 int add;
1016
1017 len = 8 + sizeof (".ovl_call.") - 1;
1018 if (h != NULL)
1019 len += strlen (h->root.root.string);
1020 else
1021 len += 8 + 1 + 8;
1022 add = 0;
1023 if (irela != NULL)
1024 add = (int) irela->r_addend & 0xffffffff;
1025 if (add != 0)
1026 len += 1 + 8;
1027 name = bfd_malloc (len);
1028 if (name == NULL)
1029 return FALSE;
1030
1031 sprintf (name, "%08x.ovl_call.", g->ovl);
1032 if (h != NULL)
1033 strcpy (name + 8 + sizeof (".ovl_call.") - 1, h->root.root.string);
1034 else
1035 sprintf (name + 8 + sizeof (".ovl_call.") - 1, "%x:%x",
1036 dest_sec->id & 0xffffffff,
1037 (int) ELF32_R_SYM (irela->r_info) & 0xffffffff);
1038 if (add != 0)
1039 sprintf (name + len - 9, "+%x", add);
1040
1041 h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
1042 free (name);
1043 if (h == NULL)
1044 return FALSE;
1045 if (h->root.type == bfd_link_hash_new)
1046 {
1047 h->root.type = bfd_link_hash_defined;
1048 h->root.u.def.section = sec;
1049 h->root.u.def.value = sec->size - OVL_STUB_SIZE;
1050 h->size = OVL_STUB_SIZE;
1051 h->type = STT_FUNC;
1052 h->ref_regular = 1;
1053 h->def_regular = 1;
1054 h->ref_regular_nonweak = 1;
1055 h->forced_local = 1;
1056 h->non_elf = 0;
1057 }
1058 }
1059
1060 return TRUE;
1061}
1062
1063/* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
1064 symbols. */
1065
1066static bfd_boolean
1067allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1068{
1069 /* Symbols starting with _SPUEAR_ need a stub because they may be
1070 invoked by the PPU. */
1071 struct bfd_link_info *info = inf;
1072 struct spu_link_hash_table *htab = spu_hash_table (info);
1073 asection *sym_sec;
1074
1075 if ((h->root.type == bfd_link_hash_defined
1076 || h->root.type == bfd_link_hash_defweak)
1077 && h->def_regular
1078 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
1079 && (sym_sec = h->root.u.def.section) != NULL
1080 && sym_sec->output_section != NULL
1081 && sym_sec->output_section->owner == info->output_bfd
1082 && spu_elf_section_data (sym_sec->output_section) != NULL
1083 && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
1084 || htab->non_overlay_stubs))
1085 {
1086 count_stub (htab, NULL, NULL, nonovl_stub, h, NULL);
1087 }
1088
1089 return TRUE;
1090}
1091
1092static bfd_boolean
1093build_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1094{
1095 /* Symbols starting with _SPUEAR_ need a stub because they may be
1096 invoked by the PPU. */
1097 struct bfd_link_info *info = inf;
1098 struct spu_link_hash_table *htab = spu_hash_table (info);
1099 asection *sym_sec;
1100
1101 if ((h->root.type == bfd_link_hash_defined
1102 || h->root.type == bfd_link_hash_defweak)
1103 && h->def_regular
1104 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
1105 && (sym_sec = h->root.u.def.section) != NULL
1106 && sym_sec->output_section != NULL
1107 && sym_sec->output_section->owner == info->output_bfd
1108 && spu_elf_section_data (sym_sec->output_section) != NULL
1109 && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
1110 || htab->non_overlay_stubs))
1111 {
1112 build_stub (htab, NULL, NULL, nonovl_stub, h, NULL,
1113 h->root.u.def.value, sym_sec);
1114 }
1115
1116 return TRUE;
1117}
1118
1119/* Size or build stubs. */
1120
1121static bfd_boolean
1122process_stubs (struct bfd_link_info *info, bfd_boolean build)
1123{
1124 struct spu_link_hash_table *htab = spu_hash_table (info);
1125 bfd *ibfd;
1126
1127 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
1128 {
1129 extern const bfd_target bfd_elf32_spu_vec;
1130 Elf_Internal_Shdr *symtab_hdr;
1131 asection *isec;
e9f53129
AM
1132 Elf_Internal_Sym *local_syms = NULL;
1133
1134 if (ibfd->xvec != &bfd_elf32_spu_vec)
1135 continue;
1136
1137 /* We'll need the symbol table in a second. */
1138 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
1139 if (symtab_hdr->sh_info == 0)
1140 continue;
1141
1142 /* Walk over each section attached to the input bfd. */
1143 for (isec = ibfd->sections; isec != NULL; isec = isec->next)
e9f53129
AM
1144 {
1145 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
1146
1147 /* If there aren't any relocs, then there's nothing more to do. */
1148 if ((isec->flags & SEC_RELOC) == 0
1149 || isec->reloc_count == 0)
e9f53129
AM
1150 continue;
1151
1152 if (!maybe_needs_stubs (isec, info->output_bfd))
e9f53129
AM
1153 continue;
1154
1155 /* Get the relocs. */
1156 internal_relocs = _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
1157 info->keep_memory);
1158 if (internal_relocs == NULL)
1159 goto error_ret_free_local;
1160
1161 /* Now examine each relocation. */
1162 irela = internal_relocs;
1163 irelaend = irela + isec->reloc_count;
1164 for (; irela < irelaend; irela++)
1165 {
1166 enum elf_spu_reloc_type r_type;
1167 unsigned int r_indx;
1168 asection *sym_sec;
1169 Elf_Internal_Sym *sym;
1170 struct elf_link_hash_entry *h;
1171 enum _stub_type stub_type;
1172
1173 r_type = ELF32_R_TYPE (irela->r_info);
1174 r_indx = ELF32_R_SYM (irela->r_info);
1175
1176 if (r_type >= R_SPU_max)
1177 {
1178 bfd_set_error (bfd_error_bad_value);
1179 error_ret_free_internal:
1180 if (elf_section_data (isec)->relocs != internal_relocs)
1181 free (internal_relocs);
1182 error_ret_free_local:
1183 if (local_syms != NULL
1184 && (symtab_hdr->contents
1185 != (unsigned char *) local_syms))
1186 free (local_syms);
1187 return FALSE;
1188 }
1189
1190 /* Determine the reloc target section. */
1191 if (!get_sym_h (&h, &sym, &sym_sec, &local_syms, r_indx, ibfd))
1192 goto error_ret_free_internal;
1193
1194 stub_type = needs_ovl_stub (h, sym, sym_sec, isec, irela,
1195 NULL, info);
1196 if (stub_type == no_stub)
1197 continue;
1198 else if (stub_type == stub_error)
1199 goto error_ret_free_internal;
1200
1201 if (htab->stub_count == NULL)
1202 {
1203 bfd_size_type amt;
1204 amt = (htab->num_overlays + 1) * sizeof (*htab->stub_count);
1205 htab->stub_count = bfd_zmalloc (amt);
1206 if (htab->stub_count == NULL)
1207 goto error_ret_free_internal;
1208 }
1209
1210 if (!build)
1211 {
1212 if (!count_stub (htab, ibfd, isec, stub_type, h, irela))
1213 goto error_ret_free_internal;
1214 }
1215 else
1216 {
1217 bfd_vma dest;
1218
1219 if (h != NULL)
1220 dest = h->root.u.def.value;
1221 else
1222 dest = sym->st_value;
1223 dest += irela->r_addend;
1224 if (!build_stub (htab, ibfd, isec, stub_type, h, irela,
1225 dest, sym_sec))
1226 goto error_ret_free_internal;
1227 }
1228 }
1229
1230 /* We're done with the internal relocs, free them. */
1231 if (elf_section_data (isec)->relocs != internal_relocs)
1232 free (internal_relocs);
1233 }
1234
1235 if (local_syms != NULL
1236 && symtab_hdr->contents != (unsigned char *) local_syms)
1237 {
1238 if (!info->keep_memory)
1239 free (local_syms);
1240 else
1241 symtab_hdr->contents = (unsigned char *) local_syms;
1242 }
1243 }
1244
1245 return TRUE;
1246}
1247
1248/* Allocate space for overlay call and return stubs. */
1249
1250int
1251spu_elf_size_stubs (struct bfd_link_info *info,
1252 void (*place_spu_section) (asection *, asection *,
1253 const char *),
1254 int non_overlay_stubs)
1255{
1256 struct spu_link_hash_table *htab = spu_hash_table (info);
1257 bfd *ibfd;
1258 bfd_size_type amt;
1259 flagword flags;
1260 unsigned int i;
1261 asection *stub;
1262
1263 htab->non_overlay_stubs = non_overlay_stubs;
1264 if (!process_stubs (info, FALSE))
1265 return 0;
1266
1267 elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, info);
1268 if (htab->stub_err)
1269 return 0;
1270
1271 if (htab->stub_count == NULL)
1272 return 1;
1273
1274 ibfd = info->input_bfds;
1275 amt = (htab->num_overlays + 1) * sizeof (*htab->stub_sec);
1276 htab->stub_sec = bfd_zmalloc (amt);
1277 if (htab->stub_sec == NULL)
1278 return 0;
1279
1280 flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
1281 | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
1282 stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
1283 htab->stub_sec[0] = stub;
1284 if (stub == NULL
1285 || !bfd_set_section_alignment (ibfd, stub, 3 + (OVL_STUB_SIZE > 8)))
1286 return 0;
1287 stub->size = htab->stub_count[0] * OVL_STUB_SIZE;
1288 (*place_spu_section) (stub, NULL, ".text");
1289
1290 for (i = 0; i < htab->num_overlays; ++i)
1291 {
1292 asection *osec = htab->ovl_sec[i];
1293 unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
1294 stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
1295 htab->stub_sec[ovl] = stub;
1296 if (stub == NULL
1297 || !bfd_set_section_alignment (ibfd, stub, 3 + (OVL_STUB_SIZE > 8)))
1298 return 0;
1299 stub->size = htab->stub_count[ovl] * OVL_STUB_SIZE;
1300 (*place_spu_section) (stub, osec, NULL);
1301 }
1302
1303 /* htab->ovtab consists of two arrays.
1304 . struct {
1305 . u32 vma;
1306 . u32 size;
1307 . u32 file_off;
1308 . u32 buf;
1309 . } _ovly_table[];
1310 .
1311 . struct {
1312 . u32 mapped;
1313 . } _ovly_buf_table[];
1314 . */
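 /* Note, from the code that follows and from spu_elf_build_stubs: the
    .ovtab section reserves an extra 16 bytes ahead of _ovly_table (the
    "+ 16" in the size calculation below); the low bit of the size word
    in that first slot is later set to mark the non-overlay area as
    present. */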
1315
1316 flags = (SEC_ALLOC | SEC_LOAD
1317 | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
1318 htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
1319 if (htab->ovtab == NULL
1320 || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
1321 return 0;
1322
1323 htab->ovtab->size = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
1324 (*place_spu_section) (htab->ovtab, NULL, ".data");
1325
1326 htab->toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
1327 if (htab->toe == NULL
1328 || !bfd_set_section_alignment (ibfd, htab->toe, 4))
1329 return 0;
1330 htab->toe->size = 16;
1331 (*place_spu_section) (htab->toe, NULL, ".toe");
1332
1333 return 2;
1334}
1335
1336/* Functions to handle embedded spu_ovl.o object. */
1337
1338static void *
1339ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
1340{
1341 return stream;
1342}
1343
1344static file_ptr
1345ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
1346 void *stream,
1347 void *buf,
1348 file_ptr nbytes,
1349 file_ptr offset)
1350{
1351 struct _ovl_stream *os;
1352 size_t count;
1353 size_t max;
1354
1355 os = (struct _ovl_stream *) stream;
1356 max = (const char *) os->end - (const char *) os->start;
1357
1358 if ((ufile_ptr) offset >= max)
1359 return 0;
1360
1361 count = nbytes;
1362 if (count > max - offset)
1363 count = max - offset;
1364
1365 memcpy (buf, (const char *) os->start + offset, count);
1366 return count;
1367}
1368
1369bfd_boolean
1370spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
1371{
1372 *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
1373 "elf32-spu",
1374 ovl_mgr_open,
1375 (void *) stream,
1376 ovl_mgr_pread,
1377 NULL,
1378 NULL);
1379 return *ovl_bfd != NULL;
1380}
1381
1382/* Define an STT_OBJECT symbol. */
1383
1384static struct elf_link_hash_entry *
1385define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
1386{
1387 struct elf_link_hash_entry *h;
1388
1389 h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
1390 if (h == NULL)
1391 return NULL;
1392
1393 if (h->root.type != bfd_link_hash_defined
1394 || !h->def_regular)
1395 {
1396 h->root.type = bfd_link_hash_defined;
1397 h->root.u.def.section = htab->ovtab;
1398 h->type = STT_OBJECT;
1399 h->ref_regular = 1;
1400 h->def_regular = 1;
1401 h->ref_regular_nonweak = 1;
1402 h->non_elf = 0;
1403 }
1404 else
1405 {
1406 (*_bfd_error_handler) (_("%B is not allowed to define %s"),
1407 h->root.u.def.section->owner,
1408 h->root.root.string);
1409 bfd_set_error (bfd_error_bad_value);
1410 return NULL;
1411 }
1412
1413 return h;
1414}
1415
1416/* Fill in all stubs and the overlay tables. */
1417
1418bfd_boolean
1419spu_elf_build_stubs (struct bfd_link_info *info, int emit_syms)
1420{
1421 struct spu_link_hash_table *htab = spu_hash_table (info);
1422 struct elf_link_hash_entry *h;
1423 bfd_byte *p;
1424 asection *s;
1425 bfd *obfd;
1426 unsigned int i;
1427
1428 htab->emit_stub_syms = emit_syms;
1429 if (htab->stub_count == NULL)
1430 return TRUE;
1431
1432 for (i = 0; i <= htab->num_overlays; i++)
1433 if (htab->stub_sec[i]->size != 0)
1434 {
1435 htab->stub_sec[i]->contents = bfd_zalloc (htab->stub_sec[i]->owner,
1436 htab->stub_sec[i]->size);
1437 if (htab->stub_sec[i]->contents == NULL)
1438 return FALSE;
1439 htab->stub_sec[i]->rawsize = htab->stub_sec[i]->size;
1440 htab->stub_sec[i]->size = 0;
1441 }
1442
1443 h = elf_link_hash_lookup (&htab->elf, "__ovly_load", FALSE, FALSE, FALSE);
1444 htab->ovly_load = h;
1445 BFD_ASSERT (h != NULL
1446 && (h->root.type == bfd_link_hash_defined
1447 || h->root.type == bfd_link_hash_defweak)
1448 && h->def_regular);
1449
1450 s = h->root.u.def.section->output_section;
1451 if (spu_elf_section_data (s)->u.o.ovl_index)
1452 {
1453 (*_bfd_error_handler) (_("%s in overlay section"),
1454 h->root.root.string);
1455 bfd_set_error (bfd_error_bad_value);
1456 return FALSE;
1457 }
1458
1459 h = elf_link_hash_lookup (&htab->elf, "__ovly_return", FALSE, FALSE, FALSE);
1460 htab->ovly_return = h;
1461
1462 /* Fill in all the stubs. */
1463 process_stubs (info, TRUE);
1464
1465 elf_link_hash_traverse (&htab->elf, build_spuear_stubs, info);
1466 if (htab->stub_err)
1467 return FALSE;
1468
1469 for (i = 0; i <= htab->num_overlays; i++)
1470 {
1471 if (htab->stub_sec[i]->size != htab->stub_sec[i]->rawsize)
1472 {
1473 (*_bfd_error_handler) (_("stubs don't match calculated size"));
1474 bfd_set_error (bfd_error_bad_value);
1475 return FALSE;
1476 }
1477 htab->stub_sec[i]->rawsize = 0;
1478 }
1479
1480 if (htab->stub_err)
1481 {
1482 (*_bfd_error_handler) (_("overlay stub relocation overflow"));
1483 bfd_set_error (bfd_error_bad_value);
1484 return FALSE;
1485 }
1486
1487 htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
1488 if (htab->ovtab->contents == NULL)
1489 return FALSE;
1490
1491 /* Write out _ovly_table. */
1492 p = htab->ovtab->contents;
1493 /* set low bit of .size to mark non-overlay area as present. */
1494 p[7] = 1;
1495 obfd = htab->ovtab->output_section->owner;
1496 for (s = obfd->sections; s != NULL; s = s->next)
1497 {
1498 unsigned int ovl_index = spu_elf_section_data (s)->u.o.ovl_index;
1499
1500 if (ovl_index != 0)
1501 {
1502 unsigned long off = ovl_index * 16;
1503 unsigned int ovl_buf = spu_elf_section_data (s)->u.o.ovl_buf;
1504
1505 bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
1506 bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16, p + off + 4);
1507 /* file_off written later in spu_elf_modify_program_headers. */
1508 bfd_put_32 (htab->ovtab->owner, ovl_buf, p + off + 12);
1509 }
1510 }
1511
1512 h = define_ovtab_symbol (htab, "_ovly_table");
1513 if (h == NULL)
1514 return FALSE;
1515 h->root.u.def.value = 16;
1516 h->size = htab->num_overlays * 16;
1517
1518 h = define_ovtab_symbol (htab, "_ovly_table_end");
1519 if (h == NULL)
1520 return FALSE;
1521 h->root.u.def.value = htab->num_overlays * 16 + 16;
1522 h->size = 0;
1523
1524 h = define_ovtab_symbol (htab, "_ovly_buf_table");
1525 if (h == NULL)
1526 return FALSE;
1527 h->root.u.def.value = htab->num_overlays * 16 + 16;
1528 h->size = htab->num_buf * 4;
1529
1530 h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
1531 if (h == NULL)
1532 return FALSE;
1533 h->root.u.def.value = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
1534 h->size = 0;
1535
1536 h = define_ovtab_symbol (htab, "_EAR_");
1537 if (h == NULL)
1538 return FALSE;
1539 h->root.u.def.section = htab->toe;
1540 h->root.u.def.value = 0;
1541 h->size = 16;
1542
1543 return TRUE;
1544}
1545
1546/* Check that all loadable section VMAs lie in the range
1547 LO .. HI inclusive, and stash some parameters for --auto-overlay. */
1548
1549asection *
1550spu_elf_check_vma (struct bfd_link_info *info,
1551 int auto_overlay,
1552 unsigned int lo,
1553 unsigned int hi,
1554 unsigned int overlay_fixed,
1555 unsigned int reserved,
1556 int extra_stack_space,
1557 void (*spu_elf_load_ovl_mgr) (void),
1558 FILE *(*spu_elf_open_overlay_script) (void),
1559 void (*spu_elf_relink) (void))
1560{
1561 struct elf_segment_map *m;
1562 unsigned int i;
1563 struct spu_link_hash_table *htab = spu_hash_table (info);
1564 bfd *abfd = info->output_bfd;
1565
1566 if (auto_overlay & AUTO_OVERLAY)
1567 htab->auto_overlay = auto_overlay;
1568 htab->local_store = hi + 1 - lo;
1569 htab->overlay_fixed = overlay_fixed;
1570 htab->reserved = reserved;
1571 htab->extra_stack_space = extra_stack_space;
1572 htab->spu_elf_load_ovl_mgr = spu_elf_load_ovl_mgr;
1573 htab->spu_elf_open_overlay_script = spu_elf_open_overlay_script;
1574 htab->spu_elf_relink = spu_elf_relink;
1575
1576 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
1577 if (m->p_type == PT_LOAD)
1578 for (i = 0; i < m->count; i++)
1579 if (m->sections[i]->size != 0
1580 && (m->sections[i]->vma < lo
1581 || m->sections[i]->vma > hi
1582 || m->sections[i]->vma + m->sections[i]->size - 1 > hi))
1583 return m->sections[i];
1584
1585 /* No need for overlays if it all fits. */
1586 htab->auto_overlay = 0;
1587 return NULL;
1588}
1589
1590/* OFFSET in SEC (presumably) is the beginning of a function prologue.
1591 Search for stack adjusting insns, and return the sp delta. */
1592
1593static int
1594find_function_stack_adjust (asection *sec, bfd_vma offset)
1595{
1596 int unrecog;
1597 int reg[128];
1598
1599 memset (reg, 0, sizeof (reg));
1600 for (unrecog = 0; offset + 4 <= sec->size && unrecog < 32; offset += 4)
1601 {
1602 unsigned char buf[4];
1603 int rt, ra;
1604 int imm;
1605
1606 /* Assume no relocs on stack adjusting insns. */
1607 if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
1608 break;
1609
1610 if (buf[0] == 0x24 /* stqd */)
1611 continue;
1612
1613 rt = buf[3] & 0x7f;
1614 ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);
1615 /* Partly decoded immediate field. */
1616 imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);
1617
1618 if (buf[0] == 0x1c /* ai */)
1619 {
1620 imm >>= 7;
1621 imm = (imm ^ 0x200) - 0x200;
1622 reg[rt] = reg[ra] + imm;
1623
1624 if (rt == 1 /* sp */)
1625 {
1626 if (imm > 0)
1627 break;
1628 return reg[rt];
1629 }
1630 }
1631 else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
1632 {
1633 int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);
1634
1635 reg[rt] = reg[ra] + reg[rb];
1636 if (rt == 1)
1637 return reg[rt];
1638 }
1639 else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
1640 {
1641 if (buf[0] >= 0x42 /* ila */)
1642 imm |= (buf[0] & 1) << 17;
1643 else
1644 {
1645 imm &= 0xffff;
1646
1647 if (buf[0] == 0x40 /* il */)
1648 {
1649 if ((buf[1] & 0x80) == 0)
1650 goto unknown_insn;
1651 imm = (imm ^ 0x8000) - 0x8000;
1652 }
1653 else if ((buf[1] & 0x80) == 0 /* ilhu */)
1654 imm <<= 16;
1655 }
1656 reg[rt] = imm;
1657 continue;
1658 }
1659 else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
1660 {
1661 reg[rt] |= imm & 0xffff;
1662 continue;
1663 }
1664 else if (buf[0] == 0x04 /* ori */)
1665 {
1666 imm >>= 7;
1667 imm = (imm ^ 0x200) - 0x200;
1668 reg[rt] = reg[ra] | imm;
1669 continue;
1670 }
1671 else if ((buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
1672 || (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */))
1673 {
1674 /* Used in pic reg load. Say rt is trashed. */
1675 reg[rt] = 0;
1676 continue;
1677 }
1678 else if (is_branch (buf) || is_indirect_branch (buf))
1679 /* If we hit a branch then we must be out of the prologue. */
1680 break;
1681 unknown_insn:
1682 ++unrecog;
1683 }
1684
1685 return 0;
1686}
1687
1688/* qsort predicate to sort symbols by section and value. */
1689
1690static Elf_Internal_Sym *sort_syms_syms;
1691static asection **sort_syms_psecs;
1692
1693static int
1694sort_syms (const void *a, const void *b)
1695{
1696 Elf_Internal_Sym *const *s1 = a;
1697 Elf_Internal_Sym *const *s2 = b;
1698 asection *sec1,*sec2;
1699 bfd_signed_vma delta;
1700
1701 sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
1702 sec2 = sort_syms_psecs[*s2 - sort_syms_syms];
1703
1704 if (sec1 != sec2)
1705 return sec1->index - sec2->index;
1706
1707 delta = (*s1)->st_value - (*s2)->st_value;
1708 if (delta != 0)
1709 return delta < 0 ? -1 : 1;
1710
1711 delta = (*s2)->st_size - (*s1)->st_size;
1712 if (delta != 0)
1713 return delta < 0 ? -1 : 1;
1714
1715 return *s1 < *s2 ? -1 : 1;
1716}
1717
1718struct call_info
1719{
1720 struct function_info *fun;
1721 struct call_info *next;
1722 unsigned int count;
1723 unsigned int max_depth;
1724 unsigned int is_tail : 1;
1725 unsigned int is_pasted : 1;
1726};
1727
1728struct function_info
1729{
1730 /* List of functions called. Also branches to hot/cold part of
1731 function. */
1732 struct call_info *call_list;
1733 /* For hot/cold part of function, point to owner. */
1734 struct function_info *start;
1735 /* Symbol at start of function. */
1736 union {
1737 Elf_Internal_Sym *sym;
1738 struct elf_link_hash_entry *h;
1739 } u;
1740 /* Function section. */
1741 asection *sec;
1742 asection *rodata;
1743 /* Where last called from, and number of sections called from. */
1744 asection *last_caller;
1745 unsigned int call_count;
1746 /* Address range of (this part of) function. */
1747 bfd_vma lo, hi;
1748 /* Stack usage. */
1749 int stack;
1750 /* Distance from root of call tree. Tail and hot/cold branches
1751 count as one deeper. We aren't counting stack frames here. */
1752 unsigned int depth;
1753 /* Set if global symbol. */
1754 unsigned int global : 1;
1755 /* Set if known to be start of function (as distinct from a hunk
1756 in hot/cold section). */
1757 unsigned int is_func : 1;
1758 /* Set if not a root node. */
1759 unsigned int non_root : 1;
1760 /* Flags used during call tree traversal. It's cheaper to replicate
1761 the visit flags than have one which needs clearing after a traversal. */
1762 unsigned int visit1 : 1;
1763 unsigned int visit2 : 1;
1764 unsigned int marking : 1;
1765 unsigned int visit3 : 1;
1766 unsigned int visit4 : 1;
1767 unsigned int visit5 : 1;
1768 unsigned int visit6 : 1;
1769 unsigned int visit7 : 1;
1770};
1771
1772struct spu_elf_stack_info
1773{
1774 int num_fun;
1775 int max_fun;
1776 /* Variable size array describing functions, one per contiguous
1777 address range belonging to a function. */
1778 struct function_info fun[1];
1779};
1780
1781/* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
1782 entries for section SEC. */
1783
1784static struct spu_elf_stack_info *
1785alloc_stack_info (asection *sec, int max_fun)
1786{
1787 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1788 bfd_size_type amt;
1789
1790 amt = sizeof (struct spu_elf_stack_info);
1791 amt += (max_fun - 1) * sizeof (struct function_info);
1792 sec_data->u.i.stack_info = bfd_zmalloc (amt);
1793 if (sec_data->u.i.stack_info != NULL)
1794 sec_data->u.i.stack_info->max_fun = max_fun;
1795 return sec_data->u.i.stack_info;
1796}
1797
1798/* Add a new struct function_info describing a (part of a) function
1799 starting at SYM_H. Keep the array sorted by address. */
1800
1801static struct function_info *
1802maybe_insert_function (asection *sec,
1803 void *sym_h,
1804 bfd_boolean global,
1805 bfd_boolean is_func)
1806{
1807 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1808 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
1809 int i;
1810 bfd_vma off, size;
1811
1812 if (sinfo == NULL)
1813 {
1814 sinfo = alloc_stack_info (sec, 20);
1815 if (sinfo == NULL)
1816 return NULL;
1817 }
1818
1819 if (!global)
1820 {
1821 Elf_Internal_Sym *sym = sym_h;
1822 off = sym->st_value;
1823 size = sym->st_size;
1824 }
1825 else
1826 {
1827 struct elf_link_hash_entry *h = sym_h;
1828 off = h->root.u.def.value;
1829 size = h->size;
1830 }
1831
1832 for (i = sinfo->num_fun; --i >= 0; )
1833 if (sinfo->fun[i].lo <= off)
1834 break;
1835
1836 if (i >= 0)
1837 {
1838 /* Don't add another entry for an alias, but do update some
1839 info. */
1840 if (sinfo->fun[i].lo == off)
1841 {
1842 /* Prefer globals over local syms. */
1843 if (global && !sinfo->fun[i].global)
1844 {
1845 sinfo->fun[i].global = TRUE;
1846 sinfo->fun[i].u.h = sym_h;
1847 }
1848 if (is_func)
1849 sinfo->fun[i].is_func = TRUE;
1850 return &sinfo->fun[i];
1851 }
1852 /* Ignore a zero-size symbol inside an existing function. */
1853 else if (sinfo->fun[i].hi > off && size == 0)
1854 return &sinfo->fun[i];
1855 }
1856
1857  if (sinfo->num_fun >= sinfo->max_fun)
1858 {
1859 bfd_size_type amt = sizeof (struct spu_elf_stack_info);
1860 bfd_size_type old = amt;
1861
1862 old += (sinfo->max_fun - 1) * sizeof (struct function_info);
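      /* Grow the array geometrically (half as much again, plus 20) so
	 that repeated insertions do not become quadratic.  */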
1863 sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
1864 amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
1865 sinfo = bfd_realloc (sinfo, amt);
1866 if (sinfo == NULL)
1867 return NULL;
1868 memset ((char *) sinfo + old, 0, amt - old);
1869      sec_data->u.i.stack_info = sinfo;
1870    }
1871
1872 if (++i < sinfo->num_fun)
1873 memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
1874 (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
1875 sinfo->fun[i].is_func = is_func;
1876 sinfo->fun[i].global = global;
1877 sinfo->fun[i].sec = sec;
1878 if (global)
1879 sinfo->fun[i].u.h = sym_h;
1880 else
1881 sinfo->fun[i].u.sym = sym_h;
1882 sinfo->fun[i].lo = off;
1883 sinfo->fun[i].hi = off + size;
1884 sinfo->fun[i].stack = -find_function_stack_adjust (sec, off);
1885 sinfo->num_fun += 1;
1886 return &sinfo->fun[i];
1887}
1888
1889/* Return the name of FUN. */
1890
1891static const char *
1892func_name (struct function_info *fun)
1893{
1894 asection *sec;
1895 bfd *ibfd;
1896 Elf_Internal_Shdr *symtab_hdr;
1897
1898 while (fun->start != NULL)
1899 fun = fun->start;
1900
1901 if (fun->global)
1902 return fun->u.h->root.root.string;
1903
1904 sec = fun->sec;
1905 if (fun->u.sym->st_name == 0)
1906 {
1907 size_t len = strlen (sec->name);
1908 char *name = bfd_malloc (len + 10);
1909 if (name == NULL)
1910 return "(null)";
1911 sprintf (name, "%s+%lx", sec->name,
1912 (unsigned long) fun->u.sym->st_value & 0xffffffff);
1913 return name;
1914 }
1915 ibfd = sec->owner;
1916 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
1917 return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
1918}
1919
1920/* Read the instruction at OFF in SEC. Return true iff the instruction
1921 is a nop, lnop, or stop 0 (all zero insn). */
1922
1923static bfd_boolean
1924is_nop (asection *sec, bfd_vma off)
1925{
1926 unsigned char insn[4];
1927
1928 if (off + 4 > sec->size
1929 || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
1930 return FALSE;
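  /* The first test below accepts both nop and lnop, whose encodings
     differ only in a single bit of the leading opcode byte; the second
     accepts the all-zero "stop 0" pattern.  */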
1931 if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
1932 return TRUE;
1933 if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
1934 return TRUE;
1935 return FALSE;
1936}
1937
1938/* Extend the range of FUN to cover nop padding up to LIMIT.
1939 Return TRUE iff some instruction other than a NOP was found. */
1940
1941static bfd_boolean
1942insns_at_end (struct function_info *fun, bfd_vma limit)
1943{
1944 bfd_vma off = (fun->hi + 3) & -4;
1945
1946 while (off < limit && is_nop (fun->sec, off))
1947 off += 4;
1948 if (off < limit)
1949 {
1950 fun->hi = off;
1951 return TRUE;
1952 }
1953 fun->hi = limit;
1954 return FALSE;
1955}
1956
1957/* Check and fix overlapping function ranges. Return TRUE iff there
1958 are gaps in the current info we have about functions in SEC. */
1959
1960static bfd_boolean
1961check_function_ranges (asection *sec, struct bfd_link_info *info)
1962{
1963 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1964  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
1965 int i;
1966 bfd_boolean gaps = FALSE;
1967
1968 if (sinfo == NULL)
1969 return FALSE;
1970
1971 for (i = 1; i < sinfo->num_fun; i++)
1972 if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
1973 {
1974 /* Fix overlapping symbols. */
1975 const char *f1 = func_name (&sinfo->fun[i - 1]);
1976 const char *f2 = func_name (&sinfo->fun[i]);
1977
1978 info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
1979 sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
1980 }
1981 else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
1982 gaps = TRUE;
1983
1984 if (sinfo->num_fun == 0)
1985 gaps = TRUE;
1986 else
1987 {
1988 if (sinfo->fun[0].lo != 0)
1989 gaps = TRUE;
1990 if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
1991 {
1992 const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);
1993
1994 info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
1995 sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
1996 }
1997 else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
1998 gaps = TRUE;
1999 }
2000 return gaps;
2001}
2002
2003/* Search current function info for a function that contains address
2004 OFFSET in section SEC. */
2005
2006static struct function_info *
2007find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
2008{
2009 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2010  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2011 int lo, hi, mid;
2012
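  /* The fun[] array is kept sorted by ->lo (see maybe_insert_function),
     so a binary search over [lo, hi) finds the entry containing OFFSET.  */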
2013 lo = 0;
2014 hi = sinfo->num_fun;
2015 while (lo < hi)
2016 {
2017 mid = (lo + hi) / 2;
2018 if (offset < sinfo->fun[mid].lo)
2019 hi = mid;
2020 else if (offset >= sinfo->fun[mid].hi)
2021 lo = mid + 1;
2022 else
2023 return &sinfo->fun[mid];
2024 }
2025 info->callbacks->einfo (_("%A:0x%v not found in function table\n"),
2026 sec, offset);
2027 return NULL;
2028}
2029
2030/* Add CALLEE to CALLER's call list if not already present.  Return TRUE
2031   if CALLEE was new.  If this function returns FALSE, CALLEE should
2032   be freed.  */
2033
2034static bfd_boolean
2035insert_callee (struct function_info *caller, struct call_info *callee)
2036{
2037 struct call_info **pp, *p;
2038
2039 for (pp = &caller->call_list; (p = *pp) != NULL; pp = &p->next)
2040 if (p->fun == callee->fun)
2041 {
2042 /* Tail calls use less stack than normal calls. Retain entry
2043 for normal call over one for tail call. */
2044 p->is_tail &= callee->is_tail;
2045 if (!p->is_tail)
2046 {
2047 p->fun->start = NULL;
2048 p->fun->is_func = TRUE;
2049 }
2050	p->count += 1;
2051 /* Reorder list so most recent call is first. */
2052 *pp = p->next;
2053 p->next = caller->call_list;
2054 caller->call_list = p;
2055 return FALSE;
2056 }
2057 callee->next = caller->call_list;
2058  callee->count += 1;
2059 caller->call_list = callee;
2060 return TRUE;
2061}
2062
2063/* Copy CALL and insert the copy into CALLER. */
2064
2065static bfd_boolean
2066copy_callee (struct function_info *caller, const struct call_info *call)
2067{
2068 struct call_info *callee;
2069 callee = bfd_malloc (sizeof (*callee));
2070 if (callee == NULL)
2071 return FALSE;
2072 *callee = *call;
2073 if (!insert_callee (caller, callee))
2074 free (callee);
2075 return TRUE;
2076}
2077
2078/* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2079 overlay stub sections. */
2080
2081static bfd_boolean
2082interesting_section (asection *s, bfd *obfd)
2083{
2084 return (s->output_section != NULL
2085 && s->output_section->owner == obfd
2086 && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_IN_MEMORY))
2087 == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2088 && s->size != 0);
2089}
2090
2091/* Rummage through the relocs for SEC, looking for function calls.
2092 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
2093 mark destination symbols on calls as being functions. Also
2094 look at branches, which may be tail calls or go to hot/cold
2095 section part of same function. */
2096
2097static bfd_boolean
2098mark_functions_via_relocs (asection *sec,
2099 struct bfd_link_info *info,
2100 int call_tree)
2101{
2102 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
2103  Elf_Internal_Shdr *symtab_hdr;
2104  void *psyms;
2105 static bfd_boolean warned;
2106
2107 if (!interesting_section (sec, info->output_bfd)
2108 || sec->reloc_count == 0)
2109 return TRUE;
2110
2111 internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
2112 info->keep_memory);
2113 if (internal_relocs == NULL)
2114 return FALSE;
2115
2116 symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
2117  psyms = &symtab_hdr->contents;
2118 irela = internal_relocs;
2119 irelaend = irela + sec->reloc_count;
2120 for (; irela < irelaend; irela++)
2121 {
2122 enum elf_spu_reloc_type r_type;
2123 unsigned int r_indx;
2124 asection *sym_sec;
2125 Elf_Internal_Sym *sym;
2126 struct elf_link_hash_entry *h;
2127 bfd_vma val;
2128      bfd_boolean reject, is_call;
2129 struct function_info *caller;
2130 struct call_info *callee;
2131
2132      reject = FALSE;
2133 r_type = ELF32_R_TYPE (irela->r_info);
2134 if (r_type != R_SPU_REL16
2135 && r_type != R_SPU_ADDR16)
2136 {
2137 reject = TRUE;
2138 if (!(call_tree && spu_hash_table (info)->auto_overlay))
2139 continue;
2140 }
2141
2142 r_indx = ELF32_R_SYM (irela->r_info);
2143 if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
2144 return FALSE;
2145
2146 if (sym_sec == NULL
2147 || sym_sec->output_section == NULL
2148	  || sym_sec->output_section->owner != info->output_bfd)
2149 continue;
2150
2151 is_call = FALSE;
2152 if (!reject)
2153 {
2154 unsigned char insn[4];
2155
2156 if (!bfd_get_section_contents (sec->owner, sec, insn,
2157 irela->r_offset, 4))
2158 return FALSE;
2159 if (is_branch (insn))
2160 {
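	      /* Branches that set the link register are function calls;
		 plain branches are tail calls or jumps to the hot/cold
		 part of the same function.  The mask below picks out the
		 two branch-and-set-link encodings.  */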
2161 is_call = (insn[0] & 0xfd) == 0x31;
2162 if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2163 != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2164 {
2165 if (!warned)
2166 info->callbacks->einfo
2167 (_("%B(%A+0x%v): call to non-code section"
2168 " %B(%A), analysis incomplete\n"),
2169 sec->owner, sec, irela->r_offset,
2170 sym_sec->owner, sym_sec);
2171 warned = TRUE;
2172 continue;
2173 }
2174 }
2175 else
2176 {
2177 reject = TRUE;
2178 if (!(call_tree && spu_hash_table (info)->auto_overlay)
2179 || is_hint (insn))
2180 continue;
2181 }
2182 }
2183
2184      if (reject)
2185	{
2186 /* For --auto-overlay, count possible stubs we need for
2187 function pointer references. */
2188 unsigned int sym_type;
2189 if (h)
2190 sym_type = h->type;
2191 else
2192 sym_type = ELF_ST_TYPE (sym->st_info);
2193 if (sym_type == STT_FUNC)
2194 spu_hash_table (info)->non_ovly_stub += 1;
2195 continue;
2196 }
2197
2198 if (h)
2199 val = h->root.u.def.value;
2200 else
2201 val = sym->st_value;
2202 val += irela->r_addend;
2203
2204 if (!call_tree)
2205 {
2206 struct function_info *fun;
2207
2208 if (irela->r_addend != 0)
2209 {
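	      /* A reloc with a non-zero addend references a spot inside
		 the target section rather than the symbol itself, so
		 manufacture a local symbol at that offset and record a
		 function entry for it.  */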
2210 Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
2211 if (fake == NULL)
2212 return FALSE;
2213 fake->st_value = val;
2214 fake->st_shndx
2215 = _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
2216 sym = fake;
2217 }
2218 if (sym)
2219 fun = maybe_insert_function (sym_sec, sym, FALSE, is_call);
2220 else
2221 fun = maybe_insert_function (sym_sec, h, TRUE, is_call);
2222 if (fun == NULL)
2223 return FALSE;
2224 if (irela->r_addend != 0
2225 && fun->u.sym != sym)
2226 free (sym);
2227 continue;
2228 }
2229
2230 caller = find_function (sec, irela->r_offset, info);
2231 if (caller == NULL)
2232 return FALSE;
2233 callee = bfd_malloc (sizeof *callee);
2234 if (callee == NULL)
2235 return FALSE;
2236
2237 callee->fun = find_function (sym_sec, val, info);
2238 if (callee->fun == NULL)
2239 return FALSE;
2240 callee->is_tail = !is_call;
2241 callee->is_pasted = FALSE;
2242 callee->count = 0;
2243 if (callee->fun->last_caller != sec)
2244 {
2245 callee->fun->last_caller = sec;
2246 callee->fun->call_count += 1;
2247 }
2248 if (!insert_callee (caller, callee))
2249 free (callee);
2250 else if (!is_call
2251 && !callee->fun->is_func
2252 && callee->fun->stack == 0)
2253 {
2254 /* This is either a tail call or a branch from one part of
2255 the function to another, ie. hot/cold section. If the
2256 destination has been called by some other function then
2257 it is a separate function. We also assume that functions
2258 are not split across input files. */
2259	  if (sec->owner != sym_sec->owner)
2260 {
2261 callee->fun->start = NULL;
2262 callee->fun->is_func = TRUE;
2263 }
2264	  else if (callee->fun->start == NULL)
2265	    callee->fun->start = caller;
2266 else
2267 {
2268 struct function_info *callee_start;
2269 struct function_info *caller_start;
2270 callee_start = callee->fun;
2271 while (callee_start->start)
2272 callee_start = callee_start->start;
2273 caller_start = caller;
2274 while (caller_start->start)
2275 caller_start = caller_start->start;
2276 if (caller_start != callee_start)
2277 {
2278 callee->fun->start = NULL;
2279 callee->fun->is_func = TRUE;
2280 }
2281 }
2282 }
2283 }
2284
2285 return TRUE;
2286}
2287
2288/* Handle something like .init or .fini, which has a piece of a function.
2289 These sections are pasted together to form a single function. */
2290
2291static bfd_boolean
2292pasted_function (asection *sec, struct bfd_link_info *info)
2293{
2294 struct bfd_link_order *l;
2295 struct _spu_elf_section_data *sec_data;
2296 struct spu_elf_stack_info *sinfo;
2297 Elf_Internal_Sym *fake;
2298 struct function_info *fun, *fun_start;
2299
2300 fake = bfd_zmalloc (sizeof (*fake));
2301 if (fake == NULL)
2302 return FALSE;
2303 fake->st_value = 0;
2304 fake->st_size = sec->size;
2305 fake->st_shndx
2306 = _bfd_elf_section_from_bfd_section (sec->owner, sec);
2307 fun = maybe_insert_function (sec, fake, FALSE, FALSE);
2308 if (!fun)
2309 return FALSE;
2310
2311 /* Find a function immediately preceding this section. */
2312 fun_start = NULL;
2313 for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
2314 {
2315 if (l->u.indirect.section == sec)
2316 {
2317 if (fun_start != NULL)
2318 {
2319 struct call_info *callee = bfd_malloc (sizeof *callee);
2320 if (callee == NULL)
2321 return FALSE;
2322
2323 fun->start = fun_start;
2324 callee->fun = fun;
2325 callee->is_tail = TRUE;
2326 callee->is_pasted = TRUE;
2327 callee->count = 0;
2328 if (!insert_callee (fun_start, callee))
2329 free (callee);
2330 return TRUE;
2331 }
2332 break;
2333 }
2334 if (l->type == bfd_indirect_link_order
2335 && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
2336	  && (sinfo = sec_data->u.i.stack_info) != NULL
2337 && sinfo->num_fun != 0)
2338 fun_start = &sinfo->fun[sinfo->num_fun - 1];
2339 }
2340
2341 info->callbacks->einfo (_("%A link_order not found\n"), sec);
2342 return FALSE;
2343}
2344
2345/* Map address ranges in code sections to functions. */
2346
2347static bfd_boolean
2348discover_functions (struct bfd_link_info *info)
2349{
2350 bfd *ibfd;
2351 int bfd_idx;
2352 Elf_Internal_Sym ***psym_arr;
2353 asection ***sec_arr;
2354 bfd_boolean gaps = FALSE;
2355
2356 bfd_idx = 0;
2357 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2358 bfd_idx++;
2359
2360 psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
2361 if (psym_arr == NULL)
2362 return FALSE;
2363 sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
2364 if (sec_arr == NULL)
2365 return FALSE;
2366
2367
2368 for (ibfd = info->input_bfds, bfd_idx = 0;
2369 ibfd != NULL;
2370 ibfd = ibfd->link_next, bfd_idx++)
2371 {
2372 extern const bfd_target bfd_elf32_spu_vec;
2373 Elf_Internal_Shdr *symtab_hdr;
2374 asection *sec;
2375 size_t symcount;
2376 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2377 asection **psecs, **p;
2378
2379 if (ibfd->xvec != &bfd_elf32_spu_vec)
2380 continue;
2381
2382 /* Read all the symbols. */
2383 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2384 symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
2385 if (symcount == 0)
2386 {
2387 if (!gaps)
2388 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2389 if (interesting_section (sec, info->output_bfd))
2390 {
2391 gaps = TRUE;
2392 break;
2393 }
2394 continue;
2395 }
2396
2397      if (symtab_hdr->contents != NULL)
2398	{
2399 /* Don't use cached symbols since the generic ELF linker
2400 code only reads local symbols, and we need globals too. */
2401 free (symtab_hdr->contents);
2402 symtab_hdr->contents = NULL;
2403	}
2404 syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
2405 NULL, NULL, NULL);
2406 symtab_hdr->contents = (void *) syms;
2407 if (syms == NULL)
2408 return FALSE;
2409
2410 /* Select defined function symbols that are going to be output. */
2411 psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
2412 if (psyms == NULL)
2413 return FALSE;
2414 psym_arr[bfd_idx] = psyms;
2415 psecs = bfd_malloc (symcount * sizeof (*psecs));
2416 if (psecs == NULL)
2417 return FALSE;
2418 sec_arr[bfd_idx] = psecs;
2419 for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
2420 if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
2421 || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
2422 {
2423 asection *s;
2424
2425 *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
2426	    if (s != NULL && interesting_section (s, info->output_bfd))
2427 *psy++ = sy;
2428 }
2429 symcount = psy - psyms;
2430 *psy = NULL;
2431
2432 /* Sort them by section and offset within section. */
2433 sort_syms_syms = syms;
2434 sort_syms_psecs = psecs;
2435 qsort (psyms, symcount, sizeof (*psyms), sort_syms);
2436
2437 /* Now inspect the function symbols. */
2438 for (psy = psyms; psy < psyms + symcount; )
2439 {
2440 asection *s = psecs[*psy - syms];
2441 Elf_Internal_Sym **psy2;
2442
2443 for (psy2 = psy; ++psy2 < psyms + symcount; )
2444 if (psecs[*psy2 - syms] != s)
2445 break;
2446
2447 if (!alloc_stack_info (s, psy2 - psy))
2448 return FALSE;
2449 psy = psy2;
2450 }
2451
2452 /* First install info about properly typed and sized functions.
2453 In an ideal world this will cover all code sections, except
2454 when partitioning functions into hot and cold sections,
2455 and the horrible pasted together .init and .fini functions. */
2456 for (psy = psyms; psy < psyms + symcount; ++psy)
2457 {
2458 sy = *psy;
2459 if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
2460 {
2461 asection *s = psecs[sy - syms];
2462 if (!maybe_insert_function (s, sy, FALSE, TRUE))
2463 return FALSE;
2464 }
2465 }
2466
2467 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2468	if (interesting_section (sec, info->output_bfd))
2469 gaps |= check_function_ranges (sec, info);
2470 }
2471
2472 if (gaps)
2473 {
2474 /* See if we can discover more function symbols by looking at
2475 relocations. */
2476 for (ibfd = info->input_bfds, bfd_idx = 0;
2477 ibfd != NULL;
2478 ibfd = ibfd->link_next, bfd_idx++)
2479 {
2480 asection *sec;
2481
2482 if (psym_arr[bfd_idx] == NULL)
2483 continue;
2484
2485 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2486 if (!mark_functions_via_relocs (sec, info, FALSE))
2487 return FALSE;
2488 }
2489
2490 for (ibfd = info->input_bfds, bfd_idx = 0;
2491 ibfd != NULL;
2492 ibfd = ibfd->link_next, bfd_idx++)
2493 {
2494 Elf_Internal_Shdr *symtab_hdr;
2495 asection *sec;
2496 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2497 asection **psecs;
2498
2499 if ((psyms = psym_arr[bfd_idx]) == NULL)
2500 continue;
2501
2502 psecs = sec_arr[bfd_idx];
2503
2504 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2505 syms = (Elf_Internal_Sym *) symtab_hdr->contents;
2506
2507 gaps = FALSE;
2508 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2509	if (interesting_section (sec, info->output_bfd))
2510 gaps |= check_function_ranges (sec, info);
2511 if (!gaps)
2512 continue;
2513
2514 /* Finally, install all globals. */
2515 for (psy = psyms; (sy = *psy) != NULL; ++psy)
2516 {
2517 asection *s;
2518
2519 s = psecs[sy - syms];
2520
2521 /* Global syms might be improperly typed functions. */
2522 if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
2523 && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
2524 {
2525 if (!maybe_insert_function (s, sy, FALSE, FALSE))
2526 return FALSE;
2527 }
2528 }
2529 }
2530
2531 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2532 {
2533 extern const bfd_target bfd_elf32_spu_vec;
2534 asection *sec;
2535
2536 if (ibfd->xvec != &bfd_elf32_spu_vec)
2537 continue;
2538
2539 /* Some of the symbols we've installed as marking the
2540 beginning of functions may have a size of zero. Extend
2541 the range of such functions to the beginning of the
2542 next symbol of interest. */
2543 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2544	if (interesting_section (sec, info->output_bfd))
2545 {
2546 struct _spu_elf_section_data *sec_data;
2547 struct spu_elf_stack_info *sinfo;
2548
2549 sec_data = spu_elf_section_data (sec);
2550	    sinfo = sec_data->u.i.stack_info;
2551 if (sinfo != NULL)
2552 {
2553 int fun_idx;
2554 bfd_vma hi = sec->size;
2555
2556 for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
2557 {
2558 sinfo->fun[fun_idx].hi = hi;
2559 hi = sinfo->fun[fun_idx].lo;
2560 }
2561 }
2562 /* No symbols in this section. Must be .init or .fini
2563 or something similar. */
2564 else if (!pasted_function (sec, info))
2565 return FALSE;
2566 }
2567 }
2568 }
2569
2570 for (ibfd = info->input_bfds, bfd_idx = 0;
2571 ibfd != NULL;
2572 ibfd = ibfd->link_next, bfd_idx++)
2573 {
2574 if (psym_arr[bfd_idx] == NULL)
2575 continue;
2576
2577 free (psym_arr[bfd_idx]);
2578 free (sec_arr[bfd_idx]);
2579 }
2580
2581 free (psym_arr);
2582 free (sec_arr);
2583
2584 return TRUE;
2585}
2586
2587/* Iterate over all function_info we have collected, calling DOIT on
2588 each node if ROOT_ONLY is false. Only call DOIT on root nodes
2589 if ROOT_ONLY. */
2590
2591static bfd_boolean
2592for_each_node (bfd_boolean (*doit) (struct function_info *,
2593 struct bfd_link_info *,
2594 void *),
2595 struct bfd_link_info *info,
2596 void *param,
2597 int root_only)
2598{
2599 bfd *ibfd;
2600
2601 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2602 {
2603 extern const bfd_target bfd_elf32_spu_vec;
2604 asection *sec;
2605
2606 if (ibfd->xvec != &bfd_elf32_spu_vec)
2607 continue;
2608
2609 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2610 {
2611 struct _spu_elf_section_data *sec_data;
2612 struct spu_elf_stack_info *sinfo;
2613
2614 if ((sec_data = spu_elf_section_data (sec)) != NULL
2615 && (sinfo = sec_data->u.i.stack_info) != NULL)
2616 {
2617 int i;
2618 for (i = 0; i < sinfo->num_fun; ++i)
2619 if (!root_only || !sinfo->fun[i].non_root)
2620 if (!doit (&sinfo->fun[i], info, param))
2621 return FALSE;
2622 }
2623 }
2624 }
2625 return TRUE;
2626}
2627
2628/* Transfer call info attached to struct function_info entries for
2629 all of a given function's sections to the first entry. */
2630
2631static bfd_boolean
2632transfer_calls (struct function_info *fun,
2633 struct bfd_link_info *info ATTRIBUTE_UNUSED,
2634 void *param ATTRIBUTE_UNUSED)
2635{
2636 struct function_info *start = fun->start;
2637
2638 if (start != NULL)
2639 {
2640 struct call_info *call, *call_next;
2641
2642 while (start->start != NULL)
2643 start = start->start;
2644 for (call = fun->call_list; call != NULL; call = call_next)
2645 {
2646 call_next = call->next;
2647 if (!insert_callee (start, call))
2648 free (call);
2649 }
2650 fun->call_list = NULL;
2651 }
2652 return TRUE;
2653}
2654
2655/* Mark nodes in the call graph that are called by some other node. */
2656
2657static bfd_boolean
2658mark_non_root (struct function_info *fun,
2659 struct bfd_link_info *info ATTRIBUTE_UNUSED,
2660 void *param ATTRIBUTE_UNUSED)
2661{
2662 struct call_info *call;
2663
2664 if (fun->visit1)
2665 return TRUE;
2666 fun->visit1 = TRUE;
2667 for (call = fun->call_list; call; call = call->next)
2668 {
2669 call->fun->non_root = TRUE;
2670      mark_non_root (call->fun, 0, 0);
2671    }
2672  return TRUE;
2673}
2674
2675/* Remove cycles from the call graph.  Set depth of nodes.  */
2676
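/* Implementation note: this is a depth-first walk.  "visit2" marks nodes
   that have already been processed, while "marking" is set only while a
   node is on the current DFS path, so a call to a node whose "marking"
   flag is still set is a back edge and is removed from the call list.
   Each call's max_depth records the deepest point reached through it,
   with pasted sections not counted as an extra level.  */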
2677static bfd_boolean
2678remove_cycles (struct function_info *fun,
2679 struct bfd_link_info *info,
2680	       void *param)
2681{
2682 struct call_info **callp, *call;
2683 unsigned int depth = *(unsigned int *) param;
2684 unsigned int max_depth = depth;
2685
2686  fun->depth = depth;
2687 fun->visit2 = TRUE;
2688 fun->marking = TRUE;
2689
2690 callp = &fun->call_list;
2691 while ((call = *callp) != NULL)
2692 {
2693 if (!call->fun->visit2)
2694	{
2695 call->max_depth = depth + !call->is_pasted;
2696 if (!remove_cycles (call->fun, info, &call->max_depth))
2697	    return FALSE;
2698 if (max_depth < call->max_depth)
2699 max_depth = call->max_depth;
2700	}
2701 else if (call->fun->marking)
2702 {
2703 if (!spu_hash_table (info)->auto_overlay)
2704 {
2705 const char *f1 = func_name (fun);
2706 const char *f2 = func_name (call->fun);
2707
2708 info->callbacks->info (_("Stack analysis will ignore the call "
2709 "from %s to %s\n"),
2710 f1, f2);
2711 }
2712	  *callp = call->next;
2713	  free (call);
2714 continue;
2715 }
2716 callp = &call->next;
2717 }
2718 fun->marking = FALSE;
2719  *(unsigned int *) param = max_depth;
2720  return TRUE;
2721}
2722
2723/* Populate call_list for each function. */
2724
2725static bfd_boolean
2726build_call_tree (struct bfd_link_info *info)
2727{
2728  bfd *ibfd;
2729  unsigned int depth;
2730
2731 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2732 {
2733 extern const bfd_target bfd_elf32_spu_vec;
2734 asection *sec;
2735
2736 if (ibfd->xvec != &bfd_elf32_spu_vec)
2737 continue;
2738
2739 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2740 if (!mark_functions_via_relocs (sec, info, TRUE))
2741 return FALSE;
2742 }
2743
2744 /* Transfer call info from hot/cold section part of function
2745 to main entry. */
2746 if (!spu_hash_table (info)->auto_overlay
2747 && !for_each_node (transfer_calls, info, 0, FALSE))
2748    return FALSE;
2749
2750 /* Find the call graph root(s). */
2751 if (!for_each_node (mark_non_root, info, 0, FALSE))
2752 return FALSE;
2753
2754 /* Remove cycles from the call graph. We start from the root node(s)
2755 so that we break cycles in a reasonable place. */
2756 depth = 0;
2757 return for_each_node (remove_cycles, info, &depth, TRUE);
2758}
2759
2760/* qsort predicate to sort calls by max_depth then count. */
2761
2762static int
2763sort_calls (const void *a, const void *b)
2764{
2765 struct call_info *const *c1 = a;
2766 struct call_info *const *c2 = b;
2767 int delta;
2768
2769 delta = (*c2)->max_depth - (*c1)->max_depth;
2770 if (delta != 0)
2771 return delta;
2772
2773 delta = (*c2)->count - (*c1)->count;
2774 if (delta != 0)
2775 return delta;
2776
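  /* Otherwise fall back on the position of the entries within the array
     being sorted, so the comparison stays consistent for qsort.  */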
2777 return c1 - c2;
2778}
2779
2780struct _mos_param {
2781 unsigned int max_overlay_size;
2782};
2783
2784/* Set linker_mark and gc_mark on any sections that we will put in
2785 overlays. These flags are used by the generic ELF linker, but we
2786 won't be continuing on to bfd_elf_final_link so it is OK to use
2787 them. linker_mark is clear before we get here. Set segment_mark
2788 on sections that are part of a pasted function (excluding the last
2789 section).
2790
2791 Set up function rodata section if --overlay-rodata. We don't
2792 currently include merged string constant rodata sections since
2793
2794 Sort the call graph so that the deepest nodes will be visited
2795 first. */
2796
2797static bfd_boolean
2798mark_overlay_section (struct function_info *fun,
2799 struct bfd_link_info *info,
2800 void *param)
2801{
2802 struct call_info *call;
2803 unsigned int count;
2804 struct _mos_param *mos_param = param;
2805
2806 if (fun->visit4)
2807 return TRUE;
2808
2809 fun->visit4 = TRUE;
2810 if (!fun->sec->linker_mark)
2811 {
2812 fun->sec->linker_mark = 1;
2813 fun->sec->gc_mark = 1;
2814 fun->sec->segment_mark = 0;
2815 /* Ensure SEC_CODE is set on this text section (it ought to
2816 be!), and SEC_CODE is clear on rodata sections. We use
2817 this flag to differentiate the two overlay section types. */
2818 fun->sec->flags |= SEC_CODE;
2819 if (spu_hash_table (info)->auto_overlay & OVERLAY_RODATA)
2820 {
2821 char *name = NULL;
2822 unsigned int size;
2823
2824 /* Find the rodata section corresponding to this function's
2825 text section. */
2826 if (strcmp (fun->sec->name, ".text") == 0)
2827 {
2828 name = bfd_malloc (sizeof (".rodata"));
2829 if (name == NULL)
2830 return FALSE;
2831 memcpy (name, ".rodata", sizeof (".rodata"));
2832 }
2833 else if (strncmp (fun->sec->name, ".text.", 6) == 0)
2834 {
2835 size_t len = strlen (fun->sec->name);
2836 name = bfd_malloc (len + 3);
2837 if (name == NULL)
2838 return FALSE;
2839 memcpy (name, ".rodata", sizeof (".rodata"));
2840 memcpy (name + 7, fun->sec->name + 5, len - 4);
2841 }
2842 else if (strncmp (fun->sec->name, ".gnu.linkonce.t.", 16) == 0)
2843 {
2844 size_t len = strlen (fun->sec->name) + 1;
2845 name = bfd_malloc (len);
2846 if (name == NULL)
2847 return FALSE;
2848 memcpy (name, fun->sec->name, len);
2849 name[14] = 'r';
2850 }
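	      /* So, for example, ".text.foo" maps to ".rodata.foo" and
		 ".gnu.linkonce.t.foo" to ".gnu.linkonce.r.foo".  */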
2851
2852 if (name != NULL)
2853 {
2854 asection *rodata = NULL;
2855 asection *group_sec = elf_section_data (fun->sec)->next_in_group;
2856 if (group_sec == NULL)
2857 rodata = bfd_get_section_by_name (fun->sec->owner, name);
2858 else
2859 while (group_sec != NULL && group_sec != fun->sec)
2860 {
2861 if (strcmp (group_sec->name, name) == 0)
2862 {
2863 rodata = group_sec;
2864 break;
2865 }
2866 group_sec = elf_section_data (group_sec)->next_in_group;
2867 }
2868 fun->rodata = rodata;
2869 if (fun->rodata)
2870 {
2871 fun->rodata->linker_mark = 1;
2872 fun->rodata->gc_mark = 1;
2873 fun->rodata->flags &= ~SEC_CODE;
2874 }
2875 free (name);
2876 }
2877 size = fun->sec->size;
2878 if (fun->rodata)
2879 size += fun->rodata->size;
2880 if (mos_param->max_overlay_size < size)
2881 mos_param->max_overlay_size = size;
2882 }
2883 }
2884
2885 for (count = 0, call = fun->call_list; call != NULL; call = call->next)
2886 count += 1;
2887
2888 if (count > 1)
2889 {
2890 struct call_info **calls = bfd_malloc (count * sizeof (*calls));
2891 if (calls == NULL)
2892 return FALSE;
2893
2894 for (count = 0, call = fun->call_list; call != NULL; call = call->next)
2895 calls[count++] = call;
2896
2897 qsort (calls, count, sizeof (*calls), sort_calls);
2898
2899 fun->call_list = NULL;
2900 while (count != 0)
2901 {
2902 --count;
2903 calls[count]->next = fun->call_list;
2904 fun->call_list = calls[count];
2905 }
2906 free (calls);
2907 }
2908
2909 for (call = fun->call_list; call != NULL; call = call->next)
2910 {
2911 if (call->is_pasted)
2912 {
2913 /* There can only be one is_pasted call per function_info. */
2914 BFD_ASSERT (!fun->sec->segment_mark);
2915 fun->sec->segment_mark = 1;
2916 }
2917 if (!mark_overlay_section (call->fun, info, param))
2918 return FALSE;
2919 }
2920
2921 /* Don't put entry code into an overlay. The overlay manager needs
2922 a stack! */
2923 if (fun->lo + fun->sec->output_offset + fun->sec->output_section->vma
2924 == info->output_bfd->start_address)
2925 {
2926 fun->sec->linker_mark = 0;
2927 if (fun->rodata != NULL)
2928 fun->rodata->linker_mark = 0;
2929 }
2930 return TRUE;
2931}
2932
2933/* If non-zero then unmark functions called from those within sections
2934 that we need to unmark. Unfortunately this isn't reliable since the
2935 call graph cannot know the destination of function pointer calls. */
2936#define RECURSE_UNMARK 0
2937
2938struct _uos_param {
2939 asection *exclude_input_section;
2940 asection *exclude_output_section;
2941 unsigned long clearing;
2942};
2943
2944/* Undo some of mark_overlay_section's work. */
2945
2946static bfd_boolean
2947unmark_overlay_section (struct function_info *fun,
2948 struct bfd_link_info *info,
2949 void *param)
2950{
2951 struct call_info *call;
2952 struct _uos_param *uos_param = param;
2953 unsigned int excluded = 0;
2954
2955 if (fun->visit5)
2956 return TRUE;
2957
2958 fun->visit5 = TRUE;
2959
2960 excluded = 0;
2961 if (fun->sec == uos_param->exclude_input_section
2962 || fun->sec->output_section == uos_param->exclude_output_section)
2963 excluded = 1;
2964
2965 if (RECURSE_UNMARK)
2966 uos_param->clearing += excluded;
2967
2968  if (RECURSE_UNMARK ? uos_param->clearing : excluded)
2969 {
2970 fun->sec->linker_mark = 0;
2971 if (fun->rodata)
2972 fun->rodata->linker_mark = 0;
2973 }
2974
2975 for (call = fun->call_list; call != NULL; call = call->next)
2976 if (!unmark_overlay_section (call->fun, info, param))
2977 return FALSE;
2978
2979 if (RECURSE_UNMARK)
2980 uos_param->clearing -= excluded;
2981 return TRUE;
2982}
2983
2984struct _cl_param {
2985 unsigned int lib_size;
2986 asection **lib_sections;
2987};
2988
2989/* Add sections we have marked as belonging to overlays to an array
2990   for consideration as non-overlay sections.  The array consists of
2991 pairs of sections, (text,rodata), for functions in the call graph. */
2992
2993static bfd_boolean
2994collect_lib_sections (struct function_info *fun,
2995 struct bfd_link_info *info,
2996 void *param)
2997{
2998 struct _cl_param *lib_param = param;
2999 struct call_info *call;
3000 unsigned int size;
3001
3002 if (fun->visit6)
3003 return TRUE;
3004
3005 fun->visit6 = TRUE;
3006 if (!fun->sec->linker_mark || !fun->sec->gc_mark || fun->sec->segment_mark)
3007 return TRUE;
3008
3009 size = fun->sec->size;
3010 if (fun->rodata)
3011 size += fun->rodata->size;
3012 if (size > lib_param->lib_size)
3013 return TRUE;
3014
3015 *lib_param->lib_sections++ = fun->sec;
3016 fun->sec->gc_mark = 0;
3017 if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
3018 {
3019 *lib_param->lib_sections++ = fun->rodata;
3020 fun->rodata->gc_mark = 0;
3021 }
3022 else
3023 *lib_param->lib_sections++ = NULL;
3024
3025 for (call = fun->call_list; call != NULL; call = call->next)
3026 collect_lib_sections (call->fun, info, param);
3027
3028 return TRUE;
3029}
3030
3031/* qsort predicate to sort sections by call count. */
3032
3033static int
3034sort_lib (const void *a, const void *b)
3035{
3036 asection *const *s1 = a;
3037 asection *const *s2 = b;
3038 struct _spu_elf_section_data *sec_data;
3039 struct spu_elf_stack_info *sinfo;
3040 int delta;
3041
3042 delta = 0;
3043 if ((sec_data = spu_elf_section_data (*s1)) != NULL
3044 && (sinfo = sec_data->u.i.stack_info) != NULL)
3045 {
3046 int i;
3047 for (i = 0; i < sinfo->num_fun; ++i)
3048 delta -= sinfo->fun[i].call_count;
3049 }
3050
3051 if ((sec_data = spu_elf_section_data (*s2)) != NULL
3052 && (sinfo = sec_data->u.i.stack_info) != NULL)
3053 {
3054 int i;
3055 for (i = 0; i < sinfo->num_fun; ++i)
3056 delta += sinfo->fun[i].call_count;
3057 }
3058
3059 if (delta != 0)
3060 return delta;
3061
3062 return s1 - s2;
3063}
3064
3065/* Remove some sections from those marked to be in overlays. Choose
3066 those that are called from many places, likely library functions. */
3067
3068static unsigned int
3069auto_ovl_lib_functions (struct bfd_link_info *info, unsigned int lib_size)
3070{
3071 bfd *ibfd;
3072 asection **lib_sections;
3073 unsigned int i, lib_count;
3074 struct _cl_param collect_lib_param;
3075 struct function_info dummy_caller;
3076
3077 memset (&dummy_caller, 0, sizeof (dummy_caller));
3078 lib_count = 0;
3079 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3080 {
3081 extern const bfd_target bfd_elf32_spu_vec;
3082 asection *sec;
3083
3084 if (ibfd->xvec != &bfd_elf32_spu_vec)
3085 continue;
3086
3087 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3088 if (sec->linker_mark
3089 && sec->size < lib_size
3090 && (sec->flags & SEC_CODE) != 0)
3091 lib_count += 1;
3092 }
3093 lib_sections = bfd_malloc (lib_count * 2 * sizeof (*lib_sections));
3094 if (lib_sections == NULL)
3095 return (unsigned int) -1;
3096 collect_lib_param.lib_size = lib_size;
3097 collect_lib_param.lib_sections = lib_sections;
3098 if (!for_each_node (collect_lib_sections, info, &collect_lib_param,
3099 TRUE))
3100 return (unsigned int) -1;
3101 lib_count = (collect_lib_param.lib_sections - lib_sections) / 2;
3102
3103 /* Sort sections so that those with the most calls are first. */
3104 if (lib_count > 1)
3105 qsort (lib_sections, lib_count, 2 * sizeof (*lib_sections), sort_lib);
3106
3107 for (i = 0; i < lib_count; i++)
3108 {
3109 unsigned int tmp, stub_size;
3110 asection *sec;
3111 struct _spu_elf_section_data *sec_data;
3112 struct spu_elf_stack_info *sinfo;
3113
3114 sec = lib_sections[2 * i];
3115 /* If this section is OK, its size must be less than lib_size. */
3116 tmp = sec->size;
3117 /* If it has a rodata section, then add that too. */
3118 if (lib_sections[2 * i + 1])
3119 tmp += lib_sections[2 * i + 1]->size;
3120 /* Add any new overlay call stubs needed by the section. */
3121 stub_size = 0;
3122 if (tmp < lib_size
3123 && (sec_data = spu_elf_section_data (sec)) != NULL
3124 && (sinfo = sec_data->u.i.stack_info) != NULL)
3125 {
3126 int k;
3127 struct call_info *call;
3128
3129 for (k = 0; k < sinfo->num_fun; ++k)
3130 for (call = sinfo->fun[k].call_list; call; call = call->next)
3131 if (call->fun->sec->linker_mark)
3132 {
3133 struct call_info *p;
3134 for (p = dummy_caller.call_list; p; p = p->next)
3135 if (p->fun == call->fun)
3136 break;
3137 if (!p)
3138 stub_size += OVL_STUB_SIZE;
3139 }
3140 }
3141 if (tmp + stub_size < lib_size)
3142 {
3143 struct call_info **pp, *p;
3144
3145 /* This section fits. Mark it as non-overlay. */
3146 lib_sections[2 * i]->linker_mark = 0;
3147 if (lib_sections[2 * i + 1])
3148 lib_sections[2 * i + 1]->linker_mark = 0;
3149 lib_size -= tmp + stub_size;
3150 /* Call stubs to the section we just added are no longer
3151 needed. */
3152 pp = &dummy_caller.call_list;
3153 while ((p = *pp) != NULL)
3154 if (!p->fun->sec->linker_mark)
3155 {
3156 lib_size += OVL_STUB_SIZE;
3157 *pp = p->next;
3158 free (p);
3159 }
3160 else
3161 pp = &p->next;
3162 /* Add new call stubs to dummy_caller. */
3163 if ((sec_data = spu_elf_section_data (sec)) != NULL
3164 && (sinfo = sec_data->u.i.stack_info) != NULL)
3165 {
3166 int k;
3167 struct call_info *call;
3168
3169 for (k = 0; k < sinfo->num_fun; ++k)
3170 for (call = sinfo->fun[k].call_list;
3171 call;
3172 call = call->next)
3173 if (call->fun->sec->linker_mark)
3174 {
3175 struct call_info *callee;
3176 callee = bfd_malloc (sizeof (*callee));
3177 if (callee == NULL)
3178 return (unsigned int) -1;
3179 *callee = *call;
3180 if (!insert_callee (&dummy_caller, callee))
3181 free (callee);
3182 }
3183 }
3184 }
3185 }
3186 while (dummy_caller.call_list != NULL)
3187 {
3188 struct call_info *call = dummy_caller.call_list;
3189 dummy_caller.call_list = call->next;
3190 free (call);
3191 }
3192 for (i = 0; i < 2 * lib_count; i++)
3193 if (lib_sections[i])
3194 lib_sections[i]->gc_mark = 1;
3195 free (lib_sections);
3196 return lib_size;
3197}
3198
3199/* Build an array of overlay sections.  The deepest node's section is
3200   added first, then its parent node's section, then everything called
3201   from the parent section.  The idea is to group sections to
3202   minimise calls between different overlays.  */
3203
3204static bfd_boolean
3205collect_overlays (struct function_info *fun,
3206 struct bfd_link_info *info,
3207 void *param)
3208{
3209 struct call_info *call;
3210 bfd_boolean added_fun;
3211 asection ***ovly_sections = param;
3212
3213 if (fun->visit7)
3214 return TRUE;
3215
3216 fun->visit7 = TRUE;
3217 for (call = fun->call_list; call != NULL; call = call->next)
3218 if (!call->is_pasted)
3219 {
3220 if (!collect_overlays (call->fun, info, ovly_sections))
3221 return FALSE;
3222 break;
3223 }
3224
3225 added_fun = FALSE;
3226 if (fun->sec->linker_mark && fun->sec->gc_mark)
3227 {
3228 fun->sec->gc_mark = 0;
3229 *(*ovly_sections)++ = fun->sec;
3230 if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
3231 {
3232 fun->rodata->gc_mark = 0;
3233 *(*ovly_sections)++ = fun->rodata;
3234 }
3235 else
3236 *(*ovly_sections)++ = NULL;
3237 added_fun = TRUE;
3238
3239 /* Pasted sections must stay with the first section. We don't
3240 put pasted sections in the array, just the first section.
3241 Mark subsequent sections as already considered. */
3242 if (fun->sec->segment_mark)
3243 {
3244 struct function_info *call_fun = fun;
3245 do
3246 {
3247 for (call = call_fun->call_list; call != NULL; call = call->next)
3248 if (call->is_pasted)
3249 {
3250 call_fun = call->fun;
3251 call_fun->sec->gc_mark = 0;
3252 if (call_fun->rodata)
3253 call_fun->rodata->gc_mark = 0;
3254 break;
3255 }
3256 if (call == NULL)
3257 abort ();
3258 }
3259 while (call_fun->sec->segment_mark);
3260 }
3261 }
3262
3263 for (call = fun->call_list; call != NULL; call = call->next)
3264 if (!collect_overlays (call->fun, info, ovly_sections))
3265 return FALSE;
3266
3267 if (added_fun)
3268 {
3269 struct _spu_elf_section_data *sec_data;
3270 struct spu_elf_stack_info *sinfo;
3271
3272 if ((sec_data = spu_elf_section_data (fun->sec)) != NULL
3273 && (sinfo = sec_data->u.i.stack_info) != NULL)
3274 {
3275 int i;
3276 for (i = 0; i < sinfo->num_fun; ++i)
3277 if (!collect_overlays (&sinfo->fun[i], info, ovly_sections))
3278 return FALSE;
3279 }
3280 }
3281
3282 return TRUE;
3283}
3284
3285struct _sum_stack_param {
3286 size_t cum_stack;
3287 size_t overall_stack;
3288 bfd_boolean emit_stack_syms;
3289};
3290
3291/* Descend the call graph for FUN, accumulating total stack required. */
3292
3293static bfd_boolean
3294sum_stack (struct function_info *fun,
3295 struct bfd_link_info *info,
3296	   void *param)
3297{
3298 struct call_info *call;
3299 struct function_info *max;
3300 size_t stack, cum_stack;
49fa1e15 3301 const char *f1;
3302  bfd_boolean has_call;
3303  struct _sum_stack_param *sum_stack_param = param;
3304  struct spu_link_hash_table *htab;
3305
3306 cum_stack = fun->stack;
3307 sum_stack_param->cum_stack = cum_stack;
49fa1e15 3308 if (fun->visit3)
3309    return TRUE;
3310
3311  has_call = FALSE;
3312  max = NULL;
3313 for (call = fun->call_list; call; call = call->next)
3314 {
3315 if (!call->is_pasted)
3316 has_call = TRUE;
3317 if (!sum_stack (call->fun, info, sum_stack_param))
3318 return FALSE;
3319 stack = sum_stack_param->cum_stack;
3320 /* Include caller stack for normal calls, don't do so for
3321 tail calls. fun->stack here is local stack usage for
3322 this function. */
3323      if (!call->is_tail || call->is_pasted || call->fun->start != NULL)
3324	stack += fun->stack;
3325      if (cum_stack < stack)
3326	{
3327	  cum_stack = stack;
3328 max = call->fun;
3329 }
3330 }
3331
3332 sum_stack_param->cum_stack = cum_stack;
3333 stack = fun->stack;
3334 /* Now fun->stack holds cumulative stack. */
3335 fun->stack = cum_stack;
3336 fun->visit3 = TRUE;
3337
3338 if (!fun->non_root
3339 && sum_stack_param->overall_stack < cum_stack)
3340 sum_stack_param->overall_stack = cum_stack;
3341
3342 htab = spu_hash_table (info);
3343 if (htab->auto_overlay)
3344 return TRUE;
3345
3346  f1 = func_name (fun);
3347 if (!fun->non_root)
3348 info->callbacks->info (_(" %s: 0x%v\n"), f1, (bfd_vma) cum_stack);
3349  info->callbacks->minfo (_("%s: 0x%v 0x%v\n"),
3350			  f1, (bfd_vma) stack, (bfd_vma) cum_stack);
3351
3352  if (has_call)
3353 {
3354 info->callbacks->minfo (_(" calls:\n"));
3355 for (call = fun->call_list; call; call = call->next)
3356 if (!call->is_pasted)
3357 {
3358 const char *f2 = func_name (call->fun);
3359 const char *ann1 = call->fun == max ? "*" : " ";
3360 const char *ann2 = call->is_tail ? "t" : " ";
3361
3362 info->callbacks->minfo (_(" %s%s %s\n"), ann1, ann2, f2);
3363 }
3364 }
3365
3366  if (sum_stack_param->emit_stack_syms)
3367    {
3368 char *name = bfd_malloc (18 + strlen (f1));
3369 struct elf_link_hash_entry *h;
3370
3371 if (name == NULL)
3372 return FALSE;
3373
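      /* Global functions get a plain "__stack_<name>" symbol; local syms
	 are qualified with the section id so that identically named
	 statics in different input files do not collide.  */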
3374 if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
3375 sprintf (name, "__stack_%s", f1);
3376 else
3377 sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);
3378
3379 h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
3380 free (name);
3381 if (h != NULL
3382 && (h->root.type == bfd_link_hash_new
3383 || h->root.type == bfd_link_hash_undefined
3384 || h->root.type == bfd_link_hash_undefweak))
3385	{
3386 h->root.type = bfd_link_hash_defined;
3387 h->root.u.def.section = bfd_abs_section_ptr;
3388 h->root.u.def.value = cum_stack;
3389 h->size = 0;
3390 h->type = 0;
3391 h->ref_regular = 1;
3392 h->def_regular = 1;
3393 h->ref_regular_nonweak = 1;
3394 h->forced_local = 1;
3395 h->non_elf = 0;
3396 }
3397 }
3398
3399  return TRUE;
3400}
3401
3402/* SEC is part of a pasted function. Return the call_info for the
3403 next section of this function. */
3404
3405static struct call_info *
3406find_pasted_call (asection *sec)
3407{
3408 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
3409 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
3410 struct call_info *call;
3411 int k;
3412
3413 for (k = 0; k < sinfo->num_fun; ++k)
3414 for (call = sinfo->fun[k].call_list; call != NULL; call = call->next)
3415 if (call->is_pasted)
3416 return call;
3417 abort ();
3418 return 0;
3419}
3420
3421/* qsort predicate to sort bfds by file name. */
3422
3423static int
3424sort_bfds (const void *a, const void *b)
3425{
3426 bfd *const *abfd1 = a;
3427 bfd *const *abfd2 = b;
3428
3429 return strcmp ((*abfd1)->filename, (*abfd2)->filename);
3430}
3431
3432/* Handle --auto-overlay. */
3433
3434static void spu_elf_auto_overlay (struct bfd_link_info *, void (*) (void))
3435 ATTRIBUTE_NORETURN;
3436
3437static void
3438spu_elf_auto_overlay (struct bfd_link_info *info,
3439 void (*spu_elf_load_ovl_mgr) (void))
3440{
3441 bfd *ibfd;
3442 bfd **bfd_arr;
3443 struct elf_segment_map *m;
3444 unsigned int fixed_size, lo, hi;
3445 struct spu_link_hash_table *htab;
3446 unsigned int base, i, count, bfd_count;
3447 int ovlynum;
3448 asection **ovly_sections, **ovly_p;
3449 FILE *script;
3450 unsigned int total_overlay_size, overlay_size;
3451 struct elf_link_hash_entry *h;
3452 struct _mos_param mos_param;
3453 struct _uos_param uos_param;
3454 struct function_info dummy_caller;
3455
3456 /* Find the extents of our loadable image. */
3457 lo = (unsigned int) -1;
3458 hi = 0;
3459 for (m = elf_tdata (info->output_bfd)->segment_map; m != NULL; m = m->next)
3460 if (m->p_type == PT_LOAD)
3461 for (i = 0; i < m->count; i++)
3462 if (m->sections[i]->size != 0)
3463 {
3464 if (m->sections[i]->vma < lo)
3465 lo = m->sections[i]->vma;
3466 if (m->sections[i]->vma + m->sections[i]->size - 1 > hi)
3467 hi = m->sections[i]->vma + m->sections[i]->size - 1;
3468 }
3469 fixed_size = hi + 1 - lo;
3470
3471 if (!discover_functions (info))
3472 goto err_exit;
3473
3474 if (!build_call_tree (info))
3475 goto err_exit;
3476
3477 uos_param.exclude_input_section = 0;
3478 uos_param.exclude_output_section
3479 = bfd_get_section_by_name (info->output_bfd, ".interrupt");
3480
3481 htab = spu_hash_table (info);
3482 h = elf_link_hash_lookup (&htab->elf, "__ovly_load",
3483 FALSE, FALSE, FALSE);
3484 if (h != NULL
3485 && (h->root.type == bfd_link_hash_defined
3486 || h->root.type == bfd_link_hash_defweak)
3487 && h->def_regular)
3488 {
3489 /* We have a user supplied overlay manager. */
3490 uos_param.exclude_input_section = h->root.u.def.section;
3491 }
3492 else
3493 {
3494 /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
3495 builtin version to .text, and will adjust .text size. */
3496 asection *text = bfd_get_section_by_name (info->output_bfd, ".text");
3497 if (text != NULL)
3498 fixed_size -= text->size;
3499 spu_elf_load_ovl_mgr ();
3500 text = bfd_get_section_by_name (info->output_bfd, ".text");
3501 if (text != NULL)
3502 fixed_size += text->size;
3503 }
3504
3505 /* Mark overlay sections, and find max overlay section size. */
3506 mos_param.max_overlay_size = 0;
3507 if (!for_each_node (mark_overlay_section, info, &mos_param, TRUE))
3508 goto err_exit;
3509
3510 /* We can't put the overlay manager or interrupt routines in
3511 overlays. */
3512 uos_param.clearing = 0;
3513 if ((uos_param.exclude_input_section
3514 || uos_param.exclude_output_section)
3515 && !for_each_node (unmark_overlay_section, info, &uos_param, TRUE))
3516 goto err_exit;
3517
3518 bfd_count = 0;
3519 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3520 ++bfd_count;
3521 bfd_arr = bfd_malloc (bfd_count * sizeof (*bfd_arr));
3522 if (bfd_arr == NULL)
3523 goto err_exit;
3524
3525 /* Count overlay sections, and subtract their sizes from "fixed_size". */
3526 count = 0;
3527 bfd_count = 0;
3528 total_overlay_size = 0;
3529 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3530 {
3531 extern const bfd_target bfd_elf32_spu_vec;
3532 asection *sec;
3533 unsigned int old_count;
3534
3535 if (ibfd->xvec != &bfd_elf32_spu_vec)
3536 continue;
3537
3538 old_count = count;
3539 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3540 if (sec->linker_mark)
3541 {
3542 if ((sec->flags & SEC_CODE) != 0)
3543 count += 1;
3544 fixed_size -= sec->size;
3545 total_overlay_size += sec->size;
3546 }
3547 if (count != old_count)
3548 bfd_arr[bfd_count++] = ibfd;
3549 }
3550
3551 /* Since the overlay link script selects sections by file name and
3552 section name, ensure that file names are unique. */
3553 if (bfd_count > 1)
3554 {
3555 bfd_boolean ok = TRUE;
3556
3557 qsort (bfd_arr, bfd_count, sizeof (*bfd_arr), sort_bfds);
3558 for (i = 1; i < bfd_count; ++i)
3559 if (strcmp (bfd_arr[i - 1]->filename, bfd_arr[i]->filename) == 0)
3560 {
3561	  if (bfd_arr[i - 1]->my_archive == bfd_arr[i]->my_archive)
3562	    {
3563	      if (bfd_arr[i - 1]->my_archive && bfd_arr[i]->my_archive)
3564		info->callbacks->einfo (_("%s duplicated in %s\n"),
3565					bfd_arr[i]->filename,
3566					bfd_arr[i]->my_archive->filename);
3567 else
3568 info->callbacks->einfo (_("%s duplicated\n"),
3569 bfd_arr[i]->filename);
3570 ok = FALSE;
3571	    }
3572 }
3573 if (!ok)
3574 {
3575 info->callbacks->einfo (_("sorry, no support for duplicate "
3576 "object files in auto-overlay script\n"));
3577 bfd_set_error (bfd_error_bad_value);
3578 goto err_exit;
3579 }
3580 }
3581 free (bfd_arr);
3582
3583 if (htab->reserved == 0)
3584 {
3585 struct _sum_stack_param sum_stack_param;
3586
3587 sum_stack_param.emit_stack_syms = 0;
3588 sum_stack_param.overall_stack = 0;
3589 if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
3590 goto err_exit;
3591      htab->reserved = sum_stack_param.overall_stack + htab->extra_stack_space;
3592 }
3593 fixed_size += htab->reserved;
3594 fixed_size += htab->non_ovly_stub * OVL_STUB_SIZE;
3595 if (fixed_size + mos_param.max_overlay_size <= htab->local_store)
3596 {
3597      /* Guess number of overlays.  Assuming the overlay buffer is on
3598 average only half full should be conservative. */
3599 ovlynum = total_overlay_size * 2 / (htab->local_store - fixed_size);
3600 /* Space for _ovly_table[], _ovly_buf_table[] and toe. */
3601 fixed_size += ovlynum * 16 + 16 + 4 + 16;
3602 }
3603
3604 if (fixed_size + mos_param.max_overlay_size > htab->local_store)
3605 info->callbacks->einfo (_("non-overlay plus maximum overlay size "
3606 "of 0x%x exceeds local store\n"),
3607 fixed_size + mos_param.max_overlay_size);
3608
3609 /* Now see if we should put some functions in the non-overlay area. */
3610 if (fixed_size < htab->overlay_fixed
3611 && htab->overlay_fixed + mos_param.max_overlay_size < htab->local_store)
3612 {
3613 unsigned int lib_size = htab->overlay_fixed - fixed_size;
3614 lib_size = auto_ovl_lib_functions (info, lib_size);
3615 if (lib_size == (unsigned int) -1)
3616 goto err_exit;
3617 fixed_size = htab->overlay_fixed - lib_size;
3618 }
3619
3620 /* Build an array of sections, suitably sorted to place into
3621 overlays. */
3622 ovly_sections = bfd_malloc (2 * count * sizeof (*ovly_sections));
3623 if (ovly_sections == NULL)
3624 goto err_exit;
3625 ovly_p = ovly_sections;
3626 if (!for_each_node (collect_overlays, info, &ovly_p, TRUE))
3627 goto err_exit;
3628 count = (size_t) (ovly_p - ovly_sections) / 2;
3629
3630 script = htab->spu_elf_open_overlay_script ();
3631
3632 if (fprintf (script, "SECTIONS\n{\n OVERLAY :\n {\n") <= 0)
3633 goto file_err;
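  /* The code below writes a linker script of roughly this shape (the
     archive, object and section names here are only illustrative):

	SECTIONS
	{
	 OVERLAY :
	 {
	  .ovly1 {
	   libfoo.a:bar.o (.text.baz)
	   ...
	  }
	  .ovly2 {
	   ...
	  }
	 }
	}
	INSERT AFTER .text;
  */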
3634
3635 memset (&dummy_caller, 0, sizeof (dummy_caller));
3636 overlay_size = htab->local_store - fixed_size;
3637 base = 0;
3638 ovlynum = 0;
3639 while (base < count)
3640 {
3641 unsigned int size = 0;
3642 unsigned int j;
3643
3644 for (i = base; i < count; i++)
3645 {
3646 asection *sec;
3647 unsigned int tmp;
3648 unsigned int stub_size;
3649 struct call_info *call, *pasty;
3650 struct _spu_elf_section_data *sec_data;
3651 struct spu_elf_stack_info *sinfo;
3652 int k;
3653
3654 /* See whether we can add this section to the current
3655 overlay without overflowing our overlay buffer. */
3656 sec = ovly_sections[2 * i];
3657 tmp = size + sec->size;
3658 if (ovly_sections[2 * i + 1])
3659 tmp += ovly_sections[2 * i + 1]->size;
3660 if (tmp > overlay_size)
3661 break;
3662 if (sec->segment_mark)
3663 {
3664 /* Pasted sections must stay together, so add their
3665 sizes too. */
3666 struct call_info *pasty = find_pasted_call (sec);
3667 while (pasty != NULL)
3668 {
3669 struct function_info *call_fun = pasty->fun;
3670 tmp += call_fun->sec->size;
3671 if (call_fun->rodata)
3672 tmp += call_fun->rodata->size;
3673 for (pasty = call_fun->call_list; pasty; pasty = pasty->next)
3674 if (pasty->is_pasted)
3675 break;
3676 }
3677 }
3678 if (tmp > overlay_size)
3679 break;
3680
3681 /* If we add this section, we might need new overlay call
3682	     stubs.  Add any overlay section calls to dummy_caller.  */
3683 pasty = NULL;
3684 sec_data = spu_elf_section_data (sec);
3685 sinfo = sec_data->u.i.stack_info;
3686 for (k = 0; k < sinfo->num_fun; ++k)
3687 for (call = sinfo->fun[k].call_list; call; call = call->next)
3688 if (call->is_pasted)
3689 {
3690 BFD_ASSERT (pasty == NULL);
3691 pasty = call;
3692 }
3693 else if (call->fun->sec->linker_mark)
3694 {
3695 if (!copy_callee (&dummy_caller, call))
3696 goto err_exit;
3697 }
3698 while (pasty != NULL)
3699 {
3700 struct function_info *call_fun = pasty->fun;
3701 pasty = NULL;
3702 for (call = call_fun->call_list; call; call = call->next)
3703 if (call->is_pasted)
3704 {
3705 BFD_ASSERT (pasty == NULL);
3706 pasty = call;
3707 }
3708 else if (!copy_callee (&dummy_caller, call))
3709 goto err_exit;
3710 }
3711
3712 /* Calculate call stub size. */
3713 stub_size = 0;
3714 for (call = dummy_caller.call_list; call; call = call->next)
3715 {
3716 unsigned int k;
3717
3718 stub_size += OVL_STUB_SIZE;
3719 /* If the call is within this overlay, we won't need a
3720 stub. */
3721 for (k = base; k < i + 1; k++)
3722 if (call->fun->sec == ovly_sections[2 * k])
3723 {
3724 stub_size -= OVL_STUB_SIZE;
3725 break;
3726 }
3727 }
3728 if (tmp + stub_size > overlay_size)
3729 break;
3730
3731 size = tmp;
3732 }
3733
3734 if (i == base)
3735 {
3736 info->callbacks->einfo (_("%B:%A%s exceeds overlay size\n"),
3737 ovly_sections[2 * i]->owner,
3738 ovly_sections[2 * i],
3739 ovly_sections[2 * i + 1] ? " + rodata" : "");
3740 bfd_set_error (bfd_error_bad_value);
3741 goto err_exit;
3742 }
3743
3744 if (fprintf (script, " .ovly%d {\n", ++ovlynum) <= 0)
3745 goto file_err;
3746 for (j = base; j < i; j++)
3747 {
3748 asection *sec = ovly_sections[2 * j];
3749
3750 if (fprintf (script, " %s%c%s (%s)\n",
3751 (sec->owner->my_archive != NULL
3752 ? sec->owner->my_archive->filename : ""),
3753 info->path_separator,
3754 sec->owner->filename,
3755 sec->name) <= 0)
3756 goto file_err;
3757 if (sec->segment_mark)
3758 {
3759 struct call_info *call = find_pasted_call (sec);
3760 while (call != NULL)
3761 {
3762 struct function_info *call_fun = call->fun;
3763 sec = call_fun->sec;
3764 if (fprintf (script, " %s%c%s (%s)\n",
3765 (sec->owner->my_archive != NULL
3766 ? sec->owner->my_archive->filename : ""),
3767 info->path_separator,
3768 sec->owner->filename,
3769 sec->name) <= 0)
3770 goto file_err;
3771 for (call = call_fun->call_list; call; call = call->next)
3772 if (call->is_pasted)
3773 break;
3774 }
3775 }
3776 }
3777
3778 for (j = base; j < i; j++)
3779 {
3780 asection *sec = ovly_sections[2 * j + 1];
3781 if (sec != NULL
3782 && fprintf (script, " %s%c%s (%s)\n",
3783 (sec->owner->my_archive != NULL
3784 ? sec->owner->my_archive->filename : ""),
3785 info->path_separator,
3786 sec->owner->filename,
3787 sec->name) <= 0)
3788 goto file_err;
3789
3790 sec = ovly_sections[2 * j];
3791 if (sec->segment_mark)
3792 {
3793 struct call_info *call = find_pasted_call (sec);
3794 while (call != NULL)
3795 {
3796 struct function_info *call_fun = call->fun;
3797 sec = call_fun->rodata;
3798 if (sec != NULL
3799 && fprintf (script, " %s%c%s (%s)\n",
3800 (sec->owner->my_archive != NULL
3801 ? sec->owner->my_archive->filename : ""),
3802 info->path_separator,
3803 sec->owner->filename,
3804 sec->name) <= 0)
3805 goto file_err;
3806 for (call = call_fun->call_list; call; call = call->next)
3807 if (call->is_pasted)
3808 break;
3809 }
3810 }
3811 }
3812
3813 if (fprintf (script, " }\n") <= 0)
3814 goto file_err;
3815
3816 while (dummy_caller.call_list != NULL)
3817 {
3818 struct call_info *call = dummy_caller.call_list;
3819 dummy_caller.call_list = call->next;
3820 free (call);
3821 }
3822
3823 base = i;
3824 }
3825 free (ovly_sections);
3826
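  /* Close the OVERLAY statement and splice the generated script into
     the default linker script immediately after .text.  */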
3827 if (fprintf (script, " }\n}\nINSERT AFTER .text;\n") <= 0)
3828 goto file_err;
3829 if (fclose (script) != 0)
3830 goto file_err;
3831
3832 if (htab->auto_overlay & AUTO_RELINK)
3833 htab->spu_elf_relink ();
3834
3835 xexit (0);
3836
3837 file_err:
3838 bfd_set_error (bfd_error_system_call);
3839 err_exit:
3840 info->callbacks->einfo ("%F%P: auto overlay error: %E\n");
3841 xexit (1);
3842}
3843
3844/* Provide an estimate of total stack required. */
3845
3846static bfd_boolean
3847spu_elf_stack_analysis (struct bfd_link_info *info, int emit_stack_syms)
3848{
3849 struct _sum_stack_param sum_stack_param;
3850
3851 if (!discover_functions (info))
3852 return FALSE;
3853
3854 if (!build_call_tree (info))
3855 return FALSE;
3856
3857 info->callbacks->info (_("Stack size for call graph root nodes.\n"));
3858 info->callbacks->minfo (_("\nStack size for functions. "
3859 "Annotations: '*' max stack, 't' tail call\n"));
3860
3861 sum_stack_param.emit_stack_syms = emit_stack_syms;
3862 sum_stack_param.overall_stack = 0;
3863 if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
3864 return FALSE;
3865
3866 info->callbacks->info (_("Maximum stack required is 0x%v\n"),
3867 (bfd_vma) sum_stack_param.overall_stack);
3868 return TRUE;
3869}
3870
3871/* Perform a final link. */
3872
3873static bfd_boolean
3874spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
3875{
3876 struct spu_link_hash_table *htab = spu_hash_table (info);
3877
3878 if (htab->auto_overlay)
3879 spu_elf_auto_overlay (info, htab->spu_elf_load_ovl_mgr);
3880
3881 if (htab->stack_analysis
3882 && !spu_elf_stack_analysis (info, htab->emit_stack_syms))
3883 info->callbacks->einfo ("%X%P: stack analysis error: %E\n");
3884
3885 return bfd_elf_final_link (output_bfd, info);
3886}
3887
3888/* Called when not normally emitting relocs, i.e. !info->relocatable
3889 and !info->emitrelocations. Returns a count of special relocs
3890 that need to be emitted. */
3891
3892static unsigned int
3893spu_elf_count_relocs (asection *sec, Elf_Internal_Rela *relocs)
3894{
3895 unsigned int count = 0;
3896 Elf_Internal_Rela *relend = relocs + sec->reloc_count;
3897
3898 for (; relocs < relend; relocs++)
3899 {
3900 int r_type = ELF32_R_TYPE (relocs->r_info);
3901 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
3902 ++count;
3903 }
3904
3905 return count;
3906}
3907
3908/* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
3909
3910static int
3911spu_elf_relocate_section (bfd *output_bfd,
3912 struct bfd_link_info *info,
3913 bfd *input_bfd,
3914 asection *input_section,
3915 bfd_byte *contents,
3916 Elf_Internal_Rela *relocs,
3917 Elf_Internal_Sym *local_syms,
3918 asection **local_sections)
3919{
3920 Elf_Internal_Shdr *symtab_hdr;
3921 struct elf_link_hash_entry **sym_hashes;
3922 Elf_Internal_Rela *rel, *relend;
3923 struct spu_link_hash_table *htab;
3924 asection *ea = bfd_get_section_by_name (output_bfd, "._ea");
3925 int ret = TRUE;
3926 bfd_boolean emit_these_relocs = FALSE;
3927 bfd_boolean is_ea_sym;
3928 bfd_boolean stubs;
3929
3930 htab = spu_hash_table (info);
3931 stubs = (htab->stub_sec != NULL
3932 && maybe_needs_stubs (input_section, output_bfd));
3933 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
3934 sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));
3935
3936 rel = relocs;
3937 relend = relocs + input_section->reloc_count;
3938 for (; rel < relend; rel++)
3939 {
3940 int r_type;
3941 reloc_howto_type *howto;
3942 unsigned int r_symndx;
3943 Elf_Internal_Sym *sym;
3944 asection *sec;
3945 struct elf_link_hash_entry *h;
3946 const char *sym_name;
3947 bfd_vma relocation;
3948 bfd_vma addend;
3949 bfd_reloc_status_type r;
3950 bfd_boolean unresolved_reloc;
3951 bfd_boolean warned;
3952 enum _stub_type stub_type;
3953
3954 r_symndx = ELF32_R_SYM (rel->r_info);
3955 r_type = ELF32_R_TYPE (rel->r_info);
3956 howto = elf_howto_table + r_type;
3957 unresolved_reloc = FALSE;
3958 warned = FALSE;
3959 h = NULL;
3960 sym = NULL;
3961 sec = NULL;
3962 if (r_symndx < symtab_hdr->sh_info)
3963 {
3964 sym = local_syms + r_symndx;
3965 sec = local_sections[r_symndx];
3966 sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
3967 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
3968 }
3969 else
3970 {
3971 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
3972 r_symndx, symtab_hdr, sym_hashes,
3973 h, sec, relocation,
3974 unresolved_reloc, warned);
3975 sym_name = h->root.root.string;
3976 }
3977
3978 if (sec != NULL && elf_discarded_section (sec))
3979 {
3980 /* For relocs against symbols from removed linkonce sections,
3981 or sections discarded by a linker script, we just want the
3982 section contents zeroed. Avoid any special processing. */
3983 _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
3984 rel->r_info = 0;
3985 rel->r_addend = 0;
3986 continue;
3987 }
3988
3989 if (info->relocatable)
3990 continue;
3991
3992 is_ea_sym = (ea != NULL
3993 && sec != NULL
3994 && sec->output_section == ea);
3995
3996 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
3997 {
3998 if (is_ea_sym)
3999 {
4000 /* ._ea is a special section that isn't allocated in SPU
4001 memory, but rather occupies space in PPU memory as
4002 part of an embedded ELF image. If this reloc is
4003 against a symbol defined in ._ea, then transform the
4004 reloc into an equivalent one without a symbol
4005 relative to the start of the ELF image. */
4006 rel->r_addend += (relocation
4007 - ea->vma
4008 + elf_section_data (ea)->this_hdr.sh_offset);
4009 rel->r_info = ELF32_R_INFO (0, r_type);
4010 }
4011 emit_these_relocs = TRUE;
4012 continue;
4013 }
4014
4015 if (is_ea_sym)
4016 unresolved_reloc = TRUE;
4017
4018 if (unresolved_reloc)
4019 {
4020 (*_bfd_error_handler)
4021 (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
4022 input_bfd,
4023 bfd_get_section_name (input_bfd, input_section),
4024 (long) rel->r_offset,
4025 howto->name,
4026 sym_name);
4027 ret = FALSE;
4028 }
4029
4030 /* If this symbol is in an overlay area, we may need to relocate
4031 to the overlay stub. */
4032 addend = rel->r_addend;
4033 if (stubs
4034 && (stub_type = needs_ovl_stub (h, sym, sec, input_section, rel,
4035 contents, info)) != no_stub)
4036 {
4037 unsigned int ovl = 0;
4038 struct got_entry *g, **head;
4039
4040 if (stub_type != nonovl_stub)
4041 ovl = (spu_elf_section_data (input_section->output_section)
4042 ->u.o.ovl_index);
4043
4044 if (h != NULL)
4045 head = &h->got.glist;
4046 else
4047 head = elf_local_got_ents (input_bfd) + r_symndx;
4048
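	  /* Find the stub recorded for this symbol and addend.  An
	     entry with ovl == 0 is not tied to a particular overlay,
	     so it satisfies a call from anywhere.  */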
4049 for (g = *head; g != NULL; g = g->next)
4050 if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
4051 break;
4052 if (g == NULL)
4053 abort ();
4054
4055 relocation = g->stub_addr;
4056 addend = 0;
4057 }
4058
4059 r = _bfd_final_link_relocate (howto,
4060 input_bfd,
4061 input_section,
4062 contents,
4063 rel->r_offset, relocation, addend);
4064
4065 if (r != bfd_reloc_ok)
4066 {
4067 const char *msg = (const char *) 0;
4068
4069 switch (r)
4070 {
4071 case bfd_reloc_overflow:
4072 if (!((*info->callbacks->reloc_overflow)
4073 (info, (h ? &h->root : NULL), sym_name, howto->name,
4074 (bfd_vma) 0, input_bfd, input_section, rel->r_offset)))
4075 return FALSE;
4076 break;
4077
4078 case bfd_reloc_undefined:
4079 if (!((*info->callbacks->undefined_symbol)
4080 (info, sym_name, input_bfd, input_section,
4081 rel->r_offset, TRUE)))
4082 return FALSE;
4083 break;
4084
4085 case bfd_reloc_outofrange:
4086 msg = _("internal error: out of range error");
4087 goto common_error;
4088
4089 case bfd_reloc_notsupported:
4090 msg = _("internal error: unsupported relocation error");
4091 goto common_error;
4092
4093 case bfd_reloc_dangerous:
4094 msg = _("internal error: dangerous error");
4095 goto common_error;
4096
4097 default:
4098 msg = _("internal error: unknown error");
4099 /* fall through */
4100
4101 common_error:
4102 ret = FALSE;
4103 if (!((*info->callbacks->warning)
4104 (info, msg, sym_name, input_bfd, input_section,
4105 rel->r_offset)))
4106 return FALSE;
4107 break;
4108 }
4109 }
4110 }
4111
4112 if (ret
4113 && emit_these_relocs
4114 && !info->emitrelocations)
4115 {
4116 Elf_Internal_Rela *wrel;
4117 Elf_Internal_Shdr *rel_hdr;
4118
4119 wrel = rel = relocs;
4120 relend = relocs + input_section->reloc_count;
4121 for (; rel < relend; rel++)
4122 {
4123 int r_type;
4124
4125 r_type = ELF32_R_TYPE (rel->r_info);
4126 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
4127 *wrel++ = *rel;
4128 }
4129 input_section->reloc_count = wrel - relocs;
4130 /* Backflips for _bfd_elf_link_output_relocs. */
4131 rel_hdr = &elf_section_data (input_section)->rel_hdr;
4132 rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
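      /* Returning 2 rather than TRUE tells the generic ELF linker to
	 write out the relocs remaining after the pruning above, even
	 though relocs are not otherwise being emitted.  */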
4133 ret = 2;
4134 }
4135
4136 return ret;
4137}
4138
4139/* Adjust _SPUEAR_ syms to point at their overlay stubs. */
4140
4141static bfd_boolean
4142spu_elf_output_symbol_hook (struct bfd_link_info *info,
4143 const char *sym_name ATTRIBUTE_UNUSED,
4144 Elf_Internal_Sym *sym,
4145 asection *sym_sec ATTRIBUTE_UNUSED,
4146 struct elf_link_hash_entry *h)
4147{
4148 struct spu_link_hash_table *htab = spu_hash_table (info);
4149
4150 if (!info->relocatable
4151 && htab->stub_sec != NULL
4152 && h != NULL
4153 && (h->root.type == bfd_link_hash_defined
4154 || h->root.type == bfd_link_hash_defweak)
4155 && h->def_regular
4156 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
4157 {
4158 struct got_entry *g;
4159
4160 for (g = h->got.glist; g != NULL; g = g->next)
4161 if (g->addend == 0 && g->ovl == 0)
4162 {
4163 sym->st_shndx = (_bfd_elf_section_from_bfd_section
4164 (htab->stub_sec[0]->output_section->owner,
4165 htab->stub_sec[0]->output_section));
4166 sym->st_value = g->stub_addr;
4167 break;
4168 }
4169 }
4170
4171 return TRUE;
4172}
4173
4174static int spu_plugin = 0;
4175
4176void
4177spu_elf_plugin (int val)
4178{
4179 spu_plugin = val;
4180}
4181
4182/* Set ELF header e_type for plugins. */
4183
4184static void
4185spu_elf_post_process_headers (bfd *abfd,
4186 struct bfd_link_info *info ATTRIBUTE_UNUSED)
4187{
4188 if (spu_plugin)
4189 {
4190 Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);
4191
4192 i_ehdrp->e_type = ET_DYN;
4193 }
4194}
4195
4196/* We may add an extra PT_LOAD segment for .toe. We also need extra
4197 segments for overlays. */
4198
4199static int
4200spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
4201{
4202 struct spu_link_hash_table *htab = spu_hash_table (info);
4203 int extra = htab->num_overlays;
4204 asection *sec;
4205
4206 if (extra)
4207 ++extra;
4208
4209 sec = bfd_get_section_by_name (abfd, ".toe");
4210 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
4211 ++extra;
4212
4213 return extra;
4214}
4215
4216/* Remove .toe section from other PT_LOAD segments and put it in
4217 a segment of its own. Put overlays in separate segments too. */
4218
4219static bfd_boolean
4220spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
4221{
4222 asection *toe, *s;
4223 struct elf_segment_map *m;
4224 unsigned int i;
4225
4226 if (info == NULL)
4227 return TRUE;
4228
4229 toe = bfd_get_section_by_name (abfd, ".toe");
4230 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
4231 if (m->p_type == PT_LOAD && m->count > 1)
4232 for (i = 0; i < m->count; i++)
4233 if ((s = m->sections[i]) == toe
4234 || spu_elf_section_data (s)->u.o.ovl_index != 0)
4235 {
4236 struct elf_segment_map *m2;
4237 bfd_vma amt;
4238
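	    /* Split this PT_LOAD map around section S: any sections
	       after S move to a new map that follows M, sections
	       before S stay in M, and S itself ends up in a map of
	       its own.  */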
4239 if (i + 1 < m->count)
4240 {
4241 amt = sizeof (struct elf_segment_map);
4242 amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
4243 m2 = bfd_zalloc (abfd, amt);
4244 if (m2 == NULL)
4245 return FALSE;
4246 m2->count = m->count - (i + 1);
4247 memcpy (m2->sections, m->sections + i + 1,
4248 m2->count * sizeof (m->sections[0]));
4249 m2->p_type = PT_LOAD;
4250 m2->next = m->next;
4251 m->next = m2;
4252 }
4253 m->count = 1;
4254 if (i != 0)
4255 {
4256 m->count = i;
4257 amt = sizeof (struct elf_segment_map);
4258 m2 = bfd_zalloc (abfd, amt);
4259 if (m2 == NULL)
4260 return FALSE;
4261 m2->p_type = PT_LOAD;
4262 m2->count = 1;
4263 m2->sections[0] = s;
4264 m2->next = m->next;
4265 m->next = m2;
4266 }
4267 break;
4268 }
4269
4270 return TRUE;
4271}
4272
4273/* Tweak the section type of .note.spu_name. */
4274
4275static bfd_boolean
4276spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
4277 Elf_Internal_Shdr *hdr,
4278 asection *sec)
4279{
4280 if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
4281 hdr->sh_type = SHT_NOTE;
4282 return TRUE;
4283}
4284
4285/* Tweak phdrs before writing them out. */
4286
4287static int
4288spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
4289{
4290 const struct elf_backend_data *bed;
4291 struct elf_obj_tdata *tdata;
4292 Elf_Internal_Phdr *phdr, *last;
4293 struct spu_link_hash_table *htab;
4294 unsigned int count;
4295 unsigned int i;
4296
4297 if (info == NULL)
4298 return TRUE;
4299
4300 bed = get_elf_backend_data (abfd);
4301 tdata = elf_tdata (abfd);
4302 phdr = tdata->phdr;
4303 count = tdata->program_header_size / bed->s->sizeof_phdr;
4304 htab = spu_hash_table (info);
4305 if (htab->num_overlays != 0)
4306 {
4307 struct elf_segment_map *m;
4308 unsigned int o;
4309
4310 for (i = 0, m = elf_tdata (abfd)->segment_map; m; ++i, m = m->next)
4311 if (m->count != 0
4312 && (o = spu_elf_section_data (m->sections[0])->u.o.ovl_index) != 0)
4313 {
4314 /* Mark this as an overlay header. */
4315 phdr[i].p_flags |= PF_OVERLAY;
4316
4317 if (htab->ovtab != NULL && htab->ovtab->size != 0)
4318 {
4319 bfd_byte *p = htab->ovtab->contents;
4320 unsigned int off = o * 16 + 8;
4321
4322 /* Write file_off into _ovly_table. */
4323 bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
4324 }
4325 }
4326 }
4327
4328 /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
4329 of 16. This should always be possible when using the standard
4330 linker scripts, but don't create overlapping segments if
4331 someone is playing games with linker scripts. */
4332 last = NULL;
4333 for (i = count; i-- != 0; )
4334 if (phdr[i].p_type == PT_LOAD)
4335 {
4336 unsigned adjust;
4337
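	/* (-x & 15) is the padding needed to round x up to a multiple
	   of 16.  This first pass merely checks that the padding would
	   not overlap the next higher PT_LOAD segment.  */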
4338 adjust = -phdr[i].p_filesz & 15;
4339 if (adjust != 0
4340 && last != NULL
4341 && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
4342 break;
4343
4344 adjust = -phdr[i].p_memsz & 15;
4345 if (adjust != 0
4346 && last != NULL
4347 && phdr[i].p_filesz != 0
4348 && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
4349 && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
4350 break;
4351
4352 if (phdr[i].p_filesz != 0)
4353 last = &phdr[i];
4354 }
4355
4356 if (i == (unsigned int) -1)
4357 for (i = count; i-- != 0; )
4358 if (phdr[i].p_type == PT_LOAD)
4359 {
4360 unsigned adjust;
4361
4362 adjust = -phdr[i].p_filesz & 15;
4363 phdr[i].p_filesz += adjust;
4364
4365 adjust = -phdr[i].p_memsz & 15;
4366 phdr[i].p_memsz += adjust;
4367 }
4368
4369 return TRUE;
4370}
4371
4372#define TARGET_BIG_SYM bfd_elf32_spu_vec
4373#define TARGET_BIG_NAME "elf32-spu"
4374#define ELF_ARCH bfd_arch_spu
4375#define ELF_MACHINE_CODE EM_SPU
4376/* This matches the alignment needed for DMA. */
4377#define ELF_MAXPAGESIZE 0x80
4378#define elf_backend_rela_normal 1
4379#define elf_backend_can_gc_sections 1
4380
4381#define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
4382#define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
4383#define elf_info_to_howto spu_elf_info_to_howto
4384#define elf_backend_count_relocs spu_elf_count_relocs
4385#define elf_backend_relocate_section spu_elf_relocate_section
4386#define elf_backend_symbol_processing spu_elf_backend_symbol_processing
4387#define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
4388#define elf_backend_object_p spu_elf_object_p
4389#define bfd_elf32_new_section_hook spu_elf_new_section_hook
4390#define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create
4391
4392#define elf_backend_additional_program_headers spu_elf_additional_program_headers
4393#define elf_backend_modify_segment_map spu_elf_modify_segment_map
4394#define elf_backend_modify_program_headers spu_elf_modify_program_headers
4395#define elf_backend_post_process_headers spu_elf_post_process_headers
4396#define elf_backend_fake_sections spu_elf_fake_sections
4397#define elf_backend_special_sections spu_elf_special_sections
4398#define bfd_elf32_bfd_final_link spu_elf_final_link
4399
4400#include "elf32-target.h"