Fix Aarch64 bug in warning filtering.
[deliverable/binutils-gdb.git] / bfd / elf32-spu.c
1 /* SPU specific support for 32-bit ELF
2
3 Copyright (C) 2006-2018 Free Software Foundation, Inc.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
20
21 #include "sysdep.h"
22 #include "libiberty.h"
23 #include "bfd.h"
24 #include "bfdlink.h"
25 #include "libbfd.h"
26 #include "elf-bfd.h"
27 #include "elf/spu.h"
28 #include "elf32-spu.h"
29
30 /* We use RELA style relocs. Don't define USE_REL. */
31
32 static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
33 void *, asection *,
34 bfd *, char **);
35
/* Values of type 'enum elf_spu_reloc_type' are used to index this
   array, so it must be declared in the order of that type.  */

/* HOWTO argument order, for reference when reading the entries:
   type, rightshift, size, bitsize, pc_relative, bitpos,
   complain_on_overflow, special_function, name, partial_inplace,
   src_mask, dst_mask, pcrel_offset.  */
static reloc_howto_type elf_howto_table[] = {
  HOWTO (R_SPU_NONE,	   0, 3,  0, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_NONE",
	 FALSE, 0, 0x00000000, FALSE),
  HOWTO (R_SPU_ADDR10,	   4, 2, 10, FALSE, 14, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR10",
	 FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16,	   2, 2, 16, FALSE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16_HI",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_LO,  0, 2, 16, FALSE,  7, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR16_LO",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR18,	   0, 2, 18, FALSE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR18",
	 FALSE, 0, 0x01ffff80, FALSE),
  HOWTO (R_SPU_ADDR32,	   0, 2, 32, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR32",
	 FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_REL16,	   2, 2, 16,  TRUE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_REL16",
	 FALSE, 0, 0x007fff80, TRUE),
  HOWTO (R_SPU_ADDR7,	   0, 2,  7, FALSE, 14, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR7",
	 FALSE, 0, 0x001fc000, FALSE),
  /* The two 9-bit branch relocs have split bit fields, so they need a
     special function (spu_elf_rel9) rather than the generic one.  */
  HOWTO (R_SPU_REL9,	   2, 2,  9,  TRUE,  0, complain_overflow_signed,
	 spu_elf_rel9,		"SPU_REL9",
	 FALSE, 0, 0x0180007f, TRUE),
  HOWTO (R_SPU_REL9I,	   2, 2,  9,  TRUE,  0, complain_overflow_signed,
	 spu_elf_rel9,		"SPU_REL9I",
	 FALSE, 0, 0x0000c07f, TRUE),
  HOWTO (R_SPU_ADDR10I,	   0, 2, 10, FALSE, 14, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR10I",
	 FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16I,	   0, 2, 16, FALSE,  7, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR16I",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_REL32,	   0, 2, 32, TRUE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_REL32",
	 FALSE, 0, 0xffffffff, TRUE),
  HOWTO (R_SPU_ADDR16X,	   0, 2, 16, FALSE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16X",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_PPU32,	   0, 2, 32, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_PPU32",
	 FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_PPU64,	   0, 4, 64, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_PPU64",
	 FALSE, 0, -1, FALSE),
  HOWTO (R_SPU_ADD_PIC,	   0, 0, 0, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADD_PIC",
	 FALSE, 0, 0x00000000, FALSE),
};
95
/* Sections the SPU backend treats specially: ._ea (writable PROGBITS)
   and .toe (allocated NOBITS), both 16-byte aligned.  Terminated by a
   NULL entry as required by the special-section table convention.  */
static struct bfd_elf_special_section const spu_elf_special_sections[] = {
  { "._ea", 4, 0, SHT_PROGBITS, SHF_WRITE },
  { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
  { NULL, 0, 0, 0, 0 }
};
101
102 static enum elf_spu_reloc_type
103 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
104 {
105 switch (code)
106 {
107 default:
108 return (enum elf_spu_reloc_type) -1;
109 case BFD_RELOC_NONE:
110 return R_SPU_NONE;
111 case BFD_RELOC_SPU_IMM10W:
112 return R_SPU_ADDR10;
113 case BFD_RELOC_SPU_IMM16W:
114 return R_SPU_ADDR16;
115 case BFD_RELOC_SPU_LO16:
116 return R_SPU_ADDR16_LO;
117 case BFD_RELOC_SPU_HI16:
118 return R_SPU_ADDR16_HI;
119 case BFD_RELOC_SPU_IMM18:
120 return R_SPU_ADDR18;
121 case BFD_RELOC_SPU_PCREL16:
122 return R_SPU_REL16;
123 case BFD_RELOC_SPU_IMM7:
124 return R_SPU_ADDR7;
125 case BFD_RELOC_SPU_IMM8:
126 return R_SPU_NONE;
127 case BFD_RELOC_SPU_PCREL9a:
128 return R_SPU_REL9;
129 case BFD_RELOC_SPU_PCREL9b:
130 return R_SPU_REL9I;
131 case BFD_RELOC_SPU_IMM10:
132 return R_SPU_ADDR10I;
133 case BFD_RELOC_SPU_IMM16:
134 return R_SPU_ADDR16I;
135 case BFD_RELOC_32:
136 return R_SPU_ADDR32;
137 case BFD_RELOC_32_PCREL:
138 return R_SPU_REL32;
139 case BFD_RELOC_SPU_PPU32:
140 return R_SPU_PPU32;
141 case BFD_RELOC_SPU_PPU64:
142 return R_SPU_PPU64;
143 case BFD_RELOC_SPU_ADD_PIC:
144 return R_SPU_ADD_PIC;
145 }
146 }
147
148 static bfd_boolean
149 spu_elf_info_to_howto (bfd *abfd,
150 arelent *cache_ptr,
151 Elf_Internal_Rela *dst)
152 {
153 enum elf_spu_reloc_type r_type;
154
155 r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
156 /* PR 17512: file: 90c2a92e. */
157 if (r_type >= R_SPU_max)
158 {
159 /* xgettext:c-format */
160 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
161 abfd, r_type);
162 bfd_set_error (bfd_error_bad_value);
163 return FALSE;
164 }
165 cache_ptr->howto = &elf_howto_table[(int) r_type];
166 return TRUE;
167 }
168
169 static reloc_howto_type *
170 spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
171 bfd_reloc_code_real_type code)
172 {
173 enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);
174
175 if (r_type == (enum elf_spu_reloc_type) -1)
176 return NULL;
177
178 return elf_howto_table + r_type;
179 }
180
181 static reloc_howto_type *
182 spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
183 const char *r_name)
184 {
185 unsigned int i;
186
187 for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
188 if (elf_howto_table[i].name != NULL
189 && strcasecmp (elf_howto_table[i].name, r_name) == 0)
190 return &elf_howto_table[i];
191
192 return NULL;
193 }
194
/* Apply R_SPU_REL9 and R_SPU_REL9I relocs.  These have a 9-bit
   pc-relative word offset whose two high bits sit in a separate
   instruction field from the low seven, so the generic reloc code
   cannot handle them.  */

static bfd_reloc_status_type
spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
	      void *data, asection *input_section,
	      bfd *output_bfd, char **error_message)
{
  bfd_size_type octets;
  bfd_vma val;
  long insn;

  /* If this is a relocatable link (output_bfd test tells us), just
     call the generic function.  Any adjustment will be done at final
     link time.  */
  if (output_bfd != NULL)
    return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
				  input_section, output_bfd, error_message);

  if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
    return bfd_reloc_outofrange;
  octets = reloc_entry->address * bfd_octets_per_byte (abfd);

  /* Get symbol value.  */
  val = 0;
  if (!bfd_is_com_section (symbol->section))
    val = symbol->value;
  if (symbol->section->output_section)
    val += symbol->section->output_section->vma;

  val += reloc_entry->addend;

  /* Make it pc-relative.  */
  val -= input_section->output_section->vma + input_section->output_offset;

  /* Convert bytes to words and range-check: after the shift VAL must
     fit in 9 signed bits, i.e. lie in [-256, 255].  */
  val >>= 2;
  if (val + 256 >= 512)
    return bfd_reloc_overflow;

  insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);

  /* Move two high bits of value to REL9I and REL9 position.
     The mask will take care of selecting the right field.  */
  val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
  insn &= ~reloc_entry->howto->dst_mask;
  insn |= val & reloc_entry->howto->dst_mask;
  bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
  return bfd_reloc_ok;
}
243
244 static bfd_boolean
245 spu_elf_new_section_hook (bfd *abfd, asection *sec)
246 {
247 if (!sec->used_by_bfd)
248 {
249 struct _spu_elf_section_data *sdata;
250
251 sdata = bfd_zalloc (abfd, sizeof (*sdata));
252 if (sdata == NULL)
253 return FALSE;
254 sec->used_by_bfd = sdata;
255 }
256
257 return _bfd_elf_new_section_hook (abfd, sec);
258 }
259
/* Set up overlay info for executables.  */

static bfd_boolean
spu_elf_object_p (bfd *abfd)
{
  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
    {
      unsigned int i, num_ovl, num_buf;
      Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
      Elf_Internal_Ehdr *ehdr = elf_elfheader (abfd);
      Elf_Internal_Phdr *last_phdr = NULL;

      /* Walk the program headers; each PT_LOAD segment flagged
	 PF_OVERLAY is an overlay.  */
      for (num_buf = 0, num_ovl = 0, i = 0; i < ehdr->e_phnum; i++, phdr++)
	if (phdr->p_type == PT_LOAD && (phdr->p_flags & PF_OVERLAY) != 0)
	  {
	    unsigned int j;

	    ++num_ovl;
	    /* Segments loading at a different address modulo 256k
	       (low 18 bits — presumably the local-store address;
	       confirm against the overlay layout code) start a new
	       overlay buffer.  */
	    if (last_phdr == NULL
		|| ((last_phdr->p_vaddr ^ phdr->p_vaddr) & 0x3ffff) != 0)
	      ++num_buf;
	    last_phdr = phdr;
	    /* Tag every section inside this segment with its overlay
	       index and buffer number.  */
	    for (j = 1; j < elf_numsections (abfd); j++)
	      {
		Elf_Internal_Shdr *shdr = elf_elfsections (abfd)[j];

		if (ELF_SECTION_SIZE (shdr, phdr) != 0
		    && ELF_SECTION_IN_SEGMENT (shdr, phdr))
		  {
		    asection *sec = shdr->bfd_section;
		    spu_elf_section_data (sec)->u.o.ovl_index = num_ovl;
		    spu_elf_section_data (sec)->u.o.ovl_buf = num_buf;
		  }
	      }
	  }
    }
  return TRUE;
}
298
299 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
300 strip --strip-unneeded will not remove them. */
301
302 static void
303 spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
304 {
305 if (sym->name != NULL
306 && sym->section != bfd_abs_section_ptr
307 && strncmp (sym->name, "_EAR_", 5) == 0)
308 sym->flags |= BSF_KEEP;
309 }
310
/* SPU ELF linker hash table.  */

struct spu_link_hash_table
{
  struct elf_link_hash_table elf;

  /* Parameters supplied by the linker through spu_elf_setup.  */
  struct spu_elf_params *params;

  /* Shortcuts to overlay sections.  */
  asection *ovtab;
  asection *init;
  asection *toe;
  asection **ovl_sec;

  /* Count of stubs in each overlay section.  */
  unsigned int *stub_count;

  /* The stub section for each overlay section.  */
  asection **stub_sec;

  /* Overlay manager entry points; filled in by spu_elf_find_overlays
     from either the __ovly_* or the __icache_* symbol names.  */
  struct elf_link_hash_entry *ovly_entry[2];

  /* Number of overlay buffers.  */
  unsigned int num_buf;

  /* Total number of overlays.  */
  unsigned int num_overlays;

  /* For soft icache.  */
  unsigned int line_size_log2;
  unsigned int num_lines_log2;
  unsigned int fromelem_size_log2;

  /* How much memory we have.  */
  unsigned int local_store;

  /* Count of overlay stubs needed in non-overlay area.  */
  unsigned int non_ovly_stub;

  /* Pointer to the fixup section.  */
  asection *sfixup;

  /* Set on error.  */
  unsigned int stub_err : 1;
};
356
/* Hijack the generic got fields for overlay stub accounting.  */

struct got_entry
{
  /* Next entry on the per-symbol list.  */
  struct got_entry *next;
  /* Overlay index the stub lives in; 0 for the non-overlay area.  */
  unsigned int ovl;
  union {
    /* Reloc addend this stub serves (regular overlay flavour).  */
    bfd_vma addend;
    /* Branch address — alternative view, presumably used by the
       soft-icache code later in the file; confirm there.  */
    bfd_vma br_addr;
  };
  /* Address of the stub itself; (bfd_vma) -1 until assigned.  */
  bfd_vma stub_addr;
};
369
/* Fetch the SPU link hash table from a bfd_link_info, or NULL when the
   hash table was created by a different ELF backend.  */
#define spu_hash_table(p) \
  (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
   == SPU_ELF_DATA ? ((struct spu_link_hash_table *) ((p)->hash)) : NULL)
373
/* One edge in the call graph: a call from some function to FUN.  */

struct call_info
{
  /* Callee.  */
  struct function_info *fun;
  /* Next call record on the caller's call_list.  */
  struct call_info *next;
  /* NOTE(review): presumably the number of times this call site was
     seen — confirm against the call-graph builder later in the file.  */
  unsigned int count;
  unsigned int max_depth;
  /* Tail call rather than a normal call.  */
  unsigned int is_tail : 1;
  /* Callee is a pasted continuation of the caller.  */
  unsigned int is_pasted : 1;
  unsigned int broken_cycle : 1;
  unsigned int priority : 13;
};
385
/* Per-function (or per function fragment) information used by the
   stack analysis and overlay partitioning code.  */

struct function_info
{
  /* List of functions called.  Also branches to hot/cold part of
     function.  */
  struct call_info *call_list;
  /* For hot/cold part of function, point to owner.  */
  struct function_info *start;
  /* Symbol at start of function.  */
  union {
    Elf_Internal_Sym *sym;
    struct elf_link_hash_entry *h;
  } u;
  /* Function section.  */
  asection *sec;
  /* Associated read-only data section, if any — set elsewhere in
     this file.  */
  asection *rodata;
  /* Where last called from, and number of sections called from.  */
  asection *last_caller;
  unsigned int call_count;
  /* Address range of (this part of) function.  */
  bfd_vma lo, hi;
  /* Offset where we found a store of lr, or -1 if none found.  */
  bfd_vma lr_store;
  /* Offset where we found the stack adjustment insn.  */
  bfd_vma sp_adjust;
  /* Stack usage.  */
  int stack;
  /* Distance from root of call tree.  Tail and hot/cold branches
     count as one deeper.  We aren't counting stack frames here.  */
  unsigned int depth;
  /* Set if global symbol.  */
  unsigned int global : 1;
  /* Set if known to be start of function (as distinct from a hunk
     in a hot/cold section).  */
  unsigned int is_func : 1;
  /* Set if not a root node.  */
  unsigned int non_root : 1;
  /* Flags used during call tree traversal.  It's cheaper to replicate
     the visit flags than have one which needs clearing after a traversal.  */
  unsigned int visit1 : 1;
  unsigned int visit2 : 1;
  unsigned int marking : 1;
  unsigned int visit3 : 1;
  unsigned int visit4 : 1;
  unsigned int visit5 : 1;
  unsigned int visit6 : 1;
  unsigned int visit7 : 1;
};
433
/* Per-section collection of function_info records.  */

struct spu_elf_stack_info
{
  /* Entries used in FUN.  */
  int num_fun;
  /* Entries allocated for FUN.  */
  int max_fun;
  /* Variable size array describing functions, one per contiguous
     address range belonging to a function.  Declared [1] and
     over-allocated (pre-C99 flexible array idiom).  */
  struct function_info fun[1];
};
442
/* Forward declaration; defined later in this file.  */
static struct function_info *find_function (asection *, bfd_vma,
					    struct bfd_link_info *);
445
446 /* Create a spu ELF linker hash table. */
447
448 static struct bfd_link_hash_table *
449 spu_elf_link_hash_table_create (bfd *abfd)
450 {
451 struct spu_link_hash_table *htab;
452
453 htab = bfd_zmalloc (sizeof (*htab));
454 if (htab == NULL)
455 return NULL;
456
457 if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
458 _bfd_elf_link_hash_newfunc,
459 sizeof (struct elf_link_hash_entry),
460 SPU_ELF_DATA))
461 {
462 free (htab);
463 return NULL;
464 }
465
466 htab->elf.init_got_refcount.refcount = 0;
467 htab->elf.init_got_refcount.glist = NULL;
468 htab->elf.init_got_offset.offset = 0;
469 htab->elf.init_got_offset.glist = NULL;
470 return &htab->elf.root;
471 }
472
473 void
474 spu_elf_setup (struct bfd_link_info *info, struct spu_elf_params *params)
475 {
476 bfd_vma max_branch_log2;
477
478 struct spu_link_hash_table *htab = spu_hash_table (info);
479 htab->params = params;
480 htab->line_size_log2 = bfd_log2 (htab->params->line_size);
481 htab->num_lines_log2 = bfd_log2 (htab->params->num_lines);
482
483 /* For the software i-cache, we provide a "from" list whose size
484 is a power-of-two number of quadwords, big enough to hold one
485 byte per outgoing branch. Compute this number here. */
486 max_branch_log2 = bfd_log2 (htab->params->max_branch);
487 htab->fromelem_size_log2 = max_branch_log2 > 4 ? max_branch_log2 - 4 : 0;
488 }
489
/* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
   to (hash, NULL) for global symbols, and (NULL, sym) for locals.  Set
   *SYMSECP to the symbol's section.  *LOCSYMSP caches local syms.  */

static bfd_boolean
get_sym_h (struct elf_link_hash_entry **hp,
	   Elf_Internal_Sym **symp,
	   asection **symsecp,
	   Elf_Internal_Sym **locsymsp,
	   unsigned long r_symndx,
	   bfd *ibfd)
{
  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;

  /* Indices at or beyond sh_info are global symbols.  */
  if (r_symndx >= symtab_hdr->sh_info)
    {
      struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
      struct elf_link_hash_entry *h;

      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
      /* Follow indirect and warning links to the real symbol.  */
      while (h->root.type == bfd_link_hash_indirect
	     || h->root.type == bfd_link_hash_warning)
	h = (struct elf_link_hash_entry *) h->root.u.i.link;

      if (hp != NULL)
	*hp = h;

      if (symp != NULL)
	*symp = NULL;

      if (symsecp != NULL)
	{
	  /* Only defined (or weakly defined) symbols have a section.  */
	  asection *symsec = NULL;
	  if (h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	    symsec = h->root.u.def.section;
	  *symsecp = symsec;
	}
    }
  else
    {
      Elf_Internal_Sym *sym;
      Elf_Internal_Sym *locsyms = *locsymsp;

      if (locsyms == NULL)
	{
	  /* Prefer symbols already read and cached on the header;
	     otherwise read the local part of the symtab now and cache
	     it in *LOCSYMSP for subsequent calls.  */
	  locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
	  if (locsyms == NULL)
	    locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr,
					    symtab_hdr->sh_info,
					    0, NULL, NULL, NULL);
	  if (locsyms == NULL)
	    return FALSE;
	  *locsymsp = locsyms;
	}
      sym = locsyms + r_symndx;

      if (hp != NULL)
	*hp = NULL;

      if (symp != NULL)
	*symp = sym;

      if (symsecp != NULL)
	*symsecp = bfd_section_from_elf_index (ibfd, sym->st_shndx);
    }

  return TRUE;
}
559
/* Create the note section if not already present.  This is done early so
   that the linker maps the sections to the right place in the output.  */

bfd_boolean
spu_elf_create_sections (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;

  /* See whether any input already supplies the SPUNAME note.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
      break;

  if (ibfd == NULL)
    {
      /* Make SPU_PTNOTE_SPUNAME section.  */
      asection *s;
      size_t name_len;
      size_t size;
      bfd_byte *data;
      flagword flags;

      ibfd = info->input_bfds;
      flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
      if (s == NULL
	  || !bfd_set_section_alignment (ibfd, s, 4))
	return FALSE;

      /* ELF note layout: 12-byte header (namesz, descsz, type) plus
	 name and desc, each padded to a 4-byte boundary.  */
      name_len = strlen (bfd_get_filename (info->output_bfd)) + 1;
      size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
      size += (name_len + 3) & -4;

      if (!bfd_set_section_size (ibfd, s, size))
	return FALSE;

      data = bfd_zalloc (ibfd, size);
      if (data == NULL)
	return FALSE;

      /* namesz, descsz, type = 1, then the note name and the output
	 file name as the descriptor.  */
      bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
      bfd_put_32 (ibfd, name_len, data + 4);
      bfd_put_32 (ibfd, 1, data + 8);
      memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
      memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
	      bfd_get_filename (info->output_bfd), name_len);
      s->contents = data;
    }

  if (htab->params->emit_fixups)
    {
      /* Make the linker-created .fixup section for emitted fixups.  */
      asection *s;
      flagword flags;

      if (htab->elf.dynobj == NULL)
	htab->elf.dynobj = ibfd;
      ibfd = htab->elf.dynobj;
      flags = (SEC_LOAD | SEC_ALLOC | SEC_READONLY | SEC_HAS_CONTENTS
	       | SEC_IN_MEMORY | SEC_LINKER_CREATED);
      s = bfd_make_section_anyway_with_flags (ibfd, ".fixup", flags);
      if (s == NULL || !bfd_set_section_alignment (ibfd, s, 2))
	return FALSE;
      htab->sfixup = s;
    }

  return TRUE;
}
627
628 /* qsort predicate to sort sections by vma. */
629
630 static int
631 sort_sections (const void *a, const void *b)
632 {
633 const asection *const *s1 = a;
634 const asection *const *s2 = b;
635 bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;
636
637 if (delta != 0)
638 return delta < 0 ? -1 : 1;
639
640 return (*s1)->index - (*s2)->index;
641 }
642
643 /* Identify overlays in the output bfd, and number them.
644 Returns 0 on error, 1 if no overlays, 2 if overlays. */
645
646 int
647 spu_elf_find_overlays (struct bfd_link_info *info)
648 {
649 struct spu_link_hash_table *htab = spu_hash_table (info);
650 asection **alloc_sec;
651 unsigned int i, n, ovl_index, num_buf;
652 asection *s;
653 bfd_vma ovl_end;
654 static const char *const entry_names[2][2] = {
655 { "__ovly_load", "__icache_br_handler" },
656 { "__ovly_return", "__icache_call_handler" }
657 };
658
659 if (info->output_bfd->section_count < 2)
660 return 1;
661
662 alloc_sec
663 = bfd_malloc (info->output_bfd->section_count * sizeof (*alloc_sec));
664 if (alloc_sec == NULL)
665 return 0;
666
667 /* Pick out all the alloced sections. */
668 for (n = 0, s = info->output_bfd->sections; s != NULL; s = s->next)
669 if ((s->flags & SEC_ALLOC) != 0
670 && (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
671 && s->size != 0)
672 alloc_sec[n++] = s;
673
674 if (n == 0)
675 {
676 free (alloc_sec);
677 return 1;
678 }
679
680 /* Sort them by vma. */
681 qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);
682
683 ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
684 if (htab->params->ovly_flavour == ovly_soft_icache)
685 {
686 unsigned int prev_buf = 0, set_id = 0;
687
688 /* Look for an overlapping vma to find the first overlay section. */
689 bfd_vma vma_start = 0;
690
691 for (i = 1; i < n; i++)
692 {
693 s = alloc_sec[i];
694 if (s->vma < ovl_end)
695 {
696 asection *s0 = alloc_sec[i - 1];
697 vma_start = s0->vma;
698 ovl_end = (s0->vma
699 + ((bfd_vma) 1
700 << (htab->num_lines_log2 + htab->line_size_log2)));
701 --i;
702 break;
703 }
704 else
705 ovl_end = s->vma + s->size;
706 }
707
708 /* Now find any sections within the cache area. */
709 for (ovl_index = 0, num_buf = 0; i < n; i++)
710 {
711 s = alloc_sec[i];
712 if (s->vma >= ovl_end)
713 break;
714
715 /* A section in an overlay area called .ovl.init is not
716 an overlay, in the sense that it might be loaded in
717 by the overlay manager, but rather the initial
718 section contents for the overlay buffer. */
719 if (strncmp (s->name, ".ovl.init", 9) != 0)
720 {
721 num_buf = ((s->vma - vma_start) >> htab->line_size_log2) + 1;
722 set_id = (num_buf == prev_buf)? set_id + 1 : 0;
723 prev_buf = num_buf;
724
725 if ((s->vma - vma_start) & (htab->params->line_size - 1))
726 {
727 info->callbacks->einfo (_("%X%P: overlay section %pA "
728 "does not start on a cache line\n"),
729 s);
730 bfd_set_error (bfd_error_bad_value);
731 return 0;
732 }
733 else if (s->size > htab->params->line_size)
734 {
735 info->callbacks->einfo (_("%X%P: overlay section %pA "
736 "is larger than a cache line\n"),
737 s);
738 bfd_set_error (bfd_error_bad_value);
739 return 0;
740 }
741
742 alloc_sec[ovl_index++] = s;
743 spu_elf_section_data (s)->u.o.ovl_index
744 = (set_id << htab->num_lines_log2) + num_buf;
745 spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
746 }
747 }
748
749 /* Ensure there are no more overlay sections. */
750 for ( ; i < n; i++)
751 {
752 s = alloc_sec[i];
753 if (s->vma < ovl_end)
754 {
755 info->callbacks->einfo (_("%X%P: overlay section %pA "
756 "is not in cache area\n"),
757 alloc_sec[i-1]);
758 bfd_set_error (bfd_error_bad_value);
759 return 0;
760 }
761 else
762 ovl_end = s->vma + s->size;
763 }
764 }
765 else
766 {
767 /* Look for overlapping vmas. Any with overlap must be overlays.
768 Count them. Also count the number of overlay regions. */
769 for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
770 {
771 s = alloc_sec[i];
772 if (s->vma < ovl_end)
773 {
774 asection *s0 = alloc_sec[i - 1];
775
776 if (spu_elf_section_data (s0)->u.o.ovl_index == 0)
777 {
778 ++num_buf;
779 if (strncmp (s0->name, ".ovl.init", 9) != 0)
780 {
781 alloc_sec[ovl_index] = s0;
782 spu_elf_section_data (s0)->u.o.ovl_index = ++ovl_index;
783 spu_elf_section_data (s0)->u.o.ovl_buf = num_buf;
784 }
785 else
786 ovl_end = s->vma + s->size;
787 }
788 if (strncmp (s->name, ".ovl.init", 9) != 0)
789 {
790 alloc_sec[ovl_index] = s;
791 spu_elf_section_data (s)->u.o.ovl_index = ++ovl_index;
792 spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
793 if (s0->vma != s->vma)
794 {
795 /* xgettext:c-format */
796 info->callbacks->einfo (_("%X%P: overlay sections %pA "
797 "and %pA do not start at the "
798 "same address\n"),
799 s0, s);
800 bfd_set_error (bfd_error_bad_value);
801 return 0;
802 }
803 if (ovl_end < s->vma + s->size)
804 ovl_end = s->vma + s->size;
805 }
806 }
807 else
808 ovl_end = s->vma + s->size;
809 }
810 }
811
812 htab->num_overlays = ovl_index;
813 htab->num_buf = num_buf;
814 htab->ovl_sec = alloc_sec;
815
816 if (ovl_index == 0)
817 return 1;
818
819 for (i = 0; i < 2; i++)
820 {
821 const char *name;
822 struct elf_link_hash_entry *h;
823
824 name = entry_names[i][htab->params->ovly_flavour];
825 h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
826 if (h == NULL)
827 return 0;
828
829 if (h->root.type == bfd_link_hash_new)
830 {
831 h->root.type = bfd_link_hash_undefined;
832 h->ref_regular = 1;
833 h->ref_regular_nonweak = 1;
834 h->non_elf = 0;
835 }
836 htab->ovly_entry[i] = h;
837 }
838
839 return 2;
840 }
841
/* Non-zero to use bra in overlay stubs rather than br.  */
#define BRA_STUBS 0

/* SPU instruction opcodes (pre-shifted into bit position) used when
   emitting overlay stub code.  */
#define BRA	0x30000000
#define BRASL	0x31000000
#define BR	0x32000000
#define BRSL	0x33000000
#define NOP	0x40200000
#define LNOP	0x00200000
#define ILA	0x42000000
852
853 /* Return true for all relative and absolute branch instructions.
854 bra 00110000 0..
855 brasl 00110001 0..
856 br 00110010 0..
857 brsl 00110011 0..
858 brz 00100000 0..
859 brnz 00100001 0..
860 brhz 00100010 0..
861 brhnz 00100011 0.. */
862
863 static bfd_boolean
864 is_branch (const unsigned char *insn)
865 {
866 return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
867 }
868
869 /* Return true for all indirect branch instructions.
870 bi 00110101 000
871 bisl 00110101 001
872 iret 00110101 010
873 bisled 00110101 011
874 biz 00100101 000
875 binz 00100101 001
876 bihz 00100101 010
877 bihnz 00100101 011 */
878
879 static bfd_boolean
880 is_indirect_branch (const unsigned char *insn)
881 {
882 return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
883 }
884
885 /* Return true for branch hint instructions.
886 hbra 0001000..
887 hbrr 0001001.. */
888
889 static bfd_boolean
890 is_hint (const unsigned char *insn)
891 {
892 return (insn[0] & 0xfc) == 0x10;
893 }
894
895 /* True if INPUT_SECTION might need overlay stubs. */
896
897 static bfd_boolean
898 maybe_needs_stubs (asection *input_section)
899 {
900 /* No stubs for debug sections and suchlike. */
901 if ((input_section->flags & SEC_ALLOC) == 0)
902 return FALSE;
903
904 /* No stubs for link-once sections that will be discarded. */
905 if (input_section->output_section == bfd_abs_section_ptr)
906 return FALSE;
907
908 /* Don't create stubs for .eh_frame references. */
909 if (strcmp (input_section->name, ".eh_frame") == 0)
910 return FALSE;
911
912 return TRUE;
913 }
914
/* Classification of the overlay stub a reloc needs, returned by
   needs_ovl_stub below.  */
enum _stub_type
{
  no_stub,		/* No stub required.  */
  call_ovl_stub,	/* Stub for a call ($lr live in the usual way).  */
  br000_ovl_stub,	/* Branch stubs: br000..br111 are indexed by the
			   3-bit lrlive value taken from the branch
			   instruction (see needs_ovl_stub).  */
  br001_ovl_stub,
  br010_ovl_stub,
  br011_ovl_stub,
  br100_ovl_stub,
  br101_ovl_stub,
  br110_ovl_stub,
  br111_ovl_stub,
  nonovl_stub,		/* Stub in the non-overlay area (address taken).  */
  stub_error		/* Error reading insn contents.  */
};
930
/* Return non-zero if this reloc symbol should go via an overlay stub.
   Return 2 if the stub must be in non-overlay area.  */

static enum _stub_type
needs_ovl_stub (struct elf_link_hash_entry *h,
		Elf_Internal_Sym *sym,
		asection *sym_sec,
		asection *input_section,
		Elf_Internal_Rela *irela,
		bfd_byte *contents,
		struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  enum elf_spu_reloc_type r_type;
  unsigned int sym_type;
  bfd_boolean branch, hint, call;
  enum _stub_type ret = no_stub;
  bfd_byte insn[4];

  /* Symbols not defined in a real output section never need a stub.  */
  if (sym_sec == NULL
      || sym_sec->output_section == bfd_abs_section_ptr
      || spu_elf_section_data (sym_sec->output_section) == NULL)
    return ret;

  if (h != NULL)
    {
      /* Ensure no stubs for user supplied overlay manager syms.  */
      if (h == htab->ovly_entry[0] || h == htab->ovly_entry[1])
	return ret;

      /* setjmp always goes via an overlay stub, because then the return
	 and hence the longjmp goes via __ovly_return.  That magically
	 makes setjmp/longjmp between overlays work.  */
      if (strncmp (h->root.root.string, "setjmp", 6) == 0
	  && (h->root.root.string[6] == '\0' || h->root.root.string[6] == '@'))
	ret = call_ovl_stub;
    }

  if (h != NULL)
    sym_type = h->type;
  else
    sym_type = ELF_ST_TYPE (sym->st_info);

  r_type = ELF32_R_TYPE (irela->r_info);
  branch = FALSE;
  hint = FALSE;
  call = FALSE;
  /* Only 16-bit branch-capable relocs can be a branch or hint; look at
     the instruction bytes to decide.  */
  if (r_type == R_SPU_REL16 || r_type == R_SPU_ADDR16)
    {
      if (contents == NULL)
	{
	  /* No section contents supplied; read just this insn.  */
	  contents = insn;
	  if (!bfd_get_section_contents (input_section->owner,
					 input_section,
					 contents,
					 irela->r_offset, 4))
	    return stub_error;
	}
      else
	contents += irela->r_offset;

      branch = is_branch (contents);
      hint = is_hint (contents);
      if (branch || hint)
	{
	  /* brsl (0x33) or brasl (0x31): a call, not a plain branch.  */
	  call = (contents[0] & 0xfd) == 0x31;
	  if (call
	      && sym_type != STT_FUNC
	      && contents != insn)
	    {
	      /* It's common for people to write assembly and forget
		 to give function symbols the right type.  Handle
		 calls to such symbols, but warn so that (hopefully)
		 people will fix their code.  We need the symbol
		 type to be correct to distinguish function pointer
		 initialisation from other pointer initialisations.  */
	      const char *sym_name;

	      if (h != NULL)
		sym_name = h->root.root.string;
	      else
		{
		  Elf_Internal_Shdr *symtab_hdr;
		  symtab_hdr = &elf_tdata (input_section->owner)->symtab_hdr;
		  sym_name = bfd_elf_sym_name (input_section->owner,
					       symtab_hdr,
					       sym,
					       sym_sec);
		}
	      _bfd_error_handler
		/* xgettext:c-format */
		(_("warning: call to non-function symbol %s defined in %pB"),
		 sym_name, sym_sec->owner);

	    }
	}
    }

  /* Soft-icache only stubs branches; and plain data references to
     non-function, non-code symbols never need a stub.  */
  if ((!branch && htab->params->ovly_flavour == ovly_soft_icache)
      || (sym_type != STT_FUNC
	  && !(branch || hint)
	  && (sym_sec->flags & SEC_CODE) == 0))
    return no_stub;

  /* Usually, symbols in non-overlay sections don't need stubs.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index == 0
      && !htab->params->non_overlay_stubs)
    return ret;

  /* A reference from some other section to a symbol in an overlay
     section needs a stub.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index
       != spu_elf_section_data (input_section->output_section)->u.o.ovl_index)
    {
      /* lrlive is the 3-bit field in bits 9..11 of a branch insn.  */
      unsigned int lrlive = 0;
      if (branch)
	lrlive = (contents[1] & 0x70) >> 4;

      if (!lrlive && (call || sym_type == STT_FUNC))
	ret = call_ovl_stub;
      else
	ret = br000_ovl_stub + lrlive;
    }

  /* If this insn isn't a branch then we are possibly taking the
     address of a function and passing it out somehow.  Soft-icache code
     always generates inline code to do indirect branches.  */
  if (!(branch || hint)
      && sym_type == STT_FUNC
      && htab->params->ovly_flavour != ovly_soft_icache)
    ret = nonovl_stub;

  return ret;
}
1065
/* Account for one overlay stub needed for the symbol referenced by
   IRELA (hash entry H for globals, or the local sym index) in section
   ISEC of IBFD.  Stub records are kept on the hijacked got lists
   (struct got_entry above).  */

static bfd_boolean
count_stub (struct spu_link_hash_table *htab,
	    bfd *ibfd,
	    asection *isec,
	    enum _stub_type stub_type,
	    struct elf_link_hash_entry *h,
	    const Elf_Internal_Rela *irela)
{
  unsigned int ovl = 0;
  struct got_entry *g, **head;
  bfd_vma addend;

  /* If this instruction is a branch or call, we need a stub
     for it.  One stub per function per overlay.
     If it isn't a branch, then we are taking the address of
     this function so need a stub in the non-overlay area
     for it.  One stub per function.  */
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

  /* Pick the got list for this symbol: the hash entry's for globals,
     or a lazily-allocated per-bfd array slot for locals.  */
  if (h != NULL)
    head = &h->got.glist;
  else
    {
      if (elf_local_got_ents (ibfd) == NULL)
	{
	  bfd_size_type amt = (elf_tdata (ibfd)->symtab_hdr.sh_info
			       * sizeof (*elf_local_got_ents (ibfd)));
	  elf_local_got_ents (ibfd) = bfd_zmalloc (amt);
	  if (elf_local_got_ents (ibfd) == NULL)
	    return FALSE;
	}
      head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
    }

  /* Soft-icache keeps only a per-overlay count, no per-symbol list.  */
  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      htab->stub_count[ovl] += 1;
      return TRUE;
    }

  addend = 0;
  if (irela != NULL)
    addend = irela->r_addend;

  if (ovl == 0)
    {
      struct got_entry *gnext;

      /* Look for an existing non-overlay stub for this addend.  */
      for (g = *head; g != NULL; g = g->next)
	if (g->addend == addend && g->ovl == 0)
	  break;

      if (g == NULL)
	{
	  /* Need a new non-overlay area stub.  Zap other stubs.  */
	  for (g = *head; g != NULL; g = gnext)
	    {
	      gnext = g->next;
	      if (g->addend == addend)
		{
		  htab->stub_count[g->ovl] -= 1;
		  free (g);
		}
	    }
	}
    }
  else
    {
      /* A stub in this overlay, or one already in the non-overlay
	 area, serves this reference.  */
      for (g = *head; g != NULL; g = g->next)
	if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
	  break;
    }

  if (g == NULL)
    {
      /* No usable stub found; record a new one.  */
      g = bfd_malloc (sizeof *g);
      if (g == NULL)
	return FALSE;
      g->ovl = ovl;
      g->addend = addend;
      g->stub_addr = (bfd_vma) -1;
      g->next = *head;
      *head = g;

      htab->stub_count[ovl] += 1;
    }

  return TRUE;
}
1156
1157 /* Support two sizes of overlay stubs, a slower more compact stub of two
1158 instructions, and a faster stub of four instructions.
1159 Soft-icache stubs are four or eight words. */
1160
1161 static unsigned int
1162 ovl_stub_size (struct spu_elf_params *params)
1163 {
1164 return 16 << params->ovly_flavour >> params->compact_stub;
1165 }
1166
1167 static unsigned int
1168 ovl_stub_size_log2 (struct spu_elf_params *params)
1169 {
1170 return 4 + params->ovly_flavour - params->compact_stub;
1171 }
1172
1173 /* Two instruction overlay stubs look like:
1174
1175 brsl $75,__ovly_load
1176 .word target_ovl_and_address
1177
1178 ovl_and_address is a word with the overlay number in the top 14 bits
1179 and local store address in the bottom 18 bits.
1180
1181 Four instruction overlay stubs look like:
1182
1183 ila $78,ovl_number
1184 lnop
1185 ila $79,target_address
1186 br __ovly_load
1187
1188 Software icache stubs are:
1189
1190 .word target_index
1191 .word target_ia;
1192 .word lrlive_branchlocalstoreaddr;
1193 brasl $75,__icache_br_handler
1194 .quad xor_pattern
1195 */
1196
/* Emit the overlay stub of kind STUB_TYPE for the call/branch IRELA
   in ISEC of IBFD, writing the instructions into the appropriate
   stub section and recording the stub address in the got_entry list
   previously populated by count_stub.  DEST/DEST_SEC give the branch
   destination.  Returns FALSE on error (also setting htab->stub_err
   for misaligned addresses).  */

static bfd_boolean
build_stub (struct bfd_link_info *info,
	    bfd *ibfd,
	    asection *isec,
	    enum _stub_type stub_type,
	    struct elf_link_hash_entry *h,
	    const Elf_Internal_Rela *irela,
	    bfd_vma dest,
	    asection *dest_sec)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  unsigned int ovl, dest_ovl, set_id;
  struct got_entry *g, **head;
  asection *sec;
  bfd_vma addend, from, to, br_dest, patt;
  unsigned int lrlive;

  ovl = 0;
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

  if (h != NULL)
    head = &h->got.glist;
  else
    head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);

  addend = 0;
  if (irela != NULL)
    addend = irela->r_addend;

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      /* Soft-icache: one got_entry per branch site, created here
	 rather than by count_stub, recording the branch address.  */
      g = bfd_malloc (sizeof *g);
      if (g == NULL)
	return FALSE;
      g->ovl = ovl;
      g->br_addr = 0;
      if (irela != NULL)
	g->br_addr = (irela->r_offset
		      + isec->output_offset
		      + isec->output_section->vma);
      g->next = *head;
      *head = g;
    }
  else
    {
      /* Find the entry count_stub made; it must exist.  */
      for (g = *head; g != NULL; g = g->next)
	if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
	  break;
      if (g == NULL)
	abort ();

      /* A non-overlay stub serves overlay callers too; nothing to
	 emit for this overlay.  */
      if (g->ovl == 0 && ovl != 0)
	return TRUE;

      /* Already built.  */
      if (g->stub_addr != (bfd_vma) -1)
	return TRUE;
    }

  sec = htab->stub_sec[ovl];
  dest += dest_sec->output_offset + dest_sec->output_section->vma;
  from = sec->size + sec->output_offset + sec->output_section->vma;
  g->stub_addr = from;
  /* "to" is the overlay manager entry point.  */
  to = (htab->ovly_entry[0]->root.u.def.value
	+ htab->ovly_entry[0]->root.u.def.section->output_offset
	+ htab->ovly_entry[0]->root.u.def.section->output_section->vma);

  /* All addresses involved must be word aligned.  */
  if (((dest | to | from) & 3) != 0)
    {
      htab->stub_err = 1;
      return FALSE;
    }
  dest_ovl = spu_elf_section_data (dest_sec->output_section)->u.o.ovl_index;

  if (htab->params->ovly_flavour == ovly_normal
      && !htab->params->compact_stub)
    {
      /* Four-instruction stub: ila/lnop/ila/br(a).  */
      bfd_put_32 (sec->owner, ILA + ((dest_ovl << 7) & 0x01ffff80) + 78,
		  sec->contents + sec->size);
      bfd_put_32 (sec->owner, LNOP,
		  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, ILA + ((dest << 7) & 0x01ffff80) + 79,
		  sec->contents + sec->size + 8);
      if (!BRA_STUBS)
	bfd_put_32 (sec->owner, BR + (((to - (from + 12)) << 5) & 0x007fff80),
		    sec->contents + sec->size + 12);
      else
	bfd_put_32 (sec->owner, BRA + ((to << 5) & 0x007fff80),
		    sec->contents + sec->size + 12);
    }
  else if (htab->params->ovly_flavour == ovly_normal
	   && htab->params->compact_stub)
    {
      /* Two-instruction stub: br(a)sl + ovl/address word.  */
      if (!BRA_STUBS)
	bfd_put_32 (sec->owner, BRSL + (((to - from) << 5) & 0x007fff80) + 75,
		    sec->contents + sec->size);
      else
	bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
		    sec->contents + sec->size);
      bfd_put_32 (sec->owner, (dest & 0x3ffff) | (dest_ovl << 18),
		  sec->contents + sec->size + 4);
    }
  else if (htab->params->ovly_flavour == ovly_soft_icache
	   && htab->params->compact_stub)
    {
      /* Determine lr liveness at the branch, encoded into the stub
	 for the icache manager.  */
      lrlive = 0;
      if (stub_type == nonovl_stub)
	;
      else if (stub_type == call_ovl_stub)
	/* A brsl makes lr live and *(*sp+16) is live.
	   Tail calls have the same liveness.  */
	lrlive = 5;
      else if (!htab->params->lrlive_analysis)
	/* Assume stack frame and lr save.  */
	lrlive = 1;
      else if (irela != NULL)
	{
	  /* Analyse branch instructions.  */
	  struct function_info *caller;
	  bfd_vma off;

	  caller = find_function (isec, irela->r_offset, info);
	  if (caller->start == NULL)
	    off = irela->r_offset;
	  else
	    {
	      struct function_info *found = NULL;

	      /* Find the earliest piece of this function that
		 has frame adjusting instructions.  We might
		 see dynamic frame adjustment (eg. for alloca)
		 in some later piece, but functions using
		 alloca always set up a frame earlier.  Frame
		 setup instructions are always in one piece.  */
	      if (caller->lr_store != (bfd_vma) -1
		  || caller->sp_adjust != (bfd_vma) -1)
		found = caller;
	      while (caller->start != NULL)
		{
		  caller = caller->start;
		  if (caller->lr_store != (bfd_vma) -1
		      || caller->sp_adjust != (bfd_vma) -1)
		    found = caller;
		}
	      if (found != NULL)
		caller = found;
	      off = (bfd_vma) -1;
	    }

	  if (off > caller->sp_adjust)
	    {
	      if (off > caller->lr_store)
		/* Only *(*sp+16) is live.  */
		lrlive = 1;
	      else
		/* If no lr save, then we must be in a
		   leaf function with a frame.
		   lr is still live.  */
		lrlive = 4;
	    }
	  else if (off > caller->lr_store)
	    {
	      /* Between lr save and stack adjust.  */
	      lrlive = 3;
	      /* This should never happen since prologues won't
		 be split here.  */
	      BFD_ASSERT (0);
	    }
	  else
	    /* On entry to function.  */
	    lrlive = 5;

	  /* Warn when .brinfo-supplied liveness disagrees with our
	     analysis.  */
	  if (stub_type != br000_ovl_stub
	      && lrlive != stub_type - br000_ovl_stub)
	    /* xgettext:c-format */
	    info->callbacks->einfo (_("%pA:0x%v lrlive .brinfo (%u) differs "
				      "from analysis (%u)\n"),
				    isec, irela->r_offset, lrlive,
				    stub_type - br000_ovl_stub);
	}

      /* If given lrlive info via .brinfo, use it.  */
      if (stub_type > br000_ovl_stub)
	lrlive = stub_type - br000_ovl_stub;

      /* Non-overlay code uses the second overlay-manager entry.  */
      if (ovl == 0)
	to = (htab->ovly_entry[1]->root.u.def.value
	      + htab->ovly_entry[1]->root.u.def.section->output_offset
	      + htab->ovly_entry[1]->root.u.def.section->output_section->vma);

      /* The branch that uses this stub goes to stub_addr + 4.  We'll
	 set up an xor pattern that can be used by the icache manager
	 to modify this branch to go directly to its destination.  */
      g->stub_addr += 4;
      br_dest = g->stub_addr;
      if (irela == NULL)
	{
	  /* Except in the case of _SPUEAR_ stubs, the branch in
	     question is the one in the stub itself.  */
	  BFD_ASSERT (stub_type == nonovl_stub);
	  g->br_addr = g->stub_addr;
	  br_dest = to;
	}

      set_id = ((dest_ovl - 1) >> htab->num_lines_log2) + 1;
      bfd_put_32 (sec->owner, (set_id << 18) | (dest & 0x3ffff),
		  sec->contents + sec->size);
      bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
		  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, (lrlive << 29) | (g->br_addr & 0x3ffff),
		  sec->contents + sec->size + 8);
      patt = dest ^ br_dest;
      if (irela != NULL && ELF32_R_TYPE (irela->r_info) == R_SPU_REL16)
	patt = (dest - g->br_addr) ^ (br_dest - g->br_addr);
      bfd_put_32 (sec->owner, (patt << 5) & 0x007fff80,
		  sec->contents + sec->size + 12);

      if (ovl == 0)
	/* Extra space for linked list entries.  */
	sec->size += 16;
    }
  else
    abort ();

  sec->size += ovl_stub_size (htab->params);

  if (htab->params->emit_stub_syms)
    {
      /* Define a symbol "NNNNNNNN.ovl_call.<name>[+addend]" on the
	 stub, for debugging.  */
      size_t len;
      char *name;
      int add;

      len = 8 + sizeof (".ovl_call.") - 1;
      if (h != NULL)
	len += strlen (h->root.root.string);
      else
	len += 8 + 1 + 8;
      add = 0;
      if (irela != NULL)
	add = (int) irela->r_addend & 0xffffffff;
      if (add != 0)
	len += 1 + 8;
      name = bfd_malloc (len + 1);
      if (name == NULL)
	return FALSE;

      sprintf (name, "%08x.ovl_call.", g->ovl);
      if (h != NULL)
	strcpy (name + 8 + sizeof (".ovl_call.") - 1, h->root.root.string);
      else
	sprintf (name + 8 + sizeof (".ovl_call.") - 1, "%x:%x",
		 dest_sec->id & 0xffffffff,
		 (int) ELF32_R_SYM (irela->r_info) & 0xffffffff);
      if (add != 0)
	sprintf (name + len - 9, "+%x", add);

      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
      free (name);
      if (h == NULL)
	return FALSE;
      if (h->root.type == bfd_link_hash_new)
	{
	  h->root.type = bfd_link_hash_defined;
	  h->root.u.def.section = sec;
	  h->size = ovl_stub_size (htab->params);
	  h->root.u.def.value = sec->size - h->size;
	  h->type = STT_FUNC;
	  h->ref_regular = 1;
	  h->def_regular = 1;
	  h->ref_regular_nonweak = 1;
	  h->forced_local = 1;
	  h->non_elf = 0;
	}
    }

  return TRUE;
}
1474
1475 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
1476 symbols. */
1477
1478 static bfd_boolean
1479 allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1480 {
1481 /* Symbols starting with _SPUEAR_ need a stub because they may be
1482 invoked by the PPU. */
1483 struct bfd_link_info *info = inf;
1484 struct spu_link_hash_table *htab = spu_hash_table (info);
1485 asection *sym_sec;
1486
1487 if ((h->root.type == bfd_link_hash_defined
1488 || h->root.type == bfd_link_hash_defweak)
1489 && h->def_regular
1490 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
1491 && (sym_sec = h->root.u.def.section) != NULL
1492 && sym_sec->output_section != bfd_abs_section_ptr
1493 && spu_elf_section_data (sym_sec->output_section) != NULL
1494 && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
1495 || htab->params->non_overlay_stubs))
1496 {
1497 return count_stub (htab, NULL, NULL, nonovl_stub, h, NULL);
1498 }
1499
1500 return TRUE;
1501 }
1502
1503 static bfd_boolean
1504 build_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1505 {
1506 /* Symbols starting with _SPUEAR_ need a stub because they may be
1507 invoked by the PPU. */
1508 struct bfd_link_info *info = inf;
1509 struct spu_link_hash_table *htab = spu_hash_table (info);
1510 asection *sym_sec;
1511
1512 if ((h->root.type == bfd_link_hash_defined
1513 || h->root.type == bfd_link_hash_defweak)
1514 && h->def_regular
1515 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
1516 && (sym_sec = h->root.u.def.section) != NULL
1517 && sym_sec->output_section != bfd_abs_section_ptr
1518 && spu_elf_section_data (sym_sec->output_section) != NULL
1519 && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
1520 || htab->params->non_overlay_stubs))
1521 {
1522 return build_stub (info, NULL, NULL, nonovl_stub, h, NULL,
1523 h->root.u.def.value, sym_sec);
1524 }
1525
1526 return TRUE;
1527 }
1528
1529 /* Size or build stubs. */
1530
/* Walk every reloc of every SPU input section, and for each one that
   needs an overlay stub either count it (BUILD is FALSE, sizing pass)
   or emit it (BUILD is TRUE).  Returns FALSE on error.  */

static bfd_boolean
process_stubs (struct bfd_link_info *info, bfd_boolean build)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      extern const bfd_target spu_elf32_vec;
      Elf_Internal_Shdr *symtab_hdr;
      asection *isec;
      Elf_Internal_Sym *local_syms = NULL;

      /* Skip input files that are not SPU ELF objects.  */
      if (ibfd->xvec != &spu_elf32_vec)
	continue;

      /* We'll need the symbol table in a second.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      if (symtab_hdr->sh_info == 0)
	continue;

      /* Walk over each section attached to the input bfd.  */
      for (isec = ibfd->sections; isec != NULL; isec = isec->next)
	{
	  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;

	  /* If there aren't any relocs, then there's nothing more to do.  */
	  if ((isec->flags & SEC_RELOC) == 0
	      || isec->reloc_count == 0)
	    continue;

	  if (!maybe_needs_stubs (isec))
	    continue;

	  /* Get the relocs.  */
	  internal_relocs = _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
						       info->keep_memory);
	  if (internal_relocs == NULL)
	    goto error_ret_free_local;

	  /* Now examine each relocation.  */
	  irela = internal_relocs;
	  irelaend = irela + isec->reloc_count;
	  for (; irela < irelaend; irela++)
	    {
	      enum elf_spu_reloc_type r_type;
	      unsigned int r_indx;
	      asection *sym_sec;
	      Elf_Internal_Sym *sym;
	      struct elf_link_hash_entry *h;
	      enum _stub_type stub_type;

	      r_type = ELF32_R_TYPE (irela->r_info);
	      r_indx = ELF32_R_SYM (irela->r_info);

	      if (r_type >= R_SPU_max)
		{
		  bfd_set_error (bfd_error_bad_value);
		  /* These labels have function scope; earlier gotos
		     jump forward into this cleanup path.  */
		error_ret_free_internal:
		  if (elf_section_data (isec)->relocs != internal_relocs)
		    free (internal_relocs);
		error_ret_free_local:
		  if (local_syms != NULL
		      && (symtab_hdr->contents
			  != (unsigned char *) local_syms))
		    free (local_syms);
		  return FALSE;
		}

	      /* Determine the reloc target section.  */
	      if (!get_sym_h (&h, &sym, &sym_sec, &local_syms, r_indx, ibfd))
		goto error_ret_free_internal;

	      stub_type = needs_ovl_stub (h, sym, sym_sec, isec, irela,
					  NULL, info);
	      if (stub_type == no_stub)
		continue;
	      else if (stub_type == stub_error)
		goto error_ret_free_internal;

	      /* Lazily allocate the per-overlay stub counters
		 (index 0 is the non-overlay area).  */
	      if (htab->stub_count == NULL)
		{
		  bfd_size_type amt;
		  amt = (htab->num_overlays + 1) * sizeof (*htab->stub_count);
		  htab->stub_count = bfd_zmalloc (amt);
		  if (htab->stub_count == NULL)
		    goto error_ret_free_internal;
		}

	      if (!build)
		{
		  if (!count_stub (htab, ibfd, isec, stub_type, h, irela))
		    goto error_ret_free_internal;
		}
	      else
		{
		  bfd_vma dest;

		  if (h != NULL)
		    dest = h->root.u.def.value;
		  else
		    dest = sym->st_value;
		  dest += irela->r_addend;
		  if (!build_stub (info, ibfd, isec, stub_type, h, irela,
				   dest, sym_sec))
		    goto error_ret_free_internal;
		}
	    }

	  /* We're done with the internal relocs, free them.  */
	  if (elf_section_data (isec)->relocs != internal_relocs)
	    free (internal_relocs);
	}

      /* Either free the symbols read by get_sym_h, or cache them on
	 the symtab header for later passes.  */
      if (local_syms != NULL
	  && symtab_hdr->contents != (unsigned char *) local_syms)
	{
	  if (!info->keep_memory)
	    free (local_syms);
	  else
	    symtab_hdr->contents = (unsigned char *) local_syms;
	}
    }

  return TRUE;
}
1657
1658 /* Allocate space for overlay call and return stubs.
1659 Return 0 on error, 1 if no overlays, 2 otherwise. */
1660
int
spu_elf_size_stubs (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab;
  bfd *ibfd;
  bfd_size_type amt;
  flagword flags;
  unsigned int i;
  asection *stub;

  /* Counting pass: populates htab->stub_count.  */
  if (!process_stubs (info, FALSE))
    return 0;

  htab = spu_hash_table (info);
  elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, info);
  if (htab->stub_err)
    return 0;

  /* All generated sections are attached to the first input bfd.  */
  ibfd = info->input_bfds;
  if (htab->stub_count != NULL)
    {
      /* One stub section per overlay plus one for the non-overlay
	 area (index 0).  */
      amt = (htab->num_overlays + 1) * sizeof (*htab->stub_sec);
      htab->stub_sec = bfd_zmalloc (amt);
      if (htab->stub_sec == NULL)
	return 0;

      flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
	       | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
      stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
      htab->stub_sec[0] = stub;
      if (stub == NULL
	  || !bfd_set_section_alignment (ibfd, stub,
					 ovl_stub_size_log2 (htab->params)))
	return 0;
      stub->size = htab->stub_count[0] * ovl_stub_size (htab->params);
      if (htab->params->ovly_flavour == ovly_soft_icache)
	/* Extra space for linked list entries.  */
	stub->size += htab->stub_count[0] * 16;

      for (i = 0; i < htab->num_overlays; ++i)
	{
	  asection *osec = htab->ovl_sec[i];
	  unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
	  stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
	  htab->stub_sec[ovl] = stub;
	  if (stub == NULL
	      || !bfd_set_section_alignment (ibfd, stub,
					     ovl_stub_size_log2 (htab->params)))
	    return 0;
	  stub->size = htab->stub_count[ovl] * ovl_stub_size (htab->params);
	}
    }

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      /* Space for icache manager tables.
	 a) Tag array, one quadword per cache line.
	 b) Rewrite "to" list, one quadword per cache line.
	 c) Rewrite "from" list, one byte per outgoing branch (rounded up to
	    a power-of-two number of full quadwords) per cache line.  */

      flags = SEC_ALLOC;
      htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
      if (htab->ovtab == NULL
	  || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
	return 0;

      htab->ovtab->size = (16 + 16 + (16 << htab->fromelem_size_log2))
			  << htab->num_lines_log2;

      flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      htab->init = bfd_make_section_anyway_with_flags (ibfd, ".ovini", flags);
      if (htab->init == NULL
	  || !bfd_set_section_alignment (ibfd, htab->init, 4))
	return 0;

      htab->init->size = 16;
    }
  else if (htab->stub_count == NULL)
    /* No stubs needed at all: report "no overlays".  */
    return 1;
  else
    {
      /* htab->ovtab consists of two arrays.
	 .	struct {
	 .	  u32 vma;
	 .	  u32 size;
	 .	  u32 file_off;
	 .	  u32 buf;
	 .	} _ovly_table[];
	 .
	 .	struct {
	 .	  u32 mapped;
	 .	} _ovly_buf_table[];
	 .  */

      flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
      if (htab->ovtab == NULL
	  || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
	return 0;

      htab->ovtab->size = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
    }

  htab->toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
  if (htab->toe == NULL
      || !bfd_set_section_alignment (ibfd, htab->toe, 4))
    return 0;
  htab->toe->size = 16;

  return 2;
}
1773
1774 /* Called from ld to place overlay manager data sections. This is done
1775 after the overlay manager itself is loaded, mainly so that the
1776 linker's htab->init section is placed after any other .ovl.init
1777 sections. */
1778
1779 void
1780 spu_elf_place_overlay_data (struct bfd_link_info *info)
1781 {
1782 struct spu_link_hash_table *htab = spu_hash_table (info);
1783 unsigned int i;
1784
1785 if (htab->stub_sec != NULL)
1786 {
1787 (*htab->params->place_spu_section) (htab->stub_sec[0], NULL, ".text");
1788
1789 for (i = 0; i < htab->num_overlays; ++i)
1790 {
1791 asection *osec = htab->ovl_sec[i];
1792 unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
1793 (*htab->params->place_spu_section) (htab->stub_sec[ovl], osec, NULL);
1794 }
1795 }
1796
1797 if (htab->params->ovly_flavour == ovly_soft_icache)
1798 (*htab->params->place_spu_section) (htab->init, NULL, ".ovl.init");
1799
1800 if (htab->ovtab != NULL)
1801 {
1802 const char *ovout = ".data";
1803 if (htab->params->ovly_flavour == ovly_soft_icache)
1804 ovout = ".bss";
1805 (*htab->params->place_spu_section) (htab->ovtab, NULL, ovout);
1806 }
1807
1808 if (htab->toe != NULL)
1809 (*htab->params->place_spu_section) (htab->toe, NULL, ".toe");
1810 }
1811
1812 /* Functions to handle embedded spu_ovl.o object. */
1813
1814 static void *
1815 ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
1816 {
1817 return stream;
1818 }
1819
1820 static file_ptr
1821 ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
1822 void *stream,
1823 void *buf,
1824 file_ptr nbytes,
1825 file_ptr offset)
1826 {
1827 struct _ovl_stream *os;
1828 size_t count;
1829 size_t max;
1830
1831 os = (struct _ovl_stream *) stream;
1832 max = (const char *) os->end - (const char *) os->start;
1833
1834 if ((ufile_ptr) offset >= max)
1835 return 0;
1836
1837 count = nbytes;
1838 if (count > max - offset)
1839 count = max - offset;
1840
1841 memcpy (buf, (const char *) os->start + offset, count);
1842 return count;
1843 }
1844
1845 static int
1846 ovl_mgr_stat (struct bfd *abfd ATTRIBUTE_UNUSED,
1847 void *stream,
1848 struct stat *sb)
1849 {
1850 struct _ovl_stream *os = (struct _ovl_stream *) stream;
1851
1852 memset (sb, 0, sizeof (*sb));
1853 sb->st_size = (const char *) os->end - (const char *) os->start;
1854 return 0;
1855 }
1856
1857 bfd_boolean
1858 spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
1859 {
1860 *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
1861 "elf32-spu",
1862 ovl_mgr_open,
1863 (void *) stream,
1864 ovl_mgr_pread,
1865 NULL,
1866 ovl_mgr_stat);
1867 return *ovl_bfd != NULL;
1868 }
1869
1870 static unsigned int
1871 overlay_index (asection *sec)
1872 {
1873 if (sec == NULL
1874 || sec->output_section == bfd_abs_section_ptr)
1875 return 0;
1876 return spu_elf_section_data (sec->output_section)->u.o.ovl_index;
1877 }
1878
1879 /* Define an STT_OBJECT symbol. */
1880
static struct elf_link_hash_entry *
define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
{
  struct elf_link_hash_entry *h;

  /* Create or find NAME in the linker hash table.  */
  h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
  if (h == NULL)
    return NULL;

  if (h->root.type != bfd_link_hash_defined
      || !h->def_regular)
    {
      /* Not yet defined by anyone: define it in the .ovtab section.
	 Callers may overwrite the section and value afterwards.  */
      h->root.type = bfd_link_hash_defined;
      h->root.u.def.section = htab->ovtab;
      h->type = STT_OBJECT;
      h->ref_regular = 1;
      h->def_regular = 1;
      h->ref_regular_nonweak = 1;
      h->non_elf = 0;
    }
  else if (h->root.u.def.section->owner != NULL)
    {
      /* Already defined by an input object: that clashes with the
	 linker-generated definition.  */
      /* xgettext:c-format */
      _bfd_error_handler (_("%pB is not allowed to define %s"),
			  h->root.u.def.section->owner,
			  h->root.root.string);
      bfd_set_error (bfd_error_bad_value);
      return NULL;
    }
  else
    {
      /* Defined by a linker script (no owning bfd): also an error.  */
      _bfd_error_handler (_("you are not allowed to define %s in a script"),
			  h->root.root.string);
      bfd_set_error (bfd_error_bad_value);
      return NULL;
    }

  return h;
}
1920
1921 /* Fill in all stubs and the overlay tables. */
1922
static bfd_boolean
spu_elf_build_stubs (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  struct elf_link_hash_entry *h;
  bfd_byte *p;
  asection *s;
  bfd *obfd;
  unsigned int i;

  /* Sanity check: the overlay manager entry points themselves must
     not live inside an overlay.  */
  if (htab->num_overlays != 0)
    {
      for (i = 0; i < 2; i++)
	{
	  h = htab->ovly_entry[i];
	  if (h != NULL
	      && (h->root.type == bfd_link_hash_defined
		  || h->root.type == bfd_link_hash_defweak)
	      && h->def_regular)
	    {
	      s = h->root.u.def.section->output_section;
	      if (spu_elf_section_data (s)->u.o.ovl_index)
		{
		  _bfd_error_handler (_("%s in overlay section"),
				      h->root.root.string);
		  bfd_set_error (bfd_error_bad_value);
		  return FALSE;
		}
	    }
	}
    }

  if (htab->stub_sec != NULL)
    {
      /* Allocate contents for each stub section, stashing the sized
	 length in rawsize; build_stub re-accumulates size as it
	 emits, so the two can be compared below.  */
      for (i = 0; i <= htab->num_overlays; i++)
	if (htab->stub_sec[i]->size != 0)
	  {
	    htab->stub_sec[i]->contents = bfd_zalloc (htab->stub_sec[i]->owner,
						      htab->stub_sec[i]->size);
	    if (htab->stub_sec[i]->contents == NULL)
	      return FALSE;
	    htab->stub_sec[i]->rawsize = htab->stub_sec[i]->size;
	    htab->stub_sec[i]->size = 0;
	  }

      /* Fill in all the stubs.  */
      process_stubs (info, TRUE);
      if (!htab->stub_err)
	elf_link_hash_traverse (&htab->elf, build_spuear_stubs, info);

      if (htab->stub_err)
	{
	  _bfd_error_handler (_("overlay stub relocation overflow"));
	  bfd_set_error (bfd_error_bad_value);
	  return FALSE;
	}

      /* The build pass must emit exactly what the sizing pass
	 counted.  */
      for (i = 0; i <= htab->num_overlays; i++)
	{
	  if (htab->stub_sec[i]->size != htab->stub_sec[i]->rawsize)
	    {
	      _bfd_error_handler (_("stubs don't match calculated size"));
	      bfd_set_error (bfd_error_bad_value);
	      return FALSE;
	    }
	  htab->stub_sec[i]->rawsize = 0;
	}
    }

  if (htab->ovtab == NULL || htab->ovtab->size == 0)
    return TRUE;

  htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
  if (htab->ovtab->contents == NULL)
    return FALSE;

  p = htab->ovtab->contents;
  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      /* Define the absolute/table symbols describing the icache
	 layout that the icache manager code reads.  */
      bfd_vma off;

      h = define_ovtab_symbol (htab, "__icache_tag_array");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 0;
      h->size = 16 << htab->num_lines_log2;
      off = h->size;

      h = define_ovtab_symbol (htab, "__icache_tag_array_size");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 16 << htab->num_lines_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_rewrite_to");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = off;
      h->size = 16 << htab->num_lines_log2;
      off += h->size;

      h = define_ovtab_symbol (htab, "__icache_rewrite_to_size");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 16 << htab->num_lines_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_rewrite_from");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = off;
      h->size = 16 << (htab->fromelem_size_log2 + htab->num_lines_log2);
      off += h->size;

      h = define_ovtab_symbol (htab, "__icache_rewrite_from_size");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 16 << (htab->fromelem_size_log2
				   + htab->num_lines_log2);
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_log2_fromelemsize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->fromelem_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_base");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->ovl_sec[0]->vma;
      h->root.u.def.section = bfd_abs_section_ptr;
      h->size = htab->num_buf << htab->line_size_log2;

      h = define_ovtab_symbol (htab, "__icache_linesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 1 << htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_log2_linesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_neg_log2_linesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = -htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_cachesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 1 << (htab->num_lines_log2 + htab->line_size_log2);
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_log2_cachesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->num_lines_log2 + htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_neg_log2_cachesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = -(htab->num_lines_log2 + htab->line_size_log2);
      h->root.u.def.section = bfd_abs_section_ptr;

      if (htab->init != NULL && htab->init->size != 0)
	{
	  htab->init->contents = bfd_zalloc (htab->init->owner,
					     htab->init->size);
	  if (htab->init->contents == NULL)
	    return FALSE;

	  h = define_ovtab_symbol (htab, "__icache_fileoff");
	  if (h == NULL)
	    return FALSE;
	  h->root.u.def.value = 0;
	  h->root.u.def.section = htab->init;
	  h->size = 8;
	}
    }
  else
    {
      /* Write out _ovly_table.  */
      /* set low bit of .size to mark non-overlay area as present.  */
      p[7] = 1;
      obfd = htab->ovtab->output_section->owner;
      for (s = obfd->sections; s != NULL; s = s->next)
	{
	  unsigned int ovl_index = spu_elf_section_data (s)->u.o.ovl_index;

	  if (ovl_index != 0)
	    {
	      unsigned long off = ovl_index * 16;
	      unsigned int ovl_buf = spu_elf_section_data (s)->u.o.ovl_buf;

	      bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
	      bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16,
			  p + off + 4);
	      /* file_off written later in spu_elf_modify_program_headers.  */
	      bfd_put_32 (htab->ovtab->owner, ovl_buf, p + off + 12);
	    }
	}

      h = define_ovtab_symbol (htab, "_ovly_table");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 16;
      h->size = htab->num_overlays * 16;

      h = define_ovtab_symbol (htab, "_ovly_table_end");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->num_overlays * 16 + 16;
      h->size = 0;

      h = define_ovtab_symbol (htab, "_ovly_buf_table");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->num_overlays * 16 + 16;
      h->size = htab->num_buf * 4;

      h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
      h->size = 0;
    }

  /* _EAR_ marks the "effective address reference" table in .toe.  */
  h = define_ovtab_symbol (htab, "_EAR_");
  if (h == NULL)
    return FALSE;
  h->root.u.def.section = htab->toe;
  h->root.u.def.value = 0;
  h->size = 16;

  return TRUE;
}
2165
2166 /* Check that all loadable section VMAs lie in the range
2167 LO .. HI inclusive, and stash some parameters for --auto-overlay. */
2168
2169 asection *
2170 spu_elf_check_vma (struct bfd_link_info *info)
2171 {
2172 struct elf_segment_map *m;
2173 unsigned int i;
2174 struct spu_link_hash_table *htab = spu_hash_table (info);
2175 bfd *abfd = info->output_bfd;
2176 bfd_vma hi = htab->params->local_store_hi;
2177 bfd_vma lo = htab->params->local_store_lo;
2178
2179 htab->local_store = hi + 1 - lo;
2180
2181 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
2182 if (m->p_type == PT_LOAD)
2183 for (i = 0; i < m->count; i++)
2184 if (m->sections[i]->size != 0
2185 && (m->sections[i]->vma < lo
2186 || m->sections[i]->vma > hi
2187 || m->sections[i]->vma + m->sections[i]->size - 1 > hi))
2188 return m->sections[i];
2189
2190 return NULL;
2191 }
2192
2193 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
2194 Search for stack adjusting insns, and return the sp delta.
2195 If a store of lr is found save the instruction offset to *LR_STORE.
2196 If a stack adjusting instruction is found, save that offset to
2197 *SP_ADJUST. */
2198
static int
find_function_stack_adjust (asection *sec,
			    bfd_vma offset,
			    bfd_vma *lr_store,
			    bfd_vma *sp_adjust)
{
  /* Track a value for each of the 128 SPU registers, seeded to zero,
     so that sp ($1) adjustments built up over several insns can be
     followed.  Only the insns decoded below update these values.  */
  int reg[128];

  memset (reg, 0, sizeof (reg));
  for ( ; offset + 4 <= sec->size; offset += 4)
    {
      unsigned char buf[4];
      int rt, ra;
      int imm;

      /* Assume no relocs on stack adjusting insns.  */
      if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
	break;

      /* Extract the RT and RA register fields common to the insn
	 formats decoded here.  */
      rt = buf[3] & 0x7f;
      ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);

      if (buf[0] == 0x24 /* stqd */)
	{
	  /* A store of $lr ($0) relative to $sp ($1) is the lr save.  */
	  if (rt == 0 /* lr */ && ra == 1 /* sp */)
	    *lr_store = offset;
	  continue;
	}

      /* Partly decoded immediate field.  */
      imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);

      if (buf[0] == 0x1c /* ai */)
	{
	  /* ai rt,ra,imm10: sign-extend the 10-bit immediate.  */
	  imm >>= 7;
	  imm = (imm ^ 0x200) - 0x200;
	  reg[rt] = reg[ra] + imm;

	  if (rt == 1 /* sp */)
	    {
	      /* A positive sp change means we've hit the epilogue;
		 give up rather than report a bogus adjustment.  */
	      if (reg[rt] > 0)
		break;
	      *sp_adjust = offset;
	      return reg[rt];
	    }
	}
      else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
	{
	  int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);

	  reg[rt] = reg[ra] + reg[rb];
	  if (rt == 1)
	    {
	      if (reg[rt] > 0)
		break;
	      *sp_adjust = offset;
	      return reg[rt];
	    }
	}
      else if (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */)
	{
	  int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);

	  /* sf computes rb - ra.  */
	  reg[rt] = reg[rb] - reg[ra];
	  if (rt == 1)
	    {
	      if (reg[rt] > 0)
		break;
	      *sp_adjust = offset;
	      return reg[rt];
	    }
	}
      else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
	{
	  /* Immediate-load family: build the constant for later use
	     (typically combined with iohl or a/sf for big frames).  */
	  if (buf[0] >= 0x42 /* ila */)
	    imm |= (buf[0] & 1) << 17;
	  else
	    {
	      imm &= 0xffff;

	      if (buf[0] == 0x40 /* il */)
		{
		  if ((buf[1] & 0x80) == 0)
		    continue;
		  imm = (imm ^ 0x8000) - 0x8000;
		}
	      else if ((buf[1] & 0x80) == 0 /* ilhu */)
		imm <<= 16;
	    }
	  reg[rt] = imm;
	  continue;
	}
      else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
	{
	  /* OR in the low halfword, completing an ilhu/iohl pair.  */
	  reg[rt] |= imm & 0xffff;
	  continue;
	}
      else if (buf[0] == 0x04 /* ori */)
	{
	  imm >>= 7;
	  imm = (imm ^ 0x200) - 0x200;
	  reg[rt] = reg[ra] | imm;
	  continue;
	}
      else if (buf[0] == 0x32 && (buf[1] & 0x80) != 0 /* fsmbi */)
	{
	  /* Form select mask byte immediate; only the four mask bits
	     that select whole words matter for constants used here.  */
	  reg[rt] = (  ((imm & 0x8000) ? 0xff000000 : 0)
		     | ((imm & 0x4000) ? 0x00ff0000 : 0)
		     | ((imm & 0x2000) ? 0x0000ff00 : 0)
		     | ((imm & 0x1000) ? 0x000000ff : 0));
	  continue;
	}
      else if (buf[0] == 0x16 /* andbi */)
	{
	  /* Replicate the byte immediate across the word before
	     masking.  */
	  imm >>= 7;
	  imm &= 0xff;
	  imm |= imm << 8;
	  imm |= imm << 16;
	  reg[rt] = reg[ra] & imm;
	  continue;
	}
      else if (buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
	{
	  /* Used in pic reg load.  Say rt is trashed.  Won't be used
	     in stack adjust, but we need to continue past this branch.  */
	  reg[rt] = 0;
	  continue;
	}
      else if (is_branch (buf) || is_indirect_branch (buf))
	/* If we hit a branch then we must be out of the prologue.  */
	break;
    }

  /* No stack adjustment found.  */
  return 0;
}
2334
2335 /* qsort predicate to sort symbols by section and value. */
2336
2337 static Elf_Internal_Sym *sort_syms_syms;
2338 static asection **sort_syms_psecs;
2339
2340 static int
2341 sort_syms (const void *a, const void *b)
2342 {
2343 Elf_Internal_Sym *const *s1 = a;
2344 Elf_Internal_Sym *const *s2 = b;
2345 asection *sec1,*sec2;
2346 bfd_signed_vma delta;
2347
2348 sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
2349 sec2 = sort_syms_psecs[*s2 - sort_syms_syms];
2350
2351 if (sec1 != sec2)
2352 return sec1->index - sec2->index;
2353
2354 delta = (*s1)->st_value - (*s2)->st_value;
2355 if (delta != 0)
2356 return delta < 0 ? -1 : 1;
2357
2358 delta = (*s2)->st_size - (*s1)->st_size;
2359 if (delta != 0)
2360 return delta < 0 ? -1 : 1;
2361
2362 return *s1 < *s2 ? -1 : 1;
2363 }
2364
2365 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
2366 entries for section SEC. */
2367
2368 static struct spu_elf_stack_info *
2369 alloc_stack_info (asection *sec, int max_fun)
2370 {
2371 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2372 bfd_size_type amt;
2373
2374 amt = sizeof (struct spu_elf_stack_info);
2375 amt += (max_fun - 1) * sizeof (struct function_info);
2376 sec_data->u.i.stack_info = bfd_zmalloc (amt);
2377 if (sec_data->u.i.stack_info != NULL)
2378 sec_data->u.i.stack_info->max_fun = max_fun;
2379 return sec_data->u.i.stack_info;
2380 }
2381
2382 /* Add a new struct function_info describing a (part of a) function
2383 starting at SYM_H. Keep the array sorted by address. */
2384
static struct function_info *
maybe_insert_function (asection *sec,
		       void *sym_h,
		       bfd_boolean global,
		       bfd_boolean is_func)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
  int i;
  bfd_vma off, size;

  /* Lazily create the per-section function array.  */
  if (sinfo == NULL)
    {
      sinfo = alloc_stack_info (sec, 20);
      if (sinfo == NULL)
	return NULL;
    }

  /* SYM_H is either an Elf_Internal_Sym * (local) or an
     elf_link_hash_entry * (global); extract offset and size.  */
  if (!global)
    {
      Elf_Internal_Sym *sym = sym_h;
      off = sym->st_value;
      size = sym->st_size;
    }
  else
    {
      struct elf_link_hash_entry *h = sym_h;
      off = h->root.u.def.value;
      size = h->size;
    }

  /* Find the last existing entry starting at or before OFF; the array
     is kept sorted by lo.  */
  for (i = sinfo->num_fun; --i >= 0; )
    if (sinfo->fun[i].lo <= off)
      break;

  if (i >= 0)
    {
      /* Don't add another entry for an alias, but do update some
	 info.  */
      if (sinfo->fun[i].lo == off)
	{
	  /* Prefer globals over local syms.  */
	  if (global && !sinfo->fun[i].global)
	    {
	      sinfo->fun[i].global = TRUE;
	      sinfo->fun[i].u.h = sym_h;
	    }
	  if (is_func)
	    sinfo->fun[i].is_func = TRUE;
	  return &sinfo->fun[i];
	}
      /* Ignore a zero-size symbol inside an existing function.  */
      else if (sinfo->fun[i].hi > off && size == 0)
	return &sinfo->fun[i];
    }

  /* Grow the array by half again (plus a little) when full.  */
  if (sinfo->num_fun >= sinfo->max_fun)
    {
      bfd_size_type amt = sizeof (struct spu_elf_stack_info);
      bfd_size_type old = amt;

      old += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
      amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo = bfd_realloc (sinfo, amt);
      if (sinfo == NULL)
	return NULL;
      /* Zero the newly added tail, matching bfd_zmalloc behaviour.  */
      memset ((char *) sinfo + old, 0, amt - old);
      sec_data->u.i.stack_info = sinfo;
    }

  /* Shift later entries up to keep the array sorted, then fill in the
     new entry at index i.  */
  if (++i < sinfo->num_fun)
    memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
	     (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
  sinfo->fun[i].is_func = is_func;
  sinfo->fun[i].global = global;
  sinfo->fun[i].sec = sec;
  if (global)
    sinfo->fun[i].u.h = sym_h;
  else
    sinfo->fun[i].u.sym = sym_h;
  sinfo->fun[i].lo = off;
  sinfo->fun[i].hi = off + size;
  /* -1 marks "not found"; find_function_stack_adjust fills these in
     when it sees the relevant prologue insns.  */
  sinfo->fun[i].lr_store = -1;
  sinfo->fun[i].sp_adjust = -1;
  sinfo->fun[i].stack = -find_function_stack_adjust (sec, off,
						     &sinfo->fun[i].lr_store,
						     &sinfo->fun[i].sp_adjust);
  sinfo->num_fun += 1;
  return &sinfo->fun[i];
}
2476
2477 /* Return the name of FUN. */
2478
2479 static const char *
2480 func_name (struct function_info *fun)
2481 {
2482 asection *sec;
2483 bfd *ibfd;
2484 Elf_Internal_Shdr *symtab_hdr;
2485
2486 while (fun->start != NULL)
2487 fun = fun->start;
2488
2489 if (fun->global)
2490 return fun->u.h->root.root.string;
2491
2492 sec = fun->sec;
2493 if (fun->u.sym->st_name == 0)
2494 {
2495 size_t len = strlen (sec->name);
2496 char *name = bfd_malloc (len + 10);
2497 if (name == NULL)
2498 return "(null)";
2499 sprintf (name, "%s+%lx", sec->name,
2500 (unsigned long) fun->u.sym->st_value & 0xffffffff);
2501 return name;
2502 }
2503 ibfd = sec->owner;
2504 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2505 return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
2506 }
2507
2508 /* Read the instruction at OFF in SEC. Return true iff the instruction
2509 is a nop, lnop, or stop 0 (all zero insn). */
2510
2511 static bfd_boolean
2512 is_nop (asection *sec, bfd_vma off)
2513 {
2514 unsigned char insn[4];
2515
2516 if (off + 4 > sec->size
2517 || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
2518 return FALSE;
2519 if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
2520 return TRUE;
2521 if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
2522 return TRUE;
2523 return FALSE;
2524 }
2525
2526 /* Extend the range of FUN to cover nop padding up to LIMIT.
2527 Return TRUE iff some instruction other than a NOP was found. */
2528
2529 static bfd_boolean
2530 insns_at_end (struct function_info *fun, bfd_vma limit)
2531 {
2532 bfd_vma off = (fun->hi + 3) & -4;
2533
2534 while (off < limit && is_nop (fun->sec, off))
2535 off += 4;
2536 if (off < limit)
2537 {
2538 fun->hi = off;
2539 return TRUE;
2540 }
2541 fun->hi = limit;
2542 return FALSE;
2543 }
2544
2545 /* Check and fix overlapping function ranges. Return TRUE iff there
2546 are gaps in the current info we have about functions in SEC. */
2547
2548 static bfd_boolean
2549 check_function_ranges (asection *sec, struct bfd_link_info *info)
2550 {
2551 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2552 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2553 int i;
2554 bfd_boolean gaps = FALSE;
2555
2556 if (sinfo == NULL)
2557 return FALSE;
2558
2559 for (i = 1; i < sinfo->num_fun; i++)
2560 if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
2561 {
2562 /* Fix overlapping symbols. */
2563 const char *f1 = func_name (&sinfo->fun[i - 1]);
2564 const char *f2 = func_name (&sinfo->fun[i]);
2565
2566 /* xgettext:c-format */
2567 info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
2568 sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
2569 }
2570 else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
2571 gaps = TRUE;
2572
2573 if (sinfo->num_fun == 0)
2574 gaps = TRUE;
2575 else
2576 {
2577 if (sinfo->fun[0].lo != 0)
2578 gaps = TRUE;
2579 if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
2580 {
2581 const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);
2582
2583 info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
2584 sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
2585 }
2586 else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
2587 gaps = TRUE;
2588 }
2589 return gaps;
2590 }
2591
2592 /* Search current function info for a function that contains address
2593 OFFSET in section SEC. */
2594
2595 static struct function_info *
2596 find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
2597 {
2598 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2599 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2600 int lo, hi, mid;
2601
2602 lo = 0;
2603 hi = sinfo->num_fun;
2604 while (lo < hi)
2605 {
2606 mid = (lo + hi) / 2;
2607 if (offset < sinfo->fun[mid].lo)
2608 hi = mid;
2609 else if (offset >= sinfo->fun[mid].hi)
2610 lo = mid + 1;
2611 else
2612 return &sinfo->fun[mid];
2613 }
2614 /* xgettext:c-format */
2615 info->callbacks->einfo (_("%pA:0x%v not found in function table\n"),
2616 sec, offset);
2617 bfd_set_error (bfd_error_bad_value);
2618 return NULL;
2619 }
2620
2621 /* Add CALLEE to CALLER call list if not already present. Return TRUE
2622 if CALLEE was new. If this function return FALSE, CALLEE should
2623 be freed. */
2624
2625 static bfd_boolean
2626 insert_callee (struct function_info *caller, struct call_info *callee)
2627 {
2628 struct call_info **pp, *p;
2629
2630 for (pp = &caller->call_list; (p = *pp) != NULL; pp = &p->next)
2631 if (p->fun == callee->fun)
2632 {
2633 /* Tail calls use less stack than normal calls. Retain entry
2634 for normal call over one for tail call. */
2635 p->is_tail &= callee->is_tail;
2636 if (!p->is_tail)
2637 {
2638 p->fun->start = NULL;
2639 p->fun->is_func = TRUE;
2640 }
2641 p->count += callee->count;
2642 /* Reorder list so most recent call is first. */
2643 *pp = p->next;
2644 p->next = caller->call_list;
2645 caller->call_list = p;
2646 return FALSE;
2647 }
2648 callee->next = caller->call_list;
2649 caller->call_list = callee;
2650 return TRUE;
2651 }
2652
2653 /* Copy CALL and insert the copy into CALLER. */
2654
2655 static bfd_boolean
2656 copy_callee (struct function_info *caller, const struct call_info *call)
2657 {
2658 struct call_info *callee;
2659 callee = bfd_malloc (sizeof (*callee));
2660 if (callee == NULL)
2661 return FALSE;
2662 *callee = *call;
2663 if (!insert_callee (caller, callee))
2664 free (callee);
2665 return TRUE;
2666 }
2667
2668 /* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2669 overlay stub sections. */
2670
2671 static bfd_boolean
2672 interesting_section (asection *s)
2673 {
2674 return (s->output_section != bfd_abs_section_ptr
2675 && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_IN_MEMORY))
2676 == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2677 && s->size != 0);
2678 }
2679
2680 /* Rummage through the relocs for SEC, looking for function calls.
2681 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
2682 mark destination symbols on calls as being functions. Also
2683 look at branches, which may be tail calls or go to hot/cold
2684 section part of same function. */
2685
static bfd_boolean
mark_functions_via_relocs (asection *sec,
			   struct bfd_link_info *info,
			   int call_tree)
{
  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
  Elf_Internal_Shdr *symtab_hdr;
  void *psyms;
  unsigned int priority = 0;
  /* Emit the non-code-call warning only once per link.  */
  static bfd_boolean warned;

  if (!interesting_section (sec)
      || sec->reloc_count == 0)
    return TRUE;

  internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
					       info->keep_memory);
  if (internal_relocs == NULL)
    return FALSE;

  symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
  psyms = &symtab_hdr->contents;
  irela = internal_relocs;
  irelaend = irela + sec->reloc_count;
  for (; irela < irelaend; irela++)
    {
      enum elf_spu_reloc_type r_type;
      unsigned int r_indx;
      asection *sym_sec;
      Elf_Internal_Sym *sym;
      struct elf_link_hash_entry *h;
      bfd_vma val;
      bfd_boolean nonbranch, is_call;
      struct function_info *caller;
      struct call_info *callee;

      /* Only REL16/ADDR16 relocs can sit on branch insns; everything
	 else is treated as a possible function pointer reference.  */
      r_type = ELF32_R_TYPE (irela->r_info);
      nonbranch = r_type != R_SPU_REL16 && r_type != R_SPU_ADDR16;

      /* get_sym_h sets exactly one of H (global) or SYM (local).  */
      r_indx = ELF32_R_SYM (irela->r_info);
      if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
	return FALSE;

      if (sym_sec == NULL
	  || sym_sec->output_section == bfd_abs_section_ptr)
	continue;

      is_call = FALSE;
      if (!nonbranch)
	{
	  unsigned char insn[4];

	  if (!bfd_get_section_contents (sec->owner, sec, insn,
					 irela->r_offset, 4))
	    return FALSE;
	  if (is_branch (insn))
	    {
	      /* brsl/brasl set the link register: a call, not a jump.  */
	      is_call = (insn[0] & 0xfd) == 0x31;
	      /* The overlay manager priority is encoded in the low
		 bits of the branch displacement field.  */
	      priority = insn[1] & 0x0f;
	      priority <<= 8;
	      priority |= insn[2];
	      priority <<= 8;
	      priority |= insn[3];
	      priority >>= 7;
	      if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
		  != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
		{
		  if (!warned)
		    info->callbacks->einfo
		      /* xgettext:c-format */
		      (_("%pB(%pA+0x%v): call to non-code section"
			 " %pB(%pA), analysis incomplete\n"),
		       sec->owner, sec, irela->r_offset,
		       sym_sec->owner, sym_sec);
		  warned = TRUE;
		  continue;
		}
	    }
	  else
	    {
	      /* A 16-bit reloc on a non-branch insn: treat it like a
		 data reference, except for branch hints which carry
		 no new information.  */
	      nonbranch = TRUE;
	      if (is_hint (insn))
		continue;
	    }
	}

      if (nonbranch)
	{
	  /* For --auto-overlay, count possible stubs we need for
	     function pointer references.  */
	  unsigned int sym_type;
	  if (h)
	    sym_type = h->type;
	  else
	    sym_type = ELF_ST_TYPE (sym->st_info);
	  if (sym_type == STT_FUNC)
	    {
	      if (call_tree && spu_hash_table (info)->params->auto_overlay)
		spu_hash_table (info)->non_ovly_stub += 1;
	      /* If the symbol type is STT_FUNC then this must be a
		 function pointer initialisation.  */
	      continue;
	    }
	  /* Ignore data references.  */
	  if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
	      != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
	    continue;
	  /* Otherwise we probably have a jump table reloc for
	     a switch statement or some other reference to a
	     code label.  */
	}

      if (h)
	val = h->root.u.def.value;
      else
	val = sym->st_value;
      val += irela->r_addend;

      if (!call_tree)
	{
	  /* First pass: just record function start addresses.  */
	  struct function_info *fun;

	  if (irela->r_addend != 0)
	    {
	      /* The target is not the symbol itself; fake a local sym
		 at the resolved address.  */
	      Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
	      if (fake == NULL)
		return FALSE;
	      fake->st_value = val;
	      fake->st_shndx
		= _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
	      sym = fake;
	    }
	  if (sym)
	    fun = maybe_insert_function (sym_sec, sym, FALSE, is_call);
	  else
	    fun = maybe_insert_function (sym_sec, h, TRUE, is_call);
	  if (fun == NULL)
	    return FALSE;
	  /* Free the fake sym unless maybe_insert_function kept it.  */
	  if (irela->r_addend != 0
	      && fun->u.sym != sym)
	    free (sym);
	  continue;
	}

      /* Second pass: build caller->callee edges.  */
      caller = find_function (sec, irela->r_offset, info);
      if (caller == NULL)
	return FALSE;
      callee = bfd_malloc (sizeof *callee);
      if (callee == NULL)
	return FALSE;

      callee->fun = find_function (sym_sec, val, info);
      if (callee->fun == NULL)
	return FALSE;
      callee->is_tail = !is_call;
      callee->is_pasted = FALSE;
      callee->broken_cycle = FALSE;
      callee->priority = priority;
      /* Non-branch references don't count as dynamic calls.  */
      callee->count = nonbranch? 0 : 1;
      if (callee->fun->last_caller != sec)
	{
	  callee->fun->last_caller = sec;
	  callee->fun->call_count += 1;
	}
      if (!insert_callee (caller, callee))
	free (callee);
      else if (!is_call
	       && !callee->fun->is_func
	       && callee->fun->stack == 0)
	{
	  /* This is either a tail call or a branch from one part of
	     the function to another, ie. hot/cold section.  If the
	     destination has been called by some other function then
	     it is a separate function.  We also assume that functions
	     are not split across input files.  */
	  if (sec->owner != sym_sec->owner)
	    {
	      callee->fun->start = NULL;
	      callee->fun->is_func = TRUE;
	    }
	  else if (callee->fun->start == NULL)
	    {
	      /* Link the destination part to the entry part of the
		 caller's function.  */
	      struct function_info *caller_start = caller;
	      while (caller_start->start)
		caller_start = caller_start->start;

	      if (caller_start != callee->fun)
		callee->fun->start = caller_start;
	    }
	  else
	    {
	      /* Destination already belongs to a function; if it is a
		 different one, it must be a real separate function.  */
	      struct function_info *callee_start;
	      struct function_info *caller_start;
	      callee_start = callee->fun;
	      while (callee_start->start)
		callee_start = callee_start->start;
	      caller_start = caller;
	      while (caller_start->start)
		caller_start = caller_start->start;
	      if (caller_start != callee_start)
		{
		  callee->fun->start = NULL;
		  callee->fun->is_func = TRUE;
		}
	    }
	}
    }

  return TRUE;
}
2896
2897 /* Handle something like .init or .fini, which has a piece of a function.
2898 These sections are pasted together to form a single function. */
2899
2900 static bfd_boolean
2901 pasted_function (asection *sec)
2902 {
2903 struct bfd_link_order *l;
2904 struct _spu_elf_section_data *sec_data;
2905 struct spu_elf_stack_info *sinfo;
2906 Elf_Internal_Sym *fake;
2907 struct function_info *fun, *fun_start;
2908
2909 fake = bfd_zmalloc (sizeof (*fake));
2910 if (fake == NULL)
2911 return FALSE;
2912 fake->st_value = 0;
2913 fake->st_size = sec->size;
2914 fake->st_shndx
2915 = _bfd_elf_section_from_bfd_section (sec->owner, sec);
2916 fun = maybe_insert_function (sec, fake, FALSE, FALSE);
2917 if (!fun)
2918 return FALSE;
2919
2920 /* Find a function immediately preceding this section. */
2921 fun_start = NULL;
2922 for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
2923 {
2924 if (l->u.indirect.section == sec)
2925 {
2926 if (fun_start != NULL)
2927 {
2928 struct call_info *callee = bfd_malloc (sizeof *callee);
2929 if (callee == NULL)
2930 return FALSE;
2931
2932 fun->start = fun_start;
2933 callee->fun = fun;
2934 callee->is_tail = TRUE;
2935 callee->is_pasted = TRUE;
2936 callee->broken_cycle = FALSE;
2937 callee->priority = 0;
2938 callee->count = 1;
2939 if (!insert_callee (fun_start, callee))
2940 free (callee);
2941 return TRUE;
2942 }
2943 break;
2944 }
2945 if (l->type == bfd_indirect_link_order
2946 && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
2947 && (sinfo = sec_data->u.i.stack_info) != NULL
2948 && sinfo->num_fun != 0)
2949 fun_start = &sinfo->fun[sinfo->num_fun - 1];
2950 }
2951
2952 /* Don't return an error if we did not find a function preceding this
2953 section. The section may have incorrect flags. */
2954 return TRUE;
2955 }
2956
2957 /* Map address ranges in code sections to functions. */
2958
static bfd_boolean
discover_functions (struct bfd_link_info *info)
{
  bfd *ibfd;
  int bfd_idx;
  /* Per-input-bfd arrays: sorted pointers into the symbol table, and
     the section each symbol lives in (indexed by symbol number).  */
  Elf_Internal_Sym ***psym_arr;
  asection ***sec_arr;
  bfd_boolean gaps = FALSE;

  /* Count the input bfds to size the per-bfd arrays.  */
  bfd_idx = 0;
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    bfd_idx++;

  psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
  if (psym_arr == NULL)
    return FALSE;
  sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
  if (sec_arr == NULL)
    return FALSE;

  for (ibfd = info->input_bfds, bfd_idx = 0;
       ibfd != NULL;
       ibfd = ibfd->link.next, bfd_idx++)
    {
      extern const bfd_target spu_elf32_vec;
      Elf_Internal_Shdr *symtab_hdr;
      asection *sec;
      size_t symcount;
      Elf_Internal_Sym *syms, *sy, **psyms, **psy;
      asection **psecs, **p;

      if (ibfd->xvec != &spu_elf32_vec)
	continue;

      /* Read all the symbols.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
      if (symcount == 0)
	{
	  /* No symbols at all: any interesting code section is an
	     unmapped gap to fill in via relocs later.  */
	  if (!gaps)
	    for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	      if (interesting_section (sec))
		{
		  gaps = TRUE;
		  break;
		}
	  continue;
	}

      if (symtab_hdr->contents != NULL)
	{
	  /* Don't use cached symbols since the generic ELF linker
	     code only reads local symbols, and we need globals too.  */
	  free (symtab_hdr->contents);
	  symtab_hdr->contents = NULL;
	}
      syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
				   NULL, NULL, NULL);
      symtab_hdr->contents = (void *) syms;
      if (syms == NULL)
	return FALSE;

      /* Select defined function symbols that are going to be output.  */
      psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
      if (psyms == NULL)
	return FALSE;
      psym_arr[bfd_idx] = psyms;
      psecs = bfd_malloc (symcount * sizeof (*psecs));
      if (psecs == NULL)
	return FALSE;
      sec_arr[bfd_idx] = psecs;
      /* Keep STT_NOTYPE too: hand-written asm functions are often
	 untyped.  psecs records every symbol's section; psyms only
	 collects symbols in interesting sections.  */
      for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
	if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
	    || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
	  {
	    asection *s;

	    *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
	    if (s != NULL && interesting_section (s))
	      *psy++ = sy;
	  }
      symcount = psy - psyms;
      *psy = NULL;

      /* Sort them by section and offset within section.  */
      sort_syms_syms = syms;
      sort_syms_psecs = psecs;
      qsort (psyms, symcount, sizeof (*psyms), sort_syms);

      /* Now inspect the function symbols.  */
      for (psy = psyms; psy < psyms + symcount; )
	{
	  asection *s = psecs[*psy - syms];
	  Elf_Internal_Sym **psy2;

	  /* Find the run of symbols in the same section, and size the
	     section's function array to hold them all.  */
	  for (psy2 = psy; ++psy2 < psyms + symcount; )
	    if (psecs[*psy2 - syms] != s)
	      break;

	  if (!alloc_stack_info (s, psy2 - psy))
	    return FALSE;
	  psy = psy2;
	}

      /* First install info about properly typed and sized functions.
	 In an ideal world this will cover all code sections, except
	 when partitioning functions into hot and cold sections,
	 and the horrible pasted together .init and .fini functions.  */
      for (psy = psyms; psy < psyms + symcount; ++psy)
	{
	  sy = *psy;
	  if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
	    {
	      asection *s = psecs[sy - syms];
	      if (!maybe_insert_function (s, sy, FALSE, TRUE))
		return FALSE;
	    }
	}

      for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	if (interesting_section (sec))
	  gaps |= check_function_ranges (sec, info);
    }

  if (gaps)
    {
      /* See if we can discover more function symbols by looking at
	 relocations.  */
      for (ibfd = info->input_bfds, bfd_idx = 0;
	   ibfd != NULL;
	   ibfd = ibfd->link.next, bfd_idx++)
	{
	  asection *sec;

	  if (psym_arr[bfd_idx] == NULL)
	    continue;

	  for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	    if (!mark_functions_via_relocs (sec, info, FALSE))
	      return FALSE;
	}

      for (ibfd = info->input_bfds, bfd_idx = 0;
	   ibfd != NULL;
	   ibfd = ibfd->link.next, bfd_idx++)
	{
	  Elf_Internal_Shdr *symtab_hdr;
	  asection *sec;
	  Elf_Internal_Sym *syms, *sy, **psyms, **psy;
	  asection **psecs;

	  if ((psyms = psym_arr[bfd_idx]) == NULL)
	    continue;

	  psecs = sec_arr[bfd_idx];

	  symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
	  syms = (Elf_Internal_Sym *) symtab_hdr->contents;

	  /* Re-check after the reloc pass; skip bfds now fully
	     covered.  */
	  gaps = FALSE;
	  for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	    if (interesting_section (sec))
	      gaps |= check_function_ranges (sec, info);
	  if (!gaps)
	    continue;

	  /* Finally, install all globals.  */
	  for (psy = psyms; (sy = *psy) != NULL; ++psy)
	    {
	      asection *s;

	      s = psecs[sy - syms];

	      /* Global syms might be improperly typed functions.  */
	      if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
		  && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
		{
		  if (!maybe_insert_function (s, sy, FALSE, FALSE))
		    return FALSE;
		}
	    }
	}

      for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
	{
	  extern const bfd_target spu_elf32_vec;
	  asection *sec;

	  if (ibfd->xvec != &spu_elf32_vec)
	    continue;

	  /* Some of the symbols we've installed as marking the
	     beginning of functions may have a size of zero.  Extend
	     the range of such functions to the beginning of the
	     next symbol of interest.  */
	  for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	    if (interesting_section (sec))
	      {
		struct _spu_elf_section_data *sec_data;
		struct spu_elf_stack_info *sinfo;

		sec_data = spu_elf_section_data (sec);
		sinfo = sec_data->u.i.stack_info;
		if (sinfo != NULL && sinfo->num_fun != 0)
		  {
		    /* Working backwards, each function's hi becomes
		       the next function's lo; the first starts at 0.  */
		    int fun_idx;
		    bfd_vma hi = sec->size;

		    for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
		      {
			sinfo->fun[fun_idx].hi = hi;
			hi = sinfo->fun[fun_idx].lo;
		      }

		    sinfo->fun[0].lo = 0;
		  }
		/* No symbols in this section.  Must be .init or .fini
		   or something similar.  */
		else if (!pasted_function (sec))
		  return FALSE;
	      }
	}
    }

  /* Free the per-bfd scratch arrays; the symbol tables themselves
     stay cached in symtab_hdr->contents.  */
  for (ibfd = info->input_bfds, bfd_idx = 0;
       ibfd != NULL;
       ibfd = ibfd->link.next, bfd_idx++)
    {
      if (psym_arr[bfd_idx] == NULL)
	continue;

      free (psym_arr[bfd_idx]);
      free (sec_arr[bfd_idx]);
    }

  free (psym_arr);
  free (sec_arr);

  return TRUE;
}
3199
3200 /* Iterate over all function_info we have collected, calling DOIT on
3201 each node if ROOT_ONLY is false. Only call DOIT on root nodes
3202 if ROOT_ONLY. */
3203
3204 static bfd_boolean
3205 for_each_node (bfd_boolean (*doit) (struct function_info *,
3206 struct bfd_link_info *,
3207 void *),
3208 struct bfd_link_info *info,
3209 void *param,
3210 int root_only)
3211 {
3212 bfd *ibfd;
3213
3214 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3215 {
3216 extern const bfd_target spu_elf32_vec;
3217 asection *sec;
3218
3219 if (ibfd->xvec != &spu_elf32_vec)
3220 continue;
3221
3222 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3223 {
3224 struct _spu_elf_section_data *sec_data;
3225 struct spu_elf_stack_info *sinfo;
3226
3227 if ((sec_data = spu_elf_section_data (sec)) != NULL
3228 && (sinfo = sec_data->u.i.stack_info) != NULL)
3229 {
3230 int i;
3231 for (i = 0; i < sinfo->num_fun; ++i)
3232 if (!root_only || !sinfo->fun[i].non_root)
3233 if (!doit (&sinfo->fun[i], info, param))
3234 return FALSE;
3235 }
3236 }
3237 }
3238 return TRUE;
3239 }
3240
3241 /* Transfer call info attached to struct function_info entries for
3242 all of a given function's sections to the first entry. */
3243
3244 static bfd_boolean
3245 transfer_calls (struct function_info *fun,
3246 struct bfd_link_info *info ATTRIBUTE_UNUSED,
3247 void *param ATTRIBUTE_UNUSED)
3248 {
3249 struct function_info *start = fun->start;
3250
3251 if (start != NULL)
3252 {
3253 struct call_info *call, *call_next;
3254
3255 while (start->start != NULL)
3256 start = start->start;
3257 for (call = fun->call_list; call != NULL; call = call_next)
3258 {
3259 call_next = call->next;
3260 if (!insert_callee (start, call))
3261 free (call);
3262 }
3263 fun->call_list = NULL;
3264 }
3265 return TRUE;
3266 }
3267
3268 /* Mark nodes in the call graph that are called by some other node. */
3269
3270 static bfd_boolean
3271 mark_non_root (struct function_info *fun,
3272 struct bfd_link_info *info ATTRIBUTE_UNUSED,
3273 void *param ATTRIBUTE_UNUSED)
3274 {
3275 struct call_info *call;
3276
3277 if (fun->visit1)
3278 return TRUE;
3279 fun->visit1 = TRUE;
3280 for (call = fun->call_list; call; call = call->next)
3281 {
3282 call->fun->non_root = TRUE;
3283 mark_non_root (call->fun, 0, 0);
3284 }
3285 return TRUE;
3286 }
3287
3288 /* Remove cycles from the call graph. Set depth of nodes. */
3289
static bfd_boolean
remove_cycles (struct function_info *fun,
	       struct bfd_link_info *info,
	       void *param)
{
  struct call_info **callp, *call;
  /* PARAM carries the depth down into the recursion and the maximum
     depth seen back up to the caller.  */
  unsigned int depth = *(unsigned int *) param;
  unsigned int max_depth = depth;

  fun->depth = depth;
  /* visit2: node has been processed; marking: node is on the current
     DFS stack, so reaching it again means a cycle.  */
  fun->visit2 = TRUE;
  fun->marking = TRUE;

  callp = &fun->call_list;
  while ((call = *callp) != NULL)
    {
      /* Pasted .init/.fini pieces are the same function, so they
	 don't add a level of call depth.  */
      call->max_depth = depth + !call->is_pasted;
      if (!call->fun->visit2)
	{
	  if (!remove_cycles (call->fun, info, &call->max_depth))
	    return FALSE;
	  if (max_depth < call->max_depth)
	    max_depth = call->max_depth;
	}
      else if (call->fun->marking)
	{
	  /* Back edge: CALL closes a cycle.  */
	  struct spu_link_hash_table *htab = spu_hash_table (info);

	  if (!htab->params->auto_overlay
	      && htab->params->stack_analysis)
	    {
	      const char *f1 = func_name (fun);
	      const char *f2 = func_name (call->fun);

	      /* xgettext:c-format */
	      info->callbacks->info (_("stack analysis will ignore the call "
				       "from %s to %s\n"),
				     f1, f2);
	    }

	  call->broken_cycle = TRUE;
	}
      callp = &call->next;
    }
  fun->marking = FALSE;
  *(unsigned int *) param = max_depth;
  return TRUE;
}
3338
3339 /* Check that we actually visited all nodes in remove_cycles. If we
3340 didn't, then there is some cycle in the call graph not attached to
3341 any root node. Arbitrarily choose a node in the cycle as a new
3342 root and break the cycle. */
3343
3344 static bfd_boolean
3345 mark_detached_root (struct function_info *fun,
3346 struct bfd_link_info *info,
3347 void *param)
3348 {
3349 if (fun->visit2)
3350 return TRUE;
3351 fun->non_root = FALSE;
3352 *(unsigned int *) param = 0;
3353 return remove_cycles (fun, info, param);
3354 }
3355
3356 /* Populate call_list for each function. */
3357
static bfd_boolean
build_call_tree (struct bfd_link_info *info)
{
  bfd *ibfd;
  unsigned int depth;

  /* Scan relocations in every SPU input section to record call
     edges.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      extern const bfd_target spu_elf32_vec;
      asection *sec;

      if (ibfd->xvec != &spu_elf32_vec)
	continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	if (!mark_functions_via_relocs (sec, info, TRUE))
	  return FALSE;
    }

  /* Transfer call info from hot/cold section part of function
     to main entry.  */
  if (!spu_hash_table (info)->params->auto_overlay
      && !for_each_node (transfer_calls, info, 0, FALSE))
    return FALSE;

  /* Find the call graph root(s).  */
  if (!for_each_node (mark_non_root, info, 0, FALSE))
    return FALSE;

  /* Remove cycles from the call graph.  We start from the root node(s)
     so that we break cycles in a reasonable place.  */
  depth = 0;
  if (!for_each_node (remove_cycles, info, &depth, TRUE))
    return FALSE;

  /* Any node still unvisited lies on a detached cycle; pick new roots
     there and break those cycles too.  */
  return for_each_node (mark_detached_root, info, &depth, FALSE);
}
3395
3396 /* qsort predicate to sort calls by priority, max_depth then count. */
3397
3398 static int
3399 sort_calls (const void *a, const void *b)
3400 {
3401 struct call_info *const *c1 = a;
3402 struct call_info *const *c2 = b;
3403 int delta;
3404
3405 delta = (*c2)->priority - (*c1)->priority;
3406 if (delta != 0)
3407 return delta;
3408
3409 delta = (*c2)->max_depth - (*c1)->max_depth;
3410 if (delta != 0)
3411 return delta;
3412
3413 delta = (*c2)->count - (*c1)->count;
3414 if (delta != 0)
3415 return delta;
3416
3417 return (char *) c1 - (char *) c2;
3418 }
3419
struct _mos_param {
  /* Largest single overlay candidate (text plus optional rodata)
     seen so far; updated by mark_overlay_section.  */
  unsigned int max_overlay_size;
};
3423
3424 /* Set linker_mark and gc_mark on any sections that we will put in
3425 overlays. These flags are used by the generic ELF linker, but we
3426 won't be continuing on to bfd_elf_final_link so it is OK to use
3427 them. linker_mark is clear before we get here. Set segment_mark
3428 on sections that are part of a pasted function (excluding the last
3429 section).
3430
   Set up function rodata section if --overlay-rodata.  We don't
   currently include merged string constant rodata sections since
   they can't be matched by name to a particular function's text
   section.
3433
3434 Sort the call graph so that the deepest nodes will be visited
3435 first. */
3436
static bfd_boolean
mark_overlay_section (struct function_info *fun,
		      struct bfd_link_info *info,
		      void *param)
{
  struct call_info *call;
  unsigned int count;
  struct _mos_param *mos_param = param;
  struct spu_link_hash_table *htab = spu_hash_table (info);

  if (fun->visit4)
    return TRUE;

  fun->visit4 = TRUE;
  /* For soft-icache, only certain text sections are eligible unless
     non_ia_text is set.  */
  if (!fun->sec->linker_mark
      && (htab->params->ovly_flavour != ovly_soft_icache
	  || htab->params->non_ia_text
	  || strncmp (fun->sec->name, ".text.ia.", 9) == 0
	  || strcmp (fun->sec->name, ".init") == 0
	  || strcmp (fun->sec->name, ".fini") == 0))
    {
      unsigned int size;

      fun->sec->linker_mark = 1;
      fun->sec->gc_mark = 1;
      fun->sec->segment_mark = 0;
      /* Ensure SEC_CODE is set on this text section (it ought to
	 be!), and SEC_CODE is clear on rodata sections.  We use
	 this flag to differentiate the two overlay section types.  */
      fun->sec->flags |= SEC_CODE;

      size = fun->sec->size;
      if (htab->params->auto_overlay & OVERLAY_RODATA)
	{
	  char *name = NULL;

	  /* Find the rodata section corresponding to this function's
	     text section.  Build the expected rodata section name
	     from the text section name.  */
	  if (strcmp (fun->sec->name, ".text") == 0)
	    {
	      name = bfd_malloc (sizeof (".rodata"));
	      if (name == NULL)
		return FALSE;
	      memcpy (name, ".rodata", sizeof (".rodata"));
	    }
	  else if (strncmp (fun->sec->name, ".text.", 6) == 0)
	    {
	      /* ".text.foo" -> ".rodata.foo".  */
	      size_t len = strlen (fun->sec->name);
	      name = bfd_malloc (len + 3);
	      if (name == NULL)
		return FALSE;
	      memcpy (name, ".rodata", sizeof (".rodata"));
	      memcpy (name + 7, fun->sec->name + 5, len - 4);
	    }
	  else if (strncmp (fun->sec->name, ".gnu.linkonce.t.", 16) == 0)
	    {
	      /* ".gnu.linkonce.t.foo" -> ".gnu.linkonce.r.foo".  */
	      size_t len = strlen (fun->sec->name) + 1;
	      name = bfd_malloc (len);
	      if (name == NULL)
		return FALSE;
	      memcpy (name, fun->sec->name, len);
	      name[14] = 'r';
	    }

	  if (name != NULL)
	    {
	      asection *rodata = NULL;
	      asection *group_sec = elf_section_data (fun->sec)->next_in_group;
	      /* Prefer a section from the same COMDAT group, if any.  */
	      if (group_sec == NULL)
		rodata = bfd_get_section_by_name (fun->sec->owner, name);
	      else
		while (group_sec != NULL && group_sec != fun->sec)
		  {
		    if (strcmp (group_sec->name, name) == 0)
		      {
			rodata = group_sec;
			break;
		      }
		    group_sec = elf_section_data (group_sec)->next_in_group;
		  }
	      fun->rodata = rodata;
	      if (fun->rodata)
		{
		  size += fun->rodata->size;
		  /* Drop the rodata again if text+rodata would exceed
		     the icache line size.  */
		  if (htab->params->line_size != 0
		      && size > htab->params->line_size)
		    {
		      size -= fun->rodata->size;
		      fun->rodata = NULL;
		    }
		  else
		    {
		      fun->rodata->linker_mark = 1;
		      fun->rodata->gc_mark = 1;
		      fun->rodata->flags &= ~SEC_CODE;
		    }
		}
	      free (name);
	    }
	}
      if (mos_param->max_overlay_size < size)
	mos_param->max_overlay_size = size;
    }

  /* Sort the call list so deeper/higher-priority calls come first.  */
  for (count = 0, call = fun->call_list; call != NULL; call = call->next)
    count += 1;

  if (count > 1)
    {
      struct call_info **calls = bfd_malloc (count * sizeof (*calls));
      if (calls == NULL)
	return FALSE;

      for (count = 0, call = fun->call_list; call != NULL; call = call->next)
	calls[count++] = call;

      qsort (calls, count, sizeof (*calls), sort_calls);

      /* Rebuild the singly linked list in sorted order.  */
      fun->call_list = NULL;
      while (count != 0)
	{
	  --count;
	  calls[count]->next = fun->call_list;
	  fun->call_list = calls[count];
	}
      free (calls);
    }

  for (call = fun->call_list; call != NULL; call = call->next)
    {
      if (call->is_pasted)
	{
	  /* There can only be one is_pasted call per function_info.  */
	  BFD_ASSERT (!fun->sec->segment_mark);
	  fun->sec->segment_mark = 1;
	}
      if (!call->broken_cycle
	  && !mark_overlay_section (call->fun, info, param))
	return FALSE;
    }

  /* Don't put entry code into an overlay.  The overlay manager needs
     a stack!  Also, don't mark .ovl.init as an overlay.  */
  if (fun->lo + fun->sec->output_offset + fun->sec->output_section->vma
      == info->output_bfd->start_address
      || strncmp (fun->sec->output_section->name, ".ovl.init", 9) == 0)
    {
      fun->sec->linker_mark = 0;
      if (fun->rodata != NULL)
	fun->rodata->linker_mark = 0;
    }
  return TRUE;
}
3590
3591 /* If non-zero then unmark functions called from those within sections
3592 that we need to unmark. Unfortunately this isn't reliable since the
3593 call graph cannot know the destination of function pointer calls. */
3594 #define RECURSE_UNMARK 0
3595
struct _uos_param {
  /* Input section that must stay out of overlays (e.g. a
     user-supplied overlay manager), or NULL.  */
  asection *exclude_input_section;
  /* Output section whose contents must stay out of overlays,
     or NULL.  */
  asection *exclude_output_section;
  /* When RECURSE_UNMARK, count of excluded sections on the current
     call-graph path.  */
  unsigned long clearing;
};
3601
3602 /* Undo some of mark_overlay_section's work. */
3603
3604 static bfd_boolean
3605 unmark_overlay_section (struct function_info *fun,
3606 struct bfd_link_info *info,
3607 void *param)
3608 {
3609 struct call_info *call;
3610 struct _uos_param *uos_param = param;
3611 unsigned int excluded = 0;
3612
3613 if (fun->visit5)
3614 return TRUE;
3615
3616 fun->visit5 = TRUE;
3617
3618 excluded = 0;
3619 if (fun->sec == uos_param->exclude_input_section
3620 || fun->sec->output_section == uos_param->exclude_output_section)
3621 excluded = 1;
3622
3623 if (RECURSE_UNMARK)
3624 uos_param->clearing += excluded;
3625
3626 if (RECURSE_UNMARK ? uos_param->clearing : excluded)
3627 {
3628 fun->sec->linker_mark = 0;
3629 if (fun->rodata)
3630 fun->rodata->linker_mark = 0;
3631 }
3632
3633 for (call = fun->call_list; call != NULL; call = call->next)
3634 if (!call->broken_cycle
3635 && !unmark_overlay_section (call->fun, info, param))
3636 return FALSE;
3637
3638 if (RECURSE_UNMARK)
3639 uos_param->clearing -= excluded;
3640 return TRUE;
3641 }
3642
struct _cl_param {
  /* Size threshold: only sections no larger than this are
     collected.  */
  unsigned int lib_size;
  /* Output cursor into an array of (text, rodata) section pairs.  */
  asection **lib_sections;
};
3647
3648 /* Add sections we have marked as belonging to overlays to an array
3649 for consideration as non-overlay sections. The array consist of
3650 pairs of sections, (text,rodata), for functions in the call graph. */
3651
3652 static bfd_boolean
3653 collect_lib_sections (struct function_info *fun,
3654 struct bfd_link_info *info,
3655 void *param)
3656 {
3657 struct _cl_param *lib_param = param;
3658 struct call_info *call;
3659 unsigned int size;
3660
3661 if (fun->visit6)
3662 return TRUE;
3663
3664 fun->visit6 = TRUE;
3665 if (!fun->sec->linker_mark || !fun->sec->gc_mark || fun->sec->segment_mark)
3666 return TRUE;
3667
3668 size = fun->sec->size;
3669 if (fun->rodata)
3670 size += fun->rodata->size;
3671
3672 if (size <= lib_param->lib_size)
3673 {
3674 *lib_param->lib_sections++ = fun->sec;
3675 fun->sec->gc_mark = 0;
3676 if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
3677 {
3678 *lib_param->lib_sections++ = fun->rodata;
3679 fun->rodata->gc_mark = 0;
3680 }
3681 else
3682 *lib_param->lib_sections++ = NULL;
3683 }
3684
3685 for (call = fun->call_list; call != NULL; call = call->next)
3686 if (!call->broken_cycle)
3687 collect_lib_sections (call->fun, info, param);
3688
3689 return TRUE;
3690 }
3691
3692 /* qsort predicate to sort sections by call count. */
3693
3694 static int
3695 sort_lib (const void *a, const void *b)
3696 {
3697 asection *const *s1 = a;
3698 asection *const *s2 = b;
3699 struct _spu_elf_section_data *sec_data;
3700 struct spu_elf_stack_info *sinfo;
3701 int delta;
3702
3703 delta = 0;
3704 if ((sec_data = spu_elf_section_data (*s1)) != NULL
3705 && (sinfo = sec_data->u.i.stack_info) != NULL)
3706 {
3707 int i;
3708 for (i = 0; i < sinfo->num_fun; ++i)
3709 delta -= sinfo->fun[i].call_count;
3710 }
3711
3712 if ((sec_data = spu_elf_section_data (*s2)) != NULL
3713 && (sinfo = sec_data->u.i.stack_info) != NULL)
3714 {
3715 int i;
3716 for (i = 0; i < sinfo->num_fun; ++i)
3717 delta += sinfo->fun[i].call_count;
3718 }
3719
3720 if (delta != 0)
3721 return delta;
3722
3723 return s1 - s2;
3724 }
3725
3726 /* Remove some sections from those marked to be in overlays. Choose
3727 those that are called from many places, likely library functions. */
3728
3729 static unsigned int
3730 auto_ovl_lib_functions (struct bfd_link_info *info, unsigned int lib_size)
3731 {
3732 bfd *ibfd;
3733 asection **lib_sections;
3734 unsigned int i, lib_count;
3735 struct _cl_param collect_lib_param;
3736 struct function_info dummy_caller;
3737 struct spu_link_hash_table *htab;
3738
3739 memset (&dummy_caller, 0, sizeof (dummy_caller));
3740 lib_count = 0;
3741 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3742 {
3743 extern const bfd_target spu_elf32_vec;
3744 asection *sec;
3745
3746 if (ibfd->xvec != &spu_elf32_vec)
3747 continue;
3748
3749 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3750 if (sec->linker_mark
3751 && sec->size < lib_size
3752 && (sec->flags & SEC_CODE) != 0)
3753 lib_count += 1;
3754 }
3755 lib_sections = bfd_malloc (lib_count * 2 * sizeof (*lib_sections));
3756 if (lib_sections == NULL)
3757 return (unsigned int) -1;
3758 collect_lib_param.lib_size = lib_size;
3759 collect_lib_param.lib_sections = lib_sections;
3760 if (!for_each_node (collect_lib_sections, info, &collect_lib_param,
3761 TRUE))
3762 return (unsigned int) -1;
3763 lib_count = (collect_lib_param.lib_sections - lib_sections) / 2;
3764
3765 /* Sort sections so that those with the most calls are first. */
3766 if (lib_count > 1)
3767 qsort (lib_sections, lib_count, 2 * sizeof (*lib_sections), sort_lib);
3768
3769 htab = spu_hash_table (info);
3770 for (i = 0; i < lib_count; i++)
3771 {
3772 unsigned int tmp, stub_size;
3773 asection *sec;
3774 struct _spu_elf_section_data *sec_data;
3775 struct spu_elf_stack_info *sinfo;
3776
3777 sec = lib_sections[2 * i];
3778 /* If this section is OK, its size must be less than lib_size. */
3779 tmp = sec->size;
3780 /* If it has a rodata section, then add that too. */
3781 if (lib_sections[2 * i + 1])
3782 tmp += lib_sections[2 * i + 1]->size;
3783 /* Add any new overlay call stubs needed by the section. */
3784 stub_size = 0;
3785 if (tmp < lib_size
3786 && (sec_data = spu_elf_section_data (sec)) != NULL
3787 && (sinfo = sec_data->u.i.stack_info) != NULL)
3788 {
3789 int k;
3790 struct call_info *call;
3791
3792 for (k = 0; k < sinfo->num_fun; ++k)
3793 for (call = sinfo->fun[k].call_list; call; call = call->next)
3794 if (call->fun->sec->linker_mark)
3795 {
3796 struct call_info *p;
3797 for (p = dummy_caller.call_list; p; p = p->next)
3798 if (p->fun == call->fun)
3799 break;
3800 if (!p)
3801 stub_size += ovl_stub_size (htab->params);
3802 }
3803 }
3804 if (tmp + stub_size < lib_size)
3805 {
3806 struct call_info **pp, *p;
3807
3808 /* This section fits. Mark it as non-overlay. */
3809 lib_sections[2 * i]->linker_mark = 0;
3810 if (lib_sections[2 * i + 1])
3811 lib_sections[2 * i + 1]->linker_mark = 0;
3812 lib_size -= tmp + stub_size;
3813 /* Call stubs to the section we just added are no longer
3814 needed. */
3815 pp = &dummy_caller.call_list;
3816 while ((p = *pp) != NULL)
3817 if (!p->fun->sec->linker_mark)
3818 {
3819 lib_size += ovl_stub_size (htab->params);
3820 *pp = p->next;
3821 free (p);
3822 }
3823 else
3824 pp = &p->next;
3825 /* Add new call stubs to dummy_caller. */
3826 if ((sec_data = spu_elf_section_data (sec)) != NULL
3827 && (sinfo = sec_data->u.i.stack_info) != NULL)
3828 {
3829 int k;
3830 struct call_info *call;
3831
3832 for (k = 0; k < sinfo->num_fun; ++k)
3833 for (call = sinfo->fun[k].call_list;
3834 call;
3835 call = call->next)
3836 if (call->fun->sec->linker_mark)
3837 {
3838 struct call_info *callee;
3839 callee = bfd_malloc (sizeof (*callee));
3840 if (callee == NULL)
3841 return (unsigned int) -1;
3842 *callee = *call;
3843 if (!insert_callee (&dummy_caller, callee))
3844 free (callee);
3845 }
3846 }
3847 }
3848 }
3849 while (dummy_caller.call_list != NULL)
3850 {
3851 struct call_info *call = dummy_caller.call_list;
3852 dummy_caller.call_list = call->next;
3853 free (call);
3854 }
3855 for (i = 0; i < 2 * lib_count; i++)
3856 if (lib_sections[i])
3857 lib_sections[i]->gc_mark = 1;
3858 free (lib_sections);
3859 return lib_size;
3860 }
3861
3862 /* Build an array of overlay sections. The deepest node's section is
3863 added first, then its parent node's section, then everything called
3864 from the parent section. The idea being to group sections to
3865 minimise calls between different overlays. */
3866
static bfd_boolean
collect_overlays (struct function_info *fun,
		  struct bfd_link_info *info,
		  void *param)
{
  struct call_info *call;
  bfd_boolean added_fun;
  asection ***ovly_sections = param;

  if (fun->visit7)
    return TRUE;

  fun->visit7 = TRUE;
  /* First recurse into the first non-pasted, non-broken call so the
     deepest node's section lands in the array before ours.  */
  for (call = fun->call_list; call != NULL; call = call->next)
    if (!call->is_pasted && !call->broken_cycle)
      {
	if (!collect_overlays (call->fun, info, ovly_sections))
	  return FALSE;
	break;
      }

  added_fun = FALSE;
  if (fun->sec->linker_mark && fun->sec->gc_mark)
    {
      /* Append the (text, rodata) pair to the output array and clear
	 gc_mark so it isn't added twice.  */
      fun->sec->gc_mark = 0;
      *(*ovly_sections)++ = fun->sec;
      if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
	{
	  fun->rodata->gc_mark = 0;
	  *(*ovly_sections)++ = fun->rodata;
	}
      else
	*(*ovly_sections)++ = NULL;
      added_fun = TRUE;

      /* Pasted sections must stay with the first section.  We don't
	 put pasted sections in the array, just the first section.
	 Mark subsequent sections as already considered.  */
      if (fun->sec->segment_mark)
	{
	  struct function_info *call_fun = fun;
	  do
	    {
	      for (call = call_fun->call_list; call != NULL; call = call->next)
		if (call->is_pasted)
		  {
		    call_fun = call->fun;
		    call_fun->sec->gc_mark = 0;
		    if (call_fun->rodata)
		      call_fun->rodata->gc_mark = 0;
		    break;
		  }
	      if (call == NULL)
		abort ();
	    }
	  while (call_fun->sec->segment_mark);
	}
    }

  /* Now visit every remaining callee.  */
  for (call = fun->call_list; call != NULL; call = call->next)
    if (!call->broken_cycle
	&& !collect_overlays (call->fun, info, ovly_sections))
      return FALSE;

  if (added_fun)
    {
      struct _spu_elf_section_data *sec_data;
      struct spu_elf_stack_info *sinfo;

      /* Also visit all other functions defined in this section.  */
      if ((sec_data = spu_elf_section_data (fun->sec)) != NULL
	  && (sinfo = sec_data->u.i.stack_info) != NULL)
	{
	  int i;
	  for (i = 0; i < sinfo->num_fun; ++i)
	    if (!collect_overlays (&sinfo->fun[i], info, ovly_sections))
	      return FALSE;
	}
    }

  return TRUE;
}
3948
struct _sum_stack_param {
  /* Cumulative stack of the function just visited (output).  */
  size_t cum_stack;
  /* Largest cumulative stack over all root functions (output).  */
  size_t overall_stack;
  /* Whether to define __stack_* absolute symbols for each function.  */
  bfd_boolean emit_stack_syms;
};
3954
3955 /* Descend the call graph for FUN, accumulating total stack required. */
3956
static bfd_boolean
sum_stack (struct function_info *fun,
	   struct bfd_link_info *info,
	   void *param)
{
  struct call_info *call;
  struct function_info *max;
  size_t stack, cum_stack;
  const char *f1;
  bfd_boolean has_call;
  struct _sum_stack_param *sum_stack_param = param;
  struct spu_link_hash_table *htab;

  /* If already visited, fun->stack holds the cumulative figure.  */
  cum_stack = fun->stack;
  sum_stack_param->cum_stack = cum_stack;
  if (fun->visit3)
    return TRUE;

  has_call = FALSE;
  max = NULL;		/* Callee on the deepest stack path.  */
  for (call = fun->call_list; call; call = call->next)
    {
      if (call->broken_cycle)
	continue;
      if (!call->is_pasted)
	has_call = TRUE;
      if (!sum_stack (call->fun, info, sum_stack_param))
	return FALSE;
      stack = sum_stack_param->cum_stack;
      /* Include caller stack for normal calls, don't do so for
	 tail calls.  fun->stack here is local stack usage for
	 this function.  */
      if (!call->is_tail || call->is_pasted || call->fun->start != NULL)
	stack += fun->stack;
      if (cum_stack < stack)
	{
	  cum_stack = stack;
	  max = call->fun;
	}
    }

  sum_stack_param->cum_stack = cum_stack;
  stack = fun->stack;
  /* Now fun->stack holds cumulative stack.  */
  fun->stack = cum_stack;
  fun->visit3 = TRUE;

  /* Roots contribute to the program-wide maximum.  */
  if (!fun->non_root
      && sum_stack_param->overall_stack < cum_stack)
    sum_stack_param->overall_stack = cum_stack;

  htab = spu_hash_table (info);
  if (htab->params->auto_overlay)
    return TRUE;

  f1 = func_name (fun);
  if (htab->params->stack_analysis)
    {
      if (!fun->non_root)
	info->callbacks->info ("  %s: 0x%v\n", f1, (bfd_vma) cum_stack);
      info->callbacks->minfo ("%s: 0x%v 0x%v\n",
			      f1, (bfd_vma) stack, (bfd_vma) cum_stack);

      if (has_call)
	{
	  info->callbacks->minfo (_("  calls:\n"));
	  for (call = fun->call_list; call; call = call->next)
	    if (!call->is_pasted && !call->broken_cycle)
	      {
		const char *f2 = func_name (call->fun);
		/* "*" marks the deepest callee, "t" a tail call.  */
		const char *ann1 = call->fun == max ? "*" : " ";
		const char *ann2 = call->is_tail ? "t" : " ";

		info->callbacks->minfo ("   %s%s %s\n", ann1, ann2, f2);
	      }
	}
    }

  if (sum_stack_param->emit_stack_syms)
    {
      /* Define an absolute __stack_<func> (or __stack_<secid>_<func>
	 for local syms) symbol holding the cumulative stack.  */
      char *name = bfd_malloc (18 + strlen (f1));
      struct elf_link_hash_entry *h;

      if (name == NULL)
	return FALSE;

      if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
	sprintf (name, "__stack_%s", f1);
      else
	sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);

      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
      free (name);
      if (h != NULL
	  && (h->root.type == bfd_link_hash_new
	      || h->root.type == bfd_link_hash_undefined
	      || h->root.type == bfd_link_hash_undefweak))
	{
	  h->root.type = bfd_link_hash_defined;
	  h->root.u.def.section = bfd_abs_section_ptr;
	  h->root.u.def.value = cum_stack;
	  h->size = 0;
	  h->type = 0;
	  h->ref_regular = 1;
	  h->def_regular = 1;
	  h->ref_regular_nonweak = 1;
	  h->forced_local = 1;
	  h->non_elf = 0;
	}
    }

  return TRUE;
}
4070
4071 /* SEC is part of a pasted function. Return the call_info for the
4072 next section of this function. */
4073
4074 static struct call_info *
4075 find_pasted_call (asection *sec)
4076 {
4077 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
4078 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
4079 struct call_info *call;
4080 int k;
4081
4082 for (k = 0; k < sinfo->num_fun; ++k)
4083 for (call = sinfo->fun[k].call_list; call != NULL; call = call->next)
4084 if (call->is_pasted)
4085 return call;
4086 abort ();
4087 return 0;
4088 }
4089
4090 /* qsort predicate to sort bfds by file name. */
4091
4092 static int
4093 sort_bfds (const void *a, const void *b)
4094 {
4095 bfd *const *abfd1 = a;
4096 bfd *const *abfd2 = b;
4097
4098 return filename_cmp ((*abfd1)->filename, (*abfd2)->filename);
4099 }
4100
static unsigned int
print_one_overlay_section (FILE *script,
			   unsigned int base,
			   unsigned int count,
			   unsigned int ovlynum,
			   unsigned int *ovly_map,
			   asection **ovly_sections,
			   struct bfd_link_info *info)
{
  unsigned int j;

  /* First pass: emit the text sections of every function mapped to
     overlay OVLYNUM, including any pasted continuation sections.  */
  for (j = base; j < count && ovly_map[j] == ovlynum; j++)
    {
      asection *sec = ovly_sections[2 * j];

      if (fprintf (script, "   %s%c%s (%s)\n",
		   (sec->owner->my_archive != NULL
		    ? sec->owner->my_archive->filename : ""),
		   info->path_separator,
		   sec->owner->filename,
		   sec->name) <= 0)
	return -1;
      if (sec->segment_mark)
	{
	  /* Follow the pasted-call chain, emitting each continuation
	     section's text.  */
	  struct call_info *call = find_pasted_call (sec);
	  while (call != NULL)
	    {
	      struct function_info *call_fun = call->fun;
	      sec = call_fun->sec;
	      if (fprintf (script, "   %s%c%s (%s)\n",
			   (sec->owner->my_archive != NULL
			    ? sec->owner->my_archive->filename : ""),
			   info->path_separator,
			   sec->owner->filename,
			   sec->name) <= 0)
		return -1;
	      for (call = call_fun->call_list; call; call = call->next)
		if (call->is_pasted)
		  break;
	    }
	}
    }

  /* Second pass: emit the matching rodata sections in the same
     order.  */
  for (j = base; j < count && ovly_map[j] == ovlynum; j++)
    {
      asection *sec = ovly_sections[2 * j + 1];
      if (sec != NULL
	  && fprintf (script, "   %s%c%s (%s)\n",
		      (sec->owner->my_archive != NULL
		       ? sec->owner->my_archive->filename : ""),
		      info->path_separator,
		      sec->owner->filename,
		      sec->name) <= 0)
	return -1;

      sec = ovly_sections[2 * j];
      if (sec->segment_mark)
	{
	  /* Rodata of pasted continuation sections, if present.  */
	  struct call_info *call = find_pasted_call (sec);
	  while (call != NULL)
	    {
	      struct function_info *call_fun = call->fun;
	      sec = call_fun->rodata;
	      if (sec != NULL
		  && fprintf (script, "   %s%c%s (%s)\n",
			      (sec->owner->my_archive != NULL
			       ? sec->owner->my_archive->filename : ""),
			      info->path_separator,
			      sec->owner->filename,
			      sec->name) <= 0)
		return -1;
	      for (call = call_fun->call_list; call; call = call->next)
		if (call->is_pasted)
		  break;
	    }
	}
    }

  /* Return the index just past this overlay's entries, or
     (unsigned int) -1 on a write error.  */
  return j;
}
4181
4182 /* Handle --auto-overlay. */
4183
4184 static void
4185 spu_elf_auto_overlay (struct bfd_link_info *info)
4186 {
4187 bfd *ibfd;
4188 bfd **bfd_arr;
4189 struct elf_segment_map *m;
4190 unsigned int fixed_size, lo, hi;
4191 unsigned int reserved;
4192 struct spu_link_hash_table *htab;
4193 unsigned int base, i, count, bfd_count;
4194 unsigned int region, ovlynum;
4195 asection **ovly_sections, **ovly_p;
4196 unsigned int *ovly_map;
4197 FILE *script;
4198 unsigned int total_overlay_size, overlay_size;
4199 const char *ovly_mgr_entry;
4200 struct elf_link_hash_entry *h;
4201 struct _mos_param mos_param;
4202 struct _uos_param uos_param;
4203 struct function_info dummy_caller;
4204
4205 /* Find the extents of our loadable image. */
4206 lo = (unsigned int) -1;
4207 hi = 0;
4208 for (m = elf_seg_map (info->output_bfd); m != NULL; m = m->next)
4209 if (m->p_type == PT_LOAD)
4210 for (i = 0; i < m->count; i++)
4211 if (m->sections[i]->size != 0)
4212 {
4213 if (m->sections[i]->vma < lo)
4214 lo = m->sections[i]->vma;
4215 if (m->sections[i]->vma + m->sections[i]->size - 1 > hi)
4216 hi = m->sections[i]->vma + m->sections[i]->size - 1;
4217 }
4218 fixed_size = hi + 1 - lo;
4219
4220 if (!discover_functions (info))
4221 goto err_exit;
4222
4223 if (!build_call_tree (info))
4224 goto err_exit;
4225
4226 htab = spu_hash_table (info);
4227 reserved = htab->params->auto_overlay_reserved;
4228 if (reserved == 0)
4229 {
4230 struct _sum_stack_param sum_stack_param;
4231
4232 sum_stack_param.emit_stack_syms = 0;
4233 sum_stack_param.overall_stack = 0;
4234 if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
4235 goto err_exit;
4236 reserved = (sum_stack_param.overall_stack
4237 + htab->params->extra_stack_space);
4238 }
4239
4240 /* No need for overlays if everything already fits. */
4241 if (fixed_size + reserved <= htab->local_store
4242 && htab->params->ovly_flavour != ovly_soft_icache)
4243 {
4244 htab->params->auto_overlay = 0;
4245 return;
4246 }
4247
4248 uos_param.exclude_input_section = 0;
4249 uos_param.exclude_output_section
4250 = bfd_get_section_by_name (info->output_bfd, ".interrupt");
4251
4252 ovly_mgr_entry = "__ovly_load";
4253 if (htab->params->ovly_flavour == ovly_soft_icache)
4254 ovly_mgr_entry = "__icache_br_handler";
4255 h = elf_link_hash_lookup (&htab->elf, ovly_mgr_entry,
4256 FALSE, FALSE, FALSE);
4257 if (h != NULL
4258 && (h->root.type == bfd_link_hash_defined
4259 || h->root.type == bfd_link_hash_defweak)
4260 && h->def_regular)
4261 {
4262 /* We have a user supplied overlay manager. */
4263 uos_param.exclude_input_section = h->root.u.def.section;
4264 }
4265 else
4266 {
4267 /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
4268 builtin version to .text, and will adjust .text size. */
4269 fixed_size += (*htab->params->spu_elf_load_ovl_mgr) ();
4270 }
4271
4272 /* Mark overlay sections, and find max overlay section size. */
4273 mos_param.max_overlay_size = 0;
4274 if (!for_each_node (mark_overlay_section, info, &mos_param, TRUE))
4275 goto err_exit;
4276
4277 /* We can't put the overlay manager or interrupt routines in
4278 overlays. */
4279 uos_param.clearing = 0;
4280 if ((uos_param.exclude_input_section
4281 || uos_param.exclude_output_section)
4282 && !for_each_node (unmark_overlay_section, info, &uos_param, TRUE))
4283 goto err_exit;
4284
4285 bfd_count = 0;
4286 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
4287 ++bfd_count;
4288 bfd_arr = bfd_malloc (bfd_count * sizeof (*bfd_arr));
4289 if (bfd_arr == NULL)
4290 goto err_exit;
4291
4292 /* Count overlay sections, and subtract their sizes from "fixed_size". */
4293 count = 0;
4294 bfd_count = 0;
4295 total_overlay_size = 0;
4296 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
4297 {
4298 extern const bfd_target spu_elf32_vec;
4299 asection *sec;
4300 unsigned int old_count;
4301
4302 if (ibfd->xvec != &spu_elf32_vec)
4303 continue;
4304
4305 old_count = count;
4306 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
4307 if (sec->linker_mark)
4308 {
4309 if ((sec->flags & SEC_CODE) != 0)
4310 count += 1;
4311 fixed_size -= sec->size;
4312 total_overlay_size += sec->size;
4313 }
4314 else if ((sec->flags & (SEC_ALLOC | SEC_LOAD)) == (SEC_ALLOC | SEC_LOAD)
4315 && sec->output_section->owner == info->output_bfd
4316 && strncmp (sec->output_section->name, ".ovl.init", 9) == 0)
4317 fixed_size -= sec->size;
4318 if (count != old_count)
4319 bfd_arr[bfd_count++] = ibfd;
4320 }
4321
4322 /* Since the overlay link script selects sections by file name and
4323 section name, ensure that file names are unique. */
4324 if (bfd_count > 1)
4325 {
4326 bfd_boolean ok = TRUE;
4327
4328 qsort (bfd_arr, bfd_count, sizeof (*bfd_arr), sort_bfds);
4329 for (i = 1; i < bfd_count; ++i)
4330 if (filename_cmp (bfd_arr[i - 1]->filename, bfd_arr[i]->filename) == 0)
4331 {
4332 if (bfd_arr[i - 1]->my_archive == bfd_arr[i]->my_archive)
4333 {
4334 if (bfd_arr[i - 1]->my_archive && bfd_arr[i]->my_archive)
4335 /* xgettext:c-format */
4336 info->callbacks->einfo (_("%s duplicated in %s\n"),
4337 bfd_arr[i]->filename,
4338 bfd_arr[i]->my_archive->filename);
4339 else
4340 info->callbacks->einfo (_("%s duplicated\n"),
4341 bfd_arr[i]->filename);
4342 ok = FALSE;
4343 }
4344 }
4345 if (!ok)
4346 {
4347 info->callbacks->einfo (_("sorry, no support for duplicate "
4348 "object files in auto-overlay script\n"));
4349 bfd_set_error (bfd_error_bad_value);
4350 goto err_exit;
4351 }
4352 }
4353 free (bfd_arr);
4354
4355 fixed_size += reserved;
4356 fixed_size += htab->non_ovly_stub * ovl_stub_size (htab->params);
4357 if (fixed_size + mos_param.max_overlay_size <= htab->local_store)
4358 {
4359 if (htab->params->ovly_flavour == ovly_soft_icache)
4360 {
4361 /* Stubs in the non-icache area are bigger. */
4362 fixed_size += htab->non_ovly_stub * 16;
4363 /* Space for icache manager tables.
4364 a) Tag array, one quadword per cache line.
4365 - word 0: ia address of present line, init to zero. */
4366 fixed_size += 16 << htab->num_lines_log2;
4367 /* b) Rewrite "to" list, one quadword per cache line. */
4368 fixed_size += 16 << htab->num_lines_log2;
4369 /* c) Rewrite "from" list, one byte per outgoing branch (rounded up
4370 to a power-of-two number of full quadwords) per cache line. */
4371 fixed_size += 16 << (htab->fromelem_size_log2
4372 + htab->num_lines_log2);
4373 /* d) Pointer to __ea backing store (toe), 1 quadword. */
4374 fixed_size += 16;
4375 }
4376 else
4377 {
4378 /* Guess number of overlays. Assuming overlay buffer is on
4379 average only half full should be conservative. */
4380 ovlynum = (total_overlay_size * 2 * htab->params->num_lines
4381 / (htab->local_store - fixed_size));
4382 /* Space for _ovly_table[], _ovly_buf_table[] and toe. */
4383 fixed_size += ovlynum * 16 + 16 + 4 + 16;
4384 }
4385 }
4386
4387 if (fixed_size + mos_param.max_overlay_size > htab->local_store)
4388 /* xgettext:c-format */
4389 info->callbacks->einfo (_("non-overlay size of 0x%v plus maximum overlay "
4390 "size of 0x%v exceeds local store\n"),
4391 (bfd_vma) fixed_size,
4392 (bfd_vma) mos_param.max_overlay_size);
4393
4394 /* Now see if we should put some functions in the non-overlay area. */
4395 else if (fixed_size < htab->params->auto_overlay_fixed)
4396 {
4397 unsigned int max_fixed, lib_size;
4398
4399 max_fixed = htab->local_store - mos_param.max_overlay_size;
4400 if (max_fixed > htab->params->auto_overlay_fixed)
4401 max_fixed = htab->params->auto_overlay_fixed;
4402 lib_size = max_fixed - fixed_size;
4403 lib_size = auto_ovl_lib_functions (info, lib_size);
4404 if (lib_size == (unsigned int) -1)
4405 goto err_exit;
4406 fixed_size = max_fixed - lib_size;
4407 }
4408
4409 /* Build an array of sections, suitably sorted to place into
4410 overlays. */
4411 ovly_sections = bfd_malloc (2 * count * sizeof (*ovly_sections));
4412 if (ovly_sections == NULL)
4413 goto err_exit;
4414 ovly_p = ovly_sections;
4415 if (!for_each_node (collect_overlays, info, &ovly_p, TRUE))
4416 goto err_exit;
4417 count = (size_t) (ovly_p - ovly_sections) / 2;
4418 ovly_map = bfd_malloc (count * sizeof (*ovly_map));
4419 if (ovly_map == NULL)
4420 goto err_exit;
4421
4422 memset (&dummy_caller, 0, sizeof (dummy_caller));
4423 overlay_size = (htab->local_store - fixed_size) / htab->params->num_lines;
4424 if (htab->params->line_size != 0)
4425 overlay_size = htab->params->line_size;
4426 base = 0;
4427 ovlynum = 0;
4428 while (base < count)
4429 {
4430 unsigned int size = 0, rosize = 0, roalign = 0;
4431
4432 for (i = base; i < count; i++)
4433 {
4434 asection *sec, *rosec;
4435 unsigned int tmp, rotmp;
4436 unsigned int num_stubs;
4437 struct call_info *call, *pasty;
4438 struct _spu_elf_section_data *sec_data;
4439 struct spu_elf_stack_info *sinfo;
4440 unsigned int k;
4441
4442 /* See whether we can add this section to the current
4443 overlay without overflowing our overlay buffer. */
4444 sec = ovly_sections[2 * i];
4445 tmp = align_power (size, sec->alignment_power) + sec->size;
4446 rotmp = rosize;
4447 rosec = ovly_sections[2 * i + 1];
4448 if (rosec != NULL)
4449 {
4450 rotmp = align_power (rotmp, rosec->alignment_power) + rosec->size;
4451 if (roalign < rosec->alignment_power)
4452 roalign = rosec->alignment_power;
4453 }
4454 if (align_power (tmp, roalign) + rotmp > overlay_size)
4455 break;
4456 if (sec->segment_mark)
4457 {
4458 /* Pasted sections must stay together, so add their
4459 sizes too. */
4460 pasty = find_pasted_call (sec);
4461 while (pasty != NULL)
4462 {
4463 struct function_info *call_fun = pasty->fun;
4464 tmp = (align_power (tmp, call_fun->sec->alignment_power)
4465 + call_fun->sec->size);
4466 if (call_fun->rodata)
4467 {
4468 rotmp = (align_power (rotmp,
4469 call_fun->rodata->alignment_power)
4470 + call_fun->rodata->size);
4471 if (roalign < rosec->alignment_power)
4472 roalign = rosec->alignment_power;
4473 }
4474 for (pasty = call_fun->call_list; pasty; pasty = pasty->next)
4475 if (pasty->is_pasted)
4476 break;
4477 }
4478 }
4479 if (align_power (tmp, roalign) + rotmp > overlay_size)
4480 break;
4481
4482 /* If we add this section, we might need new overlay call
4483 stubs. Add any overlay section calls to dummy_call. */
4484 pasty = NULL;
4485 sec_data = spu_elf_section_data (sec);
4486 sinfo = sec_data->u.i.stack_info;
4487 for (k = 0; k < (unsigned) sinfo->num_fun; ++k)
4488 for (call = sinfo->fun[k].call_list; call; call = call->next)
4489 if (call->is_pasted)
4490 {
4491 BFD_ASSERT (pasty == NULL);
4492 pasty = call;
4493 }
4494 else if (call->fun->sec->linker_mark)
4495 {
4496 if (!copy_callee (&dummy_caller, call))
4497 goto err_exit;
4498 }
4499 while (pasty != NULL)
4500 {
4501 struct function_info *call_fun = pasty->fun;
4502 pasty = NULL;
4503 for (call = call_fun->call_list; call; call = call->next)
4504 if (call->is_pasted)
4505 {
4506 BFD_ASSERT (pasty == NULL);
4507 pasty = call;
4508 }
4509 else if (!copy_callee (&dummy_caller, call))
4510 goto err_exit;
4511 }
4512
4513 /* Calculate call stub size. */
4514 num_stubs = 0;
4515 for (call = dummy_caller.call_list; call; call = call->next)
4516 {
4517 unsigned int stub_delta = 1;
4518
4519 if (htab->params->ovly_flavour == ovly_soft_icache)
4520 stub_delta = call->count;
4521 num_stubs += stub_delta;
4522
4523 /* If the call is within this overlay, we won't need a
4524 stub. */
4525 for (k = base; k < i + 1; k++)
4526 if (call->fun->sec == ovly_sections[2 * k])
4527 {
4528 num_stubs -= stub_delta;
4529 break;
4530 }
4531 }
4532 if (htab->params->ovly_flavour == ovly_soft_icache
4533 && num_stubs > htab->params->max_branch)
4534 break;
4535 if (align_power (tmp, roalign) + rotmp
4536 + num_stubs * ovl_stub_size (htab->params) > overlay_size)
4537 break;
4538 size = tmp;
4539 rosize = rotmp;
4540 }
4541
4542 if (i == base)
4543 {
4544 /* xgettext:c-format */
4545 info->callbacks->einfo (_("%pB:%pA%s exceeds overlay size\n"),
4546 ovly_sections[2 * i]->owner,
4547 ovly_sections[2 * i],
4548 ovly_sections[2 * i + 1] ? " + rodata" : "");
4549 bfd_set_error (bfd_error_bad_value);
4550 goto err_exit;
4551 }
4552
4553 while (dummy_caller.call_list != NULL)
4554 {
4555 struct call_info *call = dummy_caller.call_list;
4556 dummy_caller.call_list = call->next;
4557 free (call);
4558 }
4559
4560 ++ovlynum;
4561 while (base < i)
4562 ovly_map[base++] = ovlynum;
4563 }
4564
4565 script = htab->params->spu_elf_open_overlay_script ();
4566
4567 if (htab->params->ovly_flavour == ovly_soft_icache)
4568 {
4569 if (fprintf (script, "SECTIONS\n{\n") <= 0)
4570 goto file_err;
4571
4572 if (fprintf (script,
4573 " . = ALIGN (%u);\n"
4574 " .ovl.init : { *(.ovl.init) }\n"
4575 " . = ABSOLUTE (ADDR (.ovl.init));\n",
4576 htab->params->line_size) <= 0)
4577 goto file_err;
4578
4579 base = 0;
4580 ovlynum = 1;
4581 while (base < count)
4582 {
4583 unsigned int indx = ovlynum - 1;
4584 unsigned int vma, lma;
4585
4586 vma = (indx & (htab->params->num_lines - 1)) << htab->line_size_log2;
4587 lma = vma + (((indx >> htab->num_lines_log2) + 1) << 18);
4588
4589 if (fprintf (script, " .ovly%u ABSOLUTE (ADDR (.ovl.init)) + %u "
4590 ": AT (LOADADDR (.ovl.init) + %u) {\n",
4591 ovlynum, vma, lma) <= 0)
4592 goto file_err;
4593
4594 base = print_one_overlay_section (script, base, count, ovlynum,
4595 ovly_map, ovly_sections, info);
4596 if (base == (unsigned) -1)
4597 goto file_err;
4598
4599 if (fprintf (script, " }\n") <= 0)
4600 goto file_err;
4601
4602 ovlynum++;
4603 }
4604
4605 if (fprintf (script, " . = ABSOLUTE (ADDR (.ovl.init)) + %u;\n",
4606 1 << (htab->num_lines_log2 + htab->line_size_log2)) <= 0)
4607 goto file_err;
4608
4609 if (fprintf (script, "}\nINSERT AFTER .toe;\n") <= 0)
4610 goto file_err;
4611 }
4612 else
4613 {
4614 if (fprintf (script, "SECTIONS\n{\n") <= 0)
4615 goto file_err;
4616
4617 if (fprintf (script,
4618 " . = ALIGN (16);\n"
4619 " .ovl.init : { *(.ovl.init) }\n"
4620 " . = ABSOLUTE (ADDR (.ovl.init));\n") <= 0)
4621 goto file_err;
4622
4623 for (region = 1; region <= htab->params->num_lines; region++)
4624 {
4625 ovlynum = region;
4626 base = 0;
4627 while (base < count && ovly_map[base] < ovlynum)
4628 base++;
4629
4630 if (base == count)
4631 break;
4632
4633 if (region == 1)
4634 {
4635 /* We need to set lma since we are overlaying .ovl.init. */
4636 if (fprintf (script,
4637 " OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))\n {\n") <= 0)
4638 goto file_err;
4639 }
4640 else
4641 {
4642 if (fprintf (script, " OVERLAY :\n {\n") <= 0)
4643 goto file_err;
4644 }
4645
4646 while (base < count)
4647 {
4648 if (fprintf (script, " .ovly%u {\n", ovlynum) <= 0)
4649 goto file_err;
4650
4651 base = print_one_overlay_section (script, base, count, ovlynum,
4652 ovly_map, ovly_sections, info);
4653 if (base == (unsigned) -1)
4654 goto file_err;
4655
4656 if (fprintf (script, " }\n") <= 0)
4657 goto file_err;
4658
4659 ovlynum += htab->params->num_lines;
4660 while (base < count && ovly_map[base] < ovlynum)
4661 base++;
4662 }
4663
4664 if (fprintf (script, " }\n") <= 0)
4665 goto file_err;
4666 }
4667
4668 if (fprintf (script, "}\nINSERT BEFORE .text;\n") <= 0)
4669 goto file_err;
4670 }
4671
4672 free (ovly_map);
4673 free (ovly_sections);
4674
4675 if (fclose (script) != 0)
4676 goto file_err;
4677
4678 if (htab->params->auto_overlay & AUTO_RELINK)
4679 (*htab->params->spu_elf_relink) ();
4680
4681 xexit (0);
4682
4683 file_err:
4684 bfd_set_error (bfd_error_system_call);
4685 err_exit:
4686 info->callbacks->einfo (_("%F%P: auto overlay error: %E\n"));
4687 xexit (1);
4688 }
4689
4690 /* Provide an estimate of total stack required. */
4691
4692 static bfd_boolean
4693 spu_elf_stack_analysis (struct bfd_link_info *info)
4694 {
4695 struct spu_link_hash_table *htab;
4696 struct _sum_stack_param sum_stack_param;
4697
4698 if (!discover_functions (info))
4699 return FALSE;
4700
4701 if (!build_call_tree (info))
4702 return FALSE;
4703
4704 htab = spu_hash_table (info);
4705 if (htab->params->stack_analysis)
4706 {
4707 info->callbacks->info (_("Stack size for call graph root nodes.\n"));
4708 info->callbacks->minfo (_("\nStack size for functions. "
4709 "Annotations: '*' max stack, 't' tail call\n"));
4710 }
4711
4712 sum_stack_param.emit_stack_syms = htab->params->emit_stack_syms;
4713 sum_stack_param.overall_stack = 0;
4714 if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
4715 return FALSE;
4716
4717 if (htab->params->stack_analysis)
4718 info->callbacks->info (_("Maximum stack required is 0x%v\n"),
4719 (bfd_vma) sum_stack_param.overall_stack);
4720 return TRUE;
4721 }
4722
4723 /* Perform a final link. */
4724
4725 static bfd_boolean
4726 spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
4727 {
4728 struct spu_link_hash_table *htab = spu_hash_table (info);
4729
4730 if (htab->params->auto_overlay)
4731 spu_elf_auto_overlay (info);
4732
4733 if ((htab->params->stack_analysis
4734 || (htab->params->ovly_flavour == ovly_soft_icache
4735 && htab->params->lrlive_analysis))
4736 && !spu_elf_stack_analysis (info))
4737 info->callbacks->einfo (_("%X%P: stack/lrlive analysis error: %E\n"));
4738
4739 if (!spu_elf_build_stubs (info))
4740 info->callbacks->einfo (_("%F%P: can not build overlay stubs: %E\n"));
4741
4742 return bfd_elf_final_link (output_bfd, info);
4743 }
4744
4745 /* Called when not normally emitting relocs, ie. !bfd_link_relocatable (info)
4746 and !info->emitrelocations. Returns a count of special relocs
4747 that need to be emitted. */
4748
4749 static unsigned int
4750 spu_elf_count_relocs (struct bfd_link_info *info, asection *sec)
4751 {
4752 Elf_Internal_Rela *relocs;
4753 unsigned int count = 0;
4754
4755 relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
4756 info->keep_memory);
4757 if (relocs != NULL)
4758 {
4759 Elf_Internal_Rela *rel;
4760 Elf_Internal_Rela *relend = relocs + sec->reloc_count;
4761
4762 for (rel = relocs; rel < relend; rel++)
4763 {
4764 int r_type = ELF32_R_TYPE (rel->r_info);
4765 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
4766 ++count;
4767 }
4768
4769 if (elf_section_data (sec)->relocs != relocs)
4770 free (relocs);
4771 }
4772
4773 return count;
4774 }
4775
/* Functions for adding fixup records to .fixup */

/* Each fixup record occupies one 32-bit word.  */
#define FIXUP_RECORD_SIZE 4

/* Store ADDR as fixup record INDEX in the .fixup section contents.  */
#define FIXUP_PUT(output_bfd,htab,index,addr) \
  bfd_put_32 (output_bfd, addr, \
	      htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
/* Fetch fixup record INDEX back out of the .fixup section contents.  */
#define FIXUP_GET(output_bfd,htab,index) \
  bfd_get_32 (output_bfd, \
	      htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
4786
4787 /* Store OFFSET in .fixup. This assumes it will be called with an
4788 increasing OFFSET. When this OFFSET fits with the last base offset,
4789 it just sets a bit, otherwise it adds a new fixup record. */
4790 static void
4791 spu_elf_emit_fixup (bfd * output_bfd, struct bfd_link_info *info,
4792 bfd_vma offset)
4793 {
4794 struct spu_link_hash_table *htab = spu_hash_table (info);
4795 asection *sfixup = htab->sfixup;
4796 bfd_vma qaddr = offset & ~(bfd_vma) 15;
4797 bfd_vma bit = ((bfd_vma) 8) >> ((offset & 15) >> 2);
4798 if (sfixup->reloc_count == 0)
4799 {
4800 FIXUP_PUT (output_bfd, htab, 0, qaddr | bit);
4801 sfixup->reloc_count++;
4802 }
4803 else
4804 {
4805 bfd_vma base = FIXUP_GET (output_bfd, htab, sfixup->reloc_count - 1);
4806 if (qaddr != (base & ~(bfd_vma) 15))
4807 {
4808 if ((sfixup->reloc_count + 1) * FIXUP_RECORD_SIZE > sfixup->size)
4809 _bfd_error_handler (_("fatal error while creating .fixup"));
4810 FIXUP_PUT (output_bfd, htab, sfixup->reloc_count, qaddr | bit);
4811 sfixup->reloc_count++;
4812 }
4813 else
4814 FIXUP_PUT (output_bfd, htab, sfixup->reloc_count - 1, base | bit);
4815 }
4816 }
4817
/* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD.  */

static int
spu_elf_relocate_section (bfd *output_bfd,
			  struct bfd_link_info *info,
			  bfd *input_bfd,
			  asection *input_section,
			  bfd_byte *contents,
			  Elf_Internal_Rela *relocs,
			  Elf_Internal_Sym *local_syms,
			  asection **local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel, *relend;
  struct spu_link_hash_table *htab;
  asection *ea;
  int ret = TRUE;
  bfd_boolean emit_these_relocs = FALSE;
  bfd_boolean is_ea_sym;
  bfd_boolean stubs;
  unsigned int iovl = 0;

  htab = spu_hash_table (info);
  /* Overlay call stubs may be needed for branches out of this section.  */
  stubs = (htab->stub_sec != NULL
	   && maybe_needs_stubs (input_section));
  /* Overlay index of the section being relocated; 0 if not an overlay.  */
  iovl = overlay_index (input_section);
  ea = bfd_get_section_by_name (output_bfd, "._ea");
  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
  sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int r_type;
      reloc_howto_type *howto;
      unsigned int r_symndx;
      Elf_Internal_Sym *sym;
      asection *sec;
      struct elf_link_hash_entry *h;
      const char *sym_name;
      bfd_vma relocation;
      bfd_vma addend;
      bfd_reloc_status_type r;
      bfd_boolean unresolved_reloc;
      enum _stub_type stub_type;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      howto = elf_howto_table + r_type;
      unresolved_reloc = FALSE;
      h = NULL;
      sym = NULL;
      sec = NULL;
      if (r_symndx < symtab_hdr->sh_info)
	{
	  /* Local symbol: resolve directly against its section.  */
	  sym = local_syms + r_symndx;
	  sec = local_sections[r_symndx];
	  sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
	  relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
	}
      else
	{
	  /* Global symbol: resolve via the linker hash table.  */
	  if (sym_hashes == NULL)
	    return FALSE;

	  h = sym_hashes[r_symndx - symtab_hdr->sh_info];

	  if (info->wrap_hash != NULL
	      && (input_section->flags & SEC_DEBUGGING) != 0)
	    h = ((struct elf_link_hash_entry *)
		 unwrap_hash_lookup (info, input_bfd, &h->root));

	  /* Follow indirect and warning links to the real symbol.  */
	  while (h->root.type == bfd_link_hash_indirect
		 || h->root.type == bfd_link_hash_warning)
	    h = (struct elf_link_hash_entry *) h->root.u.i.link;

	  relocation = 0;
	  if (h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	    {
	      sec = h->root.u.def.section;
	      if (sec == NULL
		  || sec->output_section == NULL)
		/* Set a flag that will be cleared later if we find a
		   relocation value for this symbol.  output_section
		   is typically NULL for symbols satisfied by a shared
		   library.  */
		unresolved_reloc = TRUE;
	      else
		relocation = (h->root.u.def.value
			      + sec->output_section->vma
			      + sec->output_offset);
	    }
	  else if (h->root.type == bfd_link_hash_undefweak)
	    ;
	  else if (info->unresolved_syms_in_objects == RM_IGNORE
		   && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
	    ;
	  else if (!bfd_link_relocatable (info)
		   && !(r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64))
	    {
	      /* Undefined symbol: report, fatally or not depending on
		 the unresolved-symbols policy and visibility.  */
	      bfd_boolean err;
	      err = (info->unresolved_syms_in_objects == RM_GENERATE_ERROR
		     || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT);
	      (*info->callbacks->undefined_symbol) (info,
						    h->root.root.string,
						    input_bfd,
						    input_section,
						    rel->r_offset, err);
	    }
	  sym_name = h->root.root.string;
	}

      if (sec != NULL && discarded_section (sec))
	RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
					 rel, 1, relend, howto, 0, contents);

      if (bfd_link_relocatable (info))
	continue;

      /* Change "a rt,ra,rb" to "ai rt,ra,0".  */
      if (r_type == R_SPU_ADD_PIC
	  && h != NULL
	  && !(h->def_regular || ELF_COMMON_DEF_P (h)))
	{
	  bfd_byte *loc = contents + rel->r_offset;
	  loc[0] = 0x1c;
	  loc[1] = 0x00;
	  loc[2] &= 0x3f;
	}

      /* Does the symbol resolve into the PPU-side ._ea section?  */
      is_ea_sym = (ea != NULL
		   && sec != NULL
		   && sec->output_section == ea);

      /* If this symbol is in an overlay area, we may need to relocate
	 to the overlay stub.  */
      addend = rel->r_addend;
      if (stubs
	  && !is_ea_sym
	  && (stub_type = needs_ovl_stub (h, sym, sec, input_section, rel,
					  contents, info)) != no_stub)
	{
	  unsigned int ovl = 0;
	  struct got_entry *g, **head;

	  if (stub_type != nonovl_stub)
	    ovl = iovl;

	  if (h != NULL)
	    head = &h->got.glist;
	  else
	    head = elf_local_got_ents (input_bfd) + r_symndx;

	  /* Find the got entry whose stub matches this call site:
	     by branch address for soft-icache, else by addend and
	     overlay index.  */
	  for (g = *head; g != NULL; g = g->next)
	    if (htab->params->ovly_flavour == ovly_soft_icache
		? (g->ovl == ovl
		   && g->br_addr == (rel->r_offset
				     + input_section->output_offset
				     + input_section->output_section->vma))
		: g->addend == addend && (g->ovl == ovl || g->ovl == 0))
	      break;
	  if (g == NULL)
	    abort ();

	  /* Redirect the relocation to the stub.  */
	  relocation = g->stub_addr;
	  addend = 0;
	}
      else
	{
	  /* For soft icache, encode the overlay index into addresses.  */
	  if (htab->params->ovly_flavour == ovly_soft_icache
	      && (r_type == R_SPU_ADDR16_HI
		  || r_type == R_SPU_ADDR32 || r_type == R_SPU_REL32)
	      && !is_ea_sym)
	    {
	      unsigned int ovl = overlay_index (sec);
	      if (ovl != 0)
		{
		  unsigned int set_id = ((ovl - 1) >> htab->num_lines_log2) + 1;
		  relocation += set_id << 18;
		}
	    }
	}

      /* Record each allocated R_SPU_ADDR32 word in .fixup.  */
      if (htab->params->emit_fixups && !bfd_link_relocatable (info)
	  && (input_section->flags & SEC_ALLOC) != 0
	  && r_type == R_SPU_ADDR32)
	{
	  bfd_vma offset;
	  offset = rel->r_offset + input_section->output_section->vma
	    + input_section->output_offset;
	  spu_elf_emit_fixup (output_bfd, info, offset);
	}

      if (unresolved_reloc)
	;
      else if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	{
	  if (is_ea_sym)
	    {
	      /* ._ea is a special section that isn't allocated in SPU
		 memory, but rather occupies space in PPU memory as
		 part of an embedded ELF image.  If this reloc is
		 against a symbol defined in ._ea, then transform the
		 reloc into an equivalent one without a symbol
		 relative to the start of the ELF image.  */
	      rel->r_addend += (relocation
				- ea->vma
				+ elf_section_data (ea)->this_hdr.sh_offset);
	      rel->r_info = ELF32_R_INFO (0, r_type);
	    }
	  /* PPU relocs are not applied here; they are emitted into the
	     output file (see the squeeze pass below).  */
	  emit_these_relocs = TRUE;
	  continue;
	}
      else if (is_ea_sym)
	unresolved_reloc = TRUE;

      if (unresolved_reloc
	  && _bfd_elf_section_offset (output_bfd, info, input_section,
				      rel->r_offset) != (bfd_vma) -1)
	{
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%s+%#" PRIx64 "): "
	       "unresolvable %s relocation against symbol `%s'"),
	     input_bfd,
	     bfd_get_section_name (input_bfd, input_section),
	     (uint64_t) rel->r_offset,
	     howto->name,
	     sym_name);
	  ret = FALSE;
	}

      r = _bfd_final_link_relocate (howto,
				    input_bfd,
				    input_section,
				    contents,
				    rel->r_offset, relocation, addend);

      if (r != bfd_reloc_ok)
	{
	  const char *msg = (const char *) 0;

	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      (*info->callbacks->reloc_overflow)
		(info, (h ? &h->root : NULL), sym_name, howto->name,
		 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
	      break;

	    case bfd_reloc_undefined:
	      (*info->callbacks->undefined_symbol)
		(info, sym_name, input_bfd, input_section, rel->r_offset, TRUE);
	      break;

	    case bfd_reloc_outofrange:
	      msg = _("internal error: out of range error");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      msg = _("internal error: unsupported relocation error");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      msg = _("internal error: dangerous error");
	      goto common_error;

	    default:
	      msg = _("internal error: unknown error");
	      /* fall through */

	    common_error:
	      ret = FALSE;
	      (*info->callbacks->warning) (info, msg, sym_name, input_bfd,
					   input_section, rel->r_offset);
	      break;
	    }
	}
    }

  /* When not emitting all relocs, squeeze the reloc array down to just
     the R_SPU_PPU32/R_SPU_PPU64 entries flagged above so that only
     those are written to the output.  */
  if (ret
      && emit_these_relocs
      && !info->emitrelocations)
    {
      Elf_Internal_Rela *wrel;
      Elf_Internal_Shdr *rel_hdr;

      wrel = rel = relocs;
      relend = relocs + input_section->reloc_count;
      for (; rel < relend; rel++)
	{
	  int r_type;

	  r_type = ELF32_R_TYPE (rel->r_info);
	  if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	    *wrel++ = *rel;
	}
      input_section->reloc_count = wrel - relocs;
      /* Backflips for _bfd_elf_link_output_relocs.  */
      rel_hdr = _bfd_elf_single_rel_hdr (input_section);
      rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
      /* Returning 2 tells the generic linker that relocs were kept.  */
      ret = 2;
    }

  return ret;
}
5128
/* Backend hook for finishing dynamic sections.  Nothing to do here
   for SPU; simply report success.  */

static bfd_boolean
spu_elf_finish_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
				 struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  return TRUE;
}
5135
/* Adjust _SPUEAR_ syms to point at their overlay stubs.  */

static int
spu_elf_output_symbol_hook (struct bfd_link_info *info,
			    const char *sym_name ATTRIBUTE_UNUSED,
			    Elf_Internal_Sym *sym,
			    asection *sym_sec ATTRIBUTE_UNUSED,
			    struct elf_link_hash_entry *h)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);

  /* Only rewrite regularly-defined _SPUEAR_* symbols, and only when
     stubs exist in a final link.  */
  if (!bfd_link_relocatable (info)
      && htab->stub_sec != NULL
      && h != NULL
      && (h->root.type == bfd_link_hash_defined
	  || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
    {
      struct got_entry *g;

      /* Find the matching stub entry: for soft-icache the entry whose
	 branch address equals its stub address, otherwise the
	 zero-addend non-overlay (ovl == 0) entry.  */
      for (g = h->got.glist; g != NULL; g = g->next)
	if (htab->params->ovly_flavour == ovly_soft_icache
	    ? g->br_addr == g->stub_addr
	    : g->addend == 0 && g->ovl == 0)
	  {
	    /* Redirect the output symbol to the stub section/address.  */
	    sym->st_shndx = (_bfd_elf_section_from_bfd_section
			     (htab->stub_sec[0]->output_section->owner,
			      htab->stub_sec[0]->output_section));
	    sym->st_value = g->stub_addr;
	    break;
	  }
    }

  return 1;
}
5172
/* Nonzero when operating in plugin mode; when set, the output ELF
   header e_type is changed to ET_DYN (see spu_elf_post_process_headers).  */
static int spu_plugin = 0;

/* Record plugin mode VAL.  */
void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}
5180
5181 /* Set ELF header e_type for plugins. */
5182
5183 static void
5184 spu_elf_post_process_headers (bfd *abfd, struct bfd_link_info *info)
5185 {
5186 if (spu_plugin)
5187 {
5188 Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);
5189
5190 i_ehdrp->e_type = ET_DYN;
5191 }
5192
5193 _bfd_elf_post_process_headers (abfd, info);
5194 }
5195
5196 /* We may add an extra PT_LOAD segment for .toe. We also need extra
5197 segments for overlays. */
5198
5199 static int
5200 spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
5201 {
5202 int extra = 0;
5203 asection *sec;
5204
5205 if (info != NULL)
5206 {
5207 struct spu_link_hash_table *htab = spu_hash_table (info);
5208 extra = htab->num_overlays;
5209 }
5210
5211 if (extra)
5212 ++extra;
5213
5214 sec = bfd_get_section_by_name (abfd, ".toe");
5215 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
5216 ++extra;
5217
5218 return extra;
5219 }
5220
/* Remove .toe section from other PT_LOAD segments and put it in
   a segment of its own.  Put overlays in separate segments too.  */

static bfd_boolean
spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
{
  asection *toe, *s;
  struct elf_segment_map *m, *m_overlay;
  struct elf_segment_map **p, **p_overlay;
  unsigned int i;

  if (info == NULL)
    return TRUE;

  toe = bfd_get_section_by_name (abfd, ".toe");
  for (m = elf_seg_map (abfd); m != NULL; m = m->next)
    if (m->p_type == PT_LOAD && m->count > 1)
      for (i = 0; i < m->count; i++)
	if ((s = m->sections[i]) == toe
	    || spu_elf_section_data (s)->u.o.ovl_index != 0)
	  {
	    struct elf_segment_map *m2;
	    bfd_vma amt;

	    /* Split the sections following S off into a new PT_LOAD
	       segment inserted after M.  */
	    if (i + 1 < m->count)
	      {
		amt = sizeof (struct elf_segment_map);
		amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->count = m->count - (i + 1);
		memcpy (m2->sections, m->sections + i + 1,
			m2->count * sizeof (m->sections[0]));
		m2->p_type = PT_LOAD;
		m2->next = m->next;
		m->next = m2;
	      }
	    m->count = 1;
	    /* If S was not first in M, keep the preceding sections in
	       M and give S a single-section PT_LOAD of its own.  */
	    if (i != 0)
	      {
		m->count = i;
		amt = sizeof (struct elf_segment_map);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->p_type = PT_LOAD;
		m2->count = 1;
		m2->sections[0] = s;
		m2->next = m->next;
		m->next = m2;
	      }
	    break;
	  }


  /* Some SPU ELF loaders ignore the PF_OVERLAY flag and just load all
     PT_LOAD segments.  This can cause the .ovl.init section to be
     overwritten with the contents of some overlay segment.  To work
     around this issue, we ensure that all PF_OVERLAY segments are
     sorted first amongst the program headers; this ensures that even
     with a broken loader, the .ovl.init section (which is not marked
     as PF_OVERLAY) will be placed into SPU local store on startup.  */

  /* Move all overlay segments onto a separate list.  */
  p = &elf_seg_map (abfd);
  p_overlay = &m_overlay;
  while (*p != NULL)
    {
      if ((*p)->p_type == PT_LOAD && (*p)->count == 1
	  && spu_elf_section_data ((*p)->sections[0])->u.o.ovl_index != 0)
	{
	  /* Unlink from the main list and append to the overlay list.  */
	  m = *p;
	  *p = m->next;
	  *p_overlay = m;
	  p_overlay = &m->next;
	  continue;
	}

      p = &((*p)->next);
    }

  /* Re-insert overlay segments at the head of the segment map.  */
  *p_overlay = elf_seg_map (abfd);
  elf_seg_map (abfd) = m_overlay;

  return TRUE;
}
5309
5310 /* Tweak the section type of .note.spu_name. */
5311
5312 static bfd_boolean
5313 spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
5314 Elf_Internal_Shdr *hdr,
5315 asection *sec)
5316 {
5317 if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
5318 hdr->sh_type = SHT_NOTE;
5319 return TRUE;
5320 }
5321
/* Tweak phdrs before writing them out.  */

static int
spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
{
  const struct elf_backend_data *bed;
  struct elf_obj_tdata *tdata;
  Elf_Internal_Phdr *phdr, *last;
  struct spu_link_hash_table *htab;
  unsigned int count;
  unsigned int i;

  if (info == NULL)
    return TRUE;

  bed = get_elf_backend_data (abfd);
  tdata = elf_tdata (abfd);
  phdr = tdata->phdr;
  count = elf_program_header_size (abfd) / bed->s->sizeof_phdr;
  htab = spu_hash_table (info);
  if (htab->num_overlays != 0)
    {
      struct elf_segment_map *m;
      unsigned int o;

      for (i = 0, m = elf_seg_map (abfd); m; ++i, m = m->next)
	if (m->count != 0
	    && (o = spu_elf_section_data (m->sections[0])->u.o.ovl_index) != 0)
	  {
	    /* Mark this as an overlay header.  */
	    phdr[i].p_flags |= PF_OVERLAY;

	    if (htab->ovtab != NULL && htab->ovtab->size != 0
		&& htab->params->ovly_flavour != ovly_soft_icache)
	      {
		bfd_byte *p = htab->ovtab->contents;
		unsigned int off = o * 16 + 8;

		/* Write file_off into _ovly_table.  */
		bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
	      }
	  }
      /* Soft-icache has its file offset put in .ovl.init.  */
      if (htab->init != NULL && htab->init->size != 0)
	{
	  bfd_vma val = elf_section_data (htab->ovl_sec[0])->this_hdr.sh_offset;

	  bfd_put_32 (htab->init->owner, val, htab->init->contents + 4);
	}
    }

  /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
     of 16.  This should always be possible when using the standard
     linker scripts, but don't create overlapping segments if
     someone is playing games with linker scripts.  */
  last = NULL;
  for (i = count; i-- != 0; )
    if (phdr[i].p_type == PT_LOAD)
      {
	unsigned adjust;

	/* Would rounding the file size collide with the following
	   (higher-addressed) segment?  */
	adjust = -phdr[i].p_filesz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
	  break;

	/* Likewise for the memory size.  */
	adjust = -phdr[i].p_memsz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_filesz != 0
	    && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
	    && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
	  break;

	if (phdr[i].p_filesz != 0)
	  last = &phdr[i];
      }

  /* i wraps to (unsigned) -1 only when the check loop above ran to
     completion without finding a collision; only then do the rounding.  */
  if (i == (unsigned int) -1)
    for (i = count; i-- != 0; )
      if (phdr[i].p_type == PT_LOAD)
	{
	  unsigned adjust;

	  adjust = -phdr[i].p_filesz & 15;
	  phdr[i].p_filesz += adjust;

	  adjust = -phdr[i].p_memsz & 15;
	  phdr[i].p_memsz += adjust;
	}

  return TRUE;
}
5416
/* Size the .fixup section by counting, over all input sections, one
   record per quadword that holds at least one R_SPU_ADDR32 reloc.  */

bfd_boolean
spu_elf_size_sections (bfd * output_bfd, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  if (htab->params->emit_fixups)
    {
      asection *sfixup = htab->sfixup;
      int fixup_count = 0;
      bfd *ibfd;
      size_t size;

      for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
	{
	  asection *isec;

	  if (bfd_get_flavour (ibfd) != bfd_target_elf_flavour)
	    continue;

	  /* Walk over each section attached to the input bfd.  */
	  for (isec = ibfd->sections; isec != NULL; isec = isec->next)
	    {
	      Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
	      bfd_vma base_end;

	      /* If there aren't any relocs, then there's nothing more
		 to do.  */
	      if ((isec->flags & SEC_ALLOC) == 0
		  || (isec->flags & SEC_RELOC) == 0
		  || isec->reloc_count == 0)
		continue;

	      /* Get the relocs.  */
	      internal_relocs =
		_bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
					   info->keep_memory);
	      if (internal_relocs == NULL)
		return FALSE;

	      /* 1 quadword can contain up to 4 R_SPU_ADDR32
		 relocations.  They are stored in a single word by
		 saving the upper 28 bits of the address and setting the
		 lower 4 bits to a bit mask of the words that have the
		 relocation.  BASE_END keeps track of the next quadword. */
	      irela = internal_relocs;
	      irelaend = irela + isec->reloc_count;
	      base_end = 0;
	      for (; irela < irelaend; irela++)
		if (ELF32_R_TYPE (irela->r_info) == R_SPU_ADDR32
		    && irela->r_offset >= base_end)
		  {
		    base_end = (irela->r_offset & ~(bfd_vma) 15) + 16;
		    fixup_count++;
		  }
	    }
	}

      /* We always have a NULL fixup as a sentinel */
      size = (fixup_count + 1) * FIXUP_RECORD_SIZE;
      if (!bfd_set_section_size (output_bfd, sfixup, size))
	return FALSE;
      sfixup->contents = (bfd_byte *) bfd_zalloc (info->input_bfds, size);
      if (sfixup->contents == NULL)
	return FALSE;
    }
  return TRUE;
}
5483
/* Target vector and ELF backend hook definitions consumed by
   elf32-target.h below.  */
#define TARGET_BIG_SYM spu_elf32_vec
#define TARGET_BIG_NAME "elf32-spu"
#define ELF_ARCH bfd_arch_spu
#define ELF_TARGET_ID SPU_ELF_DATA
#define ELF_MACHINE_CODE EM_SPU
/* This matches the alignment need for DMA.  */
#define ELF_MAXPAGESIZE 0x80
#define elf_backend_rela_normal 1
#define elf_backend_can_gc_sections 1

#define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
#define elf_info_to_howto spu_elf_info_to_howto
#define elf_backend_count_relocs spu_elf_count_relocs
#define elf_backend_relocate_section spu_elf_relocate_section
#define elf_backend_finish_dynamic_sections spu_elf_finish_dynamic_sections
#define elf_backend_symbol_processing spu_elf_backend_symbol_processing
#define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
#define elf_backend_object_p spu_elf_object_p
#define bfd_elf32_new_section_hook spu_elf_new_section_hook
#define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create

#define elf_backend_additional_program_headers spu_elf_additional_program_headers
#define elf_backend_modify_segment_map spu_elf_modify_segment_map
#define elf_backend_modify_program_headers spu_elf_modify_program_headers
#define elf_backend_post_process_headers spu_elf_post_process_headers
#define elf_backend_fake_sections spu_elf_fake_sections
#define elf_backend_special_sections spu_elf_special_sections
#define bfd_elf32_bfd_final_link spu_elf_final_link

#include "elf32-target.h"