ubsan: moxie: left shift of negative value
[deliverable/binutils-gdb.git] / bfd / elf32-spu.c
1 /* SPU specific support for 32-bit ELF
2
3 Copyright (C) 2006-2019 Free Software Foundation, Inc.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
20
21 #include "sysdep.h"
22 #include "libiberty.h"
23 #include "bfd.h"
24 #include "bfdlink.h"
25 #include "libbfd.h"
26 #include "elf-bfd.h"
27 #include "elf/spu.h"
28 #include "elf32-spu.h"
29
30 /* All users of this file have bfd_octets_per_byte (abfd, sec) == 1. */
31 #define OCTETS_PER_BYTE(ABFD, SEC) 1
32
33 /* We use RELA style relocs. Don't define USE_REL. */
34
35 static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
36 void *, asection *,
37 bfd *, char **);
38
39 /* Values of type 'enum elf_spu_reloc_type' are used to index this
40 array, so it must be declared in the order of that type. */
41
/* Reloc howto descriptors, indexed by 'enum elf_spu_reloc_type'.
   HOWTO argument order: type, rightshift, size, bitsize, pc_relative,
   bitpos, complain_on_overflow, special_function, name,
   partial_inplace, src_mask, dst_mask, pcrel_offset.  */
static reloc_howto_type elf_howto_table[] = {
  HOWTO (R_SPU_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_NONE",
	 FALSE, 0, 0x00000000, FALSE),
  HOWTO (R_SPU_ADDR10, 4, 2, 10, FALSE, 14, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR10",
	 FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16, 2, 2, 16, FALSE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16_HI",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_LO, 0, 2, 16, FALSE, 7, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR16_LO",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR18, 0, 2, 18, FALSE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR18",
	 FALSE, 0, 0x01ffff80, FALSE),
  HOWTO (R_SPU_ADDR32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR32",
	 FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_REL16, 2, 2, 16, TRUE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_REL16",
	 FALSE, 0, 0x007fff80, TRUE),
  HOWTO (R_SPU_ADDR7, 0, 2, 7, FALSE, 14, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR7",
	 FALSE, 0, 0x001fc000, FALSE),
  /* The two 9-bit pc-relative relocs use a special function because
     the field is split within the instruction word.  */
  HOWTO (R_SPU_REL9, 2, 2, 9, TRUE, 0, complain_overflow_signed,
	 spu_elf_rel9, "SPU_REL9",
	 FALSE, 0, 0x0180007f, TRUE),
  HOWTO (R_SPU_REL9I, 2, 2, 9, TRUE, 0, complain_overflow_signed,
	 spu_elf_rel9, "SPU_REL9I",
	 FALSE, 0, 0x0000c07f, TRUE),
  HOWTO (R_SPU_ADDR10I, 0, 2, 10, FALSE, 14, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR10I",
	 FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16I, 0, 2, 16, FALSE, 7, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR16I",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_REL32, 0, 2, 32, TRUE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_REL32",
	 FALSE, 0, 0xffffffff, TRUE),
  HOWTO (R_SPU_ADDR16X, 0, 2, 16, FALSE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16X",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_PPU32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_PPU32",
	 FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_PPU64, 0, 4, 64, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_PPU64",
	 FALSE, 0, -1, FALSE),
  HOWTO (R_SPU_ADD_PIC, 0, 0, 0, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADD_PIC",
	 FALSE, 0, 0x00000000, FALSE),
};
98
/* Sections this backend recognizes by name and creates with the given
   section type and flags.  Terminated by a NULL-name entry.  */
static struct bfd_elf_special_section const spu_elf_special_sections[] = {
  { "._ea", 4, 0, SHT_PROGBITS, SHF_WRITE },
  { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
  { NULL, 0, 0, 0, 0 }
};
104
105 static enum elf_spu_reloc_type
106 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
107 {
108 switch (code)
109 {
110 default:
111 return (enum elf_spu_reloc_type) -1;
112 case BFD_RELOC_NONE:
113 return R_SPU_NONE;
114 case BFD_RELOC_SPU_IMM10W:
115 return R_SPU_ADDR10;
116 case BFD_RELOC_SPU_IMM16W:
117 return R_SPU_ADDR16;
118 case BFD_RELOC_SPU_LO16:
119 return R_SPU_ADDR16_LO;
120 case BFD_RELOC_SPU_HI16:
121 return R_SPU_ADDR16_HI;
122 case BFD_RELOC_SPU_IMM18:
123 return R_SPU_ADDR18;
124 case BFD_RELOC_SPU_PCREL16:
125 return R_SPU_REL16;
126 case BFD_RELOC_SPU_IMM7:
127 return R_SPU_ADDR7;
128 case BFD_RELOC_SPU_IMM8:
129 return R_SPU_NONE;
130 case BFD_RELOC_SPU_PCREL9a:
131 return R_SPU_REL9;
132 case BFD_RELOC_SPU_PCREL9b:
133 return R_SPU_REL9I;
134 case BFD_RELOC_SPU_IMM10:
135 return R_SPU_ADDR10I;
136 case BFD_RELOC_SPU_IMM16:
137 return R_SPU_ADDR16I;
138 case BFD_RELOC_32:
139 return R_SPU_ADDR32;
140 case BFD_RELOC_32_PCREL:
141 return R_SPU_REL32;
142 case BFD_RELOC_SPU_PPU32:
143 return R_SPU_PPU32;
144 case BFD_RELOC_SPU_PPU64:
145 return R_SPU_PPU64;
146 case BFD_RELOC_SPU_ADD_PIC:
147 return R_SPU_ADD_PIC;
148 }
149 }
150
151 static bfd_boolean
152 spu_elf_info_to_howto (bfd *abfd,
153 arelent *cache_ptr,
154 Elf_Internal_Rela *dst)
155 {
156 enum elf_spu_reloc_type r_type;
157
158 r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
159 /* PR 17512: file: 90c2a92e. */
160 if (r_type >= R_SPU_max)
161 {
162 /* xgettext:c-format */
163 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
164 abfd, r_type);
165 bfd_set_error (bfd_error_bad_value);
166 return FALSE;
167 }
168 cache_ptr->howto = &elf_howto_table[(int) r_type];
169 return TRUE;
170 }
171
172 static reloc_howto_type *
173 spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
174 bfd_reloc_code_real_type code)
175 {
176 enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);
177
178 if (r_type == (enum elf_spu_reloc_type) -1)
179 return NULL;
180
181 return elf_howto_table + r_type;
182 }
183
184 static reloc_howto_type *
185 spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
186 const char *r_name)
187 {
188 unsigned int i;
189
190 for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
191 if (elf_howto_table[i].name != NULL
192 && strcasecmp (elf_howto_table[i].name, r_name) == 0)
193 return &elf_howto_table[i];
194
195 return NULL;
196 }
197
198 /* Apply R_SPU_REL9 and R_SPU_REL9I relocs. */
199
static bfd_reloc_status_type
spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
	      void *data, asection *input_section,
	      bfd *output_bfd, char **error_message)
{
  bfd_size_type octets;
  bfd_vma val;
  long insn;

  /* If this is a relocatable link (output_bfd test tells us), just
     call the generic function.  Any adjustment will be done at final
     link time.  */
  if (output_bfd != NULL)
    return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
				  input_section, output_bfd, error_message);

  if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
    return bfd_reloc_outofrange;
  octets = reloc_entry->address * OCTETS_PER_BYTE (abfd, input_section);

  /* Get symbol value.  */
  val = 0;
  if (!bfd_is_com_section (symbol->section))
    val = symbol->value;
  if (symbol->section->output_section)
    val += symbol->section->output_section->vma;

  val += reloc_entry->addend;

  /* Make it pc-relative.  */
  val -= input_section->output_section->vma + input_section->output_offset;

  /* Convert the byte offset to a word offset; the reloc field holds a
     9-bit value.  NOTE(review): val is unsigned, so this range check
     accepts only small non-negative word offsets after the wraparound
     arithmetic above -- confirm intended behaviour for negative
     displacements.  */
  val >>= 2;
  if (val + 256 >= 512)
    return bfd_reloc_overflow;

  insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);

  /* Move two high bits of value to REL9I and REL9 position.
     The mask will take care of selecting the right field.  */
  val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
  insn &= ~reloc_entry->howto->dst_mask;
  insn |= val & reloc_entry->howto->dst_mask;
  bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
  return bfd_reloc_ok;
}
246
247 static bfd_boolean
248 spu_elf_new_section_hook (bfd *abfd, asection *sec)
249 {
250 if (!sec->used_by_bfd)
251 {
252 struct _spu_elf_section_data *sdata;
253
254 sdata = bfd_zalloc (abfd, sizeof (*sdata));
255 if (sdata == NULL)
256 return FALSE;
257 sec->used_by_bfd = sdata;
258 }
259
260 return _bfd_elf_new_section_hook (abfd, sec);
261 }
262
263 /* Set up overlay info for executables. */
264
265 static bfd_boolean
266 spu_elf_object_p (bfd *abfd)
267 {
268 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
269 {
270 unsigned int i, num_ovl, num_buf;
271 Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
272 Elf_Internal_Ehdr *ehdr = elf_elfheader (abfd);
273 Elf_Internal_Phdr *last_phdr = NULL;
274
275 for (num_buf = 0, num_ovl = 0, i = 0; i < ehdr->e_phnum; i++, phdr++)
276 if (phdr->p_type == PT_LOAD && (phdr->p_flags & PF_OVERLAY) != 0)
277 {
278 unsigned int j;
279
280 ++num_ovl;
281 if (last_phdr == NULL
282 || ((last_phdr->p_vaddr ^ phdr->p_vaddr) & 0x3ffff) != 0)
283 ++num_buf;
284 last_phdr = phdr;
285 for (j = 1; j < elf_numsections (abfd); j++)
286 {
287 Elf_Internal_Shdr *shdr = elf_elfsections (abfd)[j];
288
289 if (ELF_SECTION_SIZE (shdr, phdr) != 0
290 && ELF_SECTION_IN_SEGMENT (shdr, phdr))
291 {
292 asection *sec = shdr->bfd_section;
293 spu_elf_section_data (sec)->u.o.ovl_index = num_ovl;
294 spu_elf_section_data (sec)->u.o.ovl_buf = num_buf;
295 }
296 }
297 }
298 }
299 return TRUE;
300 }
301
302 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
303 strip --strip-unneeded will not remove them. */
304
305 static void
306 spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
307 {
308 if (sym->name != NULL
309 && sym->section != bfd_abs_section_ptr
310 && strncmp (sym->name, "_EAR_", 5) == 0)
311 sym->flags |= BSF_KEEP;
312 }
313
314 /* SPU ELF linker hash table. */
315
struct spu_link_hash_table
{
  /* Generic ELF linker hash table; must come first.  */
  struct elf_link_hash_table elf;

  /* Parameters passed in by spu_elf_setup.  */
  struct spu_elf_params *params;

  /* Shortcuts to overlay sections.  */
  asection *ovtab;
  asection *init;
  asection *toe;
  asection **ovl_sec;

  /* Count of stubs in each overlay section.  */
  unsigned int *stub_count;

  /* The stub section for each overlay section.  */
  asection **stub_sec;

  /* Overlay manager entry symbols: __ovly_load/__ovly_return, or the
     soft-icache branch/call handlers (see entry_names in
     spu_elf_find_overlays).  */
  struct elf_link_hash_entry *ovly_entry[2];

  /* Number of overlay buffers.  */
  unsigned int num_buf;

  /* Total number of overlays.  */
  unsigned int num_overlays;

  /* For soft icache.  */
  unsigned int line_size_log2;
  unsigned int num_lines_log2;
  unsigned int fromelem_size_log2;

  /* How much memory we have.  */
  unsigned int local_store;

  /* Count of overlay stubs needed in non-overlay area.  */
  unsigned int non_ovly_stub;

  /* Pointer to the fixup section.  */
  asection *sfixup;

  /* Set on error.  */
  unsigned int stub_err : 1;
};
359
360 /* Hijack the generic got fields for overlay stub accounting. */
361
struct got_entry
{
  /* Next stub entry on the same symbol's list.  */
  struct got_entry *next;
  /* Overlay index the stub serves; 0 means the non-overlay area
     (a non-overlay stub serves every overlay, see count_stub).  */
  unsigned int ovl;
  union {
    /* Reloc addend of the reference, for ordinary overlay stubs.  */
    bfd_vma addend;
    /* NOTE(review): alternate view of the same field, presumably the
       branch address in the soft-icache flavour; not used in this
       part of the file -- confirm against the stub-building code.  */
    bfd_vma br_addr;
  };
  /* Assigned stub address, or (bfd_vma) -1 while unallocated.  */
  bfd_vma stub_addr;
};
372
373 #define spu_hash_table(p) \
374 (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
375 == SPU_ELF_DATA ? ((struct spu_link_hash_table *) ((p)->hash)) : NULL)
376
/* One edge in the call graph: a call from some function to FUN.  */
struct call_info
{
  /* The function called.  */
  struct function_info *fun;
  /* Next call edge from the same caller.  */
  struct call_info *next;
  /* NOTE(review): the remaining fields are written and read by code
     outside this chunk; meanings below are inferred from names and
     should be confirmed there.  */
  unsigned int count;
  unsigned int max_depth;
  unsigned int is_tail : 1;
  unsigned int is_pasted : 1;
  unsigned int broken_cycle : 1;
  unsigned int priority : 13;
};
388
struct function_info
{
  /* List of functions called.  Also branches to hot/cold part of
     function.  */
  struct call_info *call_list;
  /* For hot/cold part of function, point to owner.  */
  struct function_info *start;
  /* Symbol at start of function.  */
  union {
    Elf_Internal_Sym *sym;
    struct elf_link_hash_entry *h;
  } u;
  /* Function section.  */
  asection *sec;
  /* NOTE(review): presumably associated read-only data section; set
     and used outside this chunk -- confirm there.  */
  asection *rodata;
  /* Where last called from, and number of sections called from.  */
  asection *last_caller;
  unsigned int call_count;
  /* Address range of (this part of) function.  */
  bfd_vma lo, hi;
  /* Offset where we found a store of lr, or -1 if none found.  */
  bfd_vma lr_store;
  /* Offset where we found the stack adjustment insn.  */
  bfd_vma sp_adjust;
  /* Stack usage.  */
  int stack;
  /* Distance from root of call tree.  Tail and hot/cold branches
     count as one deeper.  We aren't counting stack frames here.  */
  unsigned int depth;
  /* Set if global symbol.  */
  unsigned int global : 1;
  /* Set if known to be start of function (as distinct from a hunk
     in hot/cold section).  */
  unsigned int is_func : 1;
  /* Set if not a root node.  */
  unsigned int non_root : 1;
  /* Flags used during call tree traversal.  It's cheaper to replicate
     the visit flags than have one which needs clearing after a traversal.  */
  unsigned int visit1 : 1;
  unsigned int visit2 : 1;
  unsigned int marking : 1;
  unsigned int visit3 : 1;
  unsigned int visit4 : 1;
  unsigned int visit5 : 1;
  unsigned int visit6 : 1;
  unsigned int visit7 : 1;
};
436
struct spu_elf_stack_info
{
  /* Number of entries in use in FUN.  */
  int num_fun;
  /* NOTE(review): presumably allocated capacity of FUN; the struct is
     over-allocated so FUN extends past [1] -- confirm at the
     allocation site.  */
  int max_fun;
  /* Variable size array describing functions, one per contiguous
     address range belonging to a function.  */
  struct function_info fun[1];
};
445
446 static struct function_info *find_function (asection *, bfd_vma,
447 struct bfd_link_info *);
448
449 /* Create a spu ELF linker hash table. */
450
451 static struct bfd_link_hash_table *
452 spu_elf_link_hash_table_create (bfd *abfd)
453 {
454 struct spu_link_hash_table *htab;
455
456 htab = bfd_zmalloc (sizeof (*htab));
457 if (htab == NULL)
458 return NULL;
459
460 if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
461 _bfd_elf_link_hash_newfunc,
462 sizeof (struct elf_link_hash_entry),
463 SPU_ELF_DATA))
464 {
465 free (htab);
466 return NULL;
467 }
468
469 htab->elf.init_got_refcount.refcount = 0;
470 htab->elf.init_got_refcount.glist = NULL;
471 htab->elf.init_got_offset.offset = 0;
472 htab->elf.init_got_offset.glist = NULL;
473 return &htab->elf.root;
474 }
475
476 void
477 spu_elf_setup (struct bfd_link_info *info, struct spu_elf_params *params)
478 {
479 bfd_vma max_branch_log2;
480
481 struct spu_link_hash_table *htab = spu_hash_table (info);
482 htab->params = params;
483 htab->line_size_log2 = bfd_log2 (htab->params->line_size);
484 htab->num_lines_log2 = bfd_log2 (htab->params->num_lines);
485
486 /* For the software i-cache, we provide a "from" list whose size
487 is a power-of-two number of quadwords, big enough to hold one
488 byte per outgoing branch. Compute this number here. */
489 max_branch_log2 = bfd_log2 (htab->params->max_branch);
490 htab->fromelem_size_log2 = max_branch_log2 > 4 ? max_branch_log2 - 4 : 0;
491 }
492
493 /* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
494 to (hash, NULL) for global symbols, and (NULL, sym) for locals. Set
495 *SYMSECP to the symbol's section. *LOCSYMSP caches local syms. */
496
497 static bfd_boolean
498 get_sym_h (struct elf_link_hash_entry **hp,
499 Elf_Internal_Sym **symp,
500 asection **symsecp,
501 Elf_Internal_Sym **locsymsp,
502 unsigned long r_symndx,
503 bfd *ibfd)
504 {
505 Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
506
507 if (r_symndx >= symtab_hdr->sh_info)
508 {
509 struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
510 struct elf_link_hash_entry *h;
511
512 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
513 while (h->root.type == bfd_link_hash_indirect
514 || h->root.type == bfd_link_hash_warning)
515 h = (struct elf_link_hash_entry *) h->root.u.i.link;
516
517 if (hp != NULL)
518 *hp = h;
519
520 if (symp != NULL)
521 *symp = NULL;
522
523 if (symsecp != NULL)
524 {
525 asection *symsec = NULL;
526 if (h->root.type == bfd_link_hash_defined
527 || h->root.type == bfd_link_hash_defweak)
528 symsec = h->root.u.def.section;
529 *symsecp = symsec;
530 }
531 }
532 else
533 {
534 Elf_Internal_Sym *sym;
535 Elf_Internal_Sym *locsyms = *locsymsp;
536
537 if (locsyms == NULL)
538 {
539 locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
540 if (locsyms == NULL)
541 locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr,
542 symtab_hdr->sh_info,
543 0, NULL, NULL, NULL);
544 if (locsyms == NULL)
545 return FALSE;
546 *locsymsp = locsyms;
547 }
548 sym = locsyms + r_symndx;
549
550 if (hp != NULL)
551 *hp = NULL;
552
553 if (symp != NULL)
554 *symp = sym;
555
556 if (symsecp != NULL)
557 *symsecp = bfd_section_from_elf_index (ibfd, sym->st_shndx);
558 }
559
560 return TRUE;
561 }
562
563 /* Create the note section if not already present. This is done early so
564 that the linker maps the sections to the right place in the output. */
565
bfd_boolean
spu_elf_create_sections (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;

  /* If some input already provides a SPUNAME note section, don't
     create another.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
      break;

  if (ibfd == NULL)
    {
      /* Make SPU_PTNOTE_SPUNAME section.  */
      asection *s;
      size_t name_len;
      size_t size;
      bfd_byte *data;
      flagword flags;

      ibfd = info->input_bfds;
      /* This should really be SEC_LINKER_CREATED, but then we'd need
	 to write out the section ourselves.  */
      flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
      if (s == NULL
	  || !bfd_set_section_alignment (s, 4))
	return FALSE;
      /* Because we didn't set SEC_LINKER_CREATED we need to set the
	 proper section type.  */
      elf_section_type (s) = SHT_NOTE;

      /* Note layout: 12-byte header (namesz, descsz, type), then the
	 name (SPU_PLUGIN_NAME) and descriptor (output filename), each
	 padded to a 4-byte boundary.  */
      name_len = strlen (bfd_get_filename (info->output_bfd)) + 1;
      size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
      size += (name_len + 3) & -4;

      if (!bfd_set_section_size (s, size))
	return FALSE;

      data = bfd_zalloc (ibfd, size);
      if (data == NULL)
	return FALSE;

      bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
      bfd_put_32 (ibfd, name_len, data + 4);
      bfd_put_32 (ibfd, 1, data + 8);
      memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
      memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
	      bfd_get_filename (info->output_bfd), name_len);
      s->contents = data;
    }

  if (htab->params->emit_fixups)
    {
      /* Create the linker-generated .fixup section, hung off the
	 dynobj so it is emitted once.  */
      asection *s;
      flagword flags;

      if (htab->elf.dynobj == NULL)
	htab->elf.dynobj = ibfd;
      ibfd = htab->elf.dynobj;
      flags = (SEC_LOAD | SEC_ALLOC | SEC_READONLY | SEC_HAS_CONTENTS
	       | SEC_IN_MEMORY | SEC_LINKER_CREATED);
      s = bfd_make_section_anyway_with_flags (ibfd, ".fixup", flags);
      if (s == NULL || !bfd_set_section_alignment (s, 2))
	return FALSE;
      htab->sfixup = s;
    }

  return TRUE;
}
635
636 /* qsort predicate to sort sections by vma. */
637
638 static int
639 sort_sections (const void *a, const void *b)
640 {
641 const asection *const *s1 = a;
642 const asection *const *s2 = b;
643 bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;
644
645 if (delta != 0)
646 return delta < 0 ? -1 : 1;
647
648 return (*s1)->index - (*s2)->index;
649 }
650
651 /* Identify overlays in the output bfd, and number them.
652 Returns 0 on error, 1 if no overlays, 2 if overlays. */
653
int
spu_elf_find_overlays (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection **alloc_sec;
  unsigned int i, n, ovl_index, num_buf;
  asection *s;
  bfd_vma ovl_end;
  /* Overlay manager entry points, indexed [load/return][flavour].  */
  static const char *const entry_names[2][2] = {
    { "__ovly_load", "__icache_br_handler" },
    { "__ovly_return", "__icache_call_handler" }
  };

  if (info->output_bfd->section_count < 2)
    return 1;

  alloc_sec
    = bfd_malloc (info->output_bfd->section_count * sizeof (*alloc_sec));
  if (alloc_sec == NULL)
    return 0;

  /* Pick out all the alloced sections.  */
  for (n = 0, s = info->output_bfd->sections; s != NULL; s = s->next)
    if ((s->flags & SEC_ALLOC) != 0
	&& (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
	&& s->size != 0)
      alloc_sec[n++] = s;

  if (n == 0)
    {
      free (alloc_sec);
      return 1;
    }

  /* Sort them by vma.  */
  qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);

  ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      unsigned int prev_buf = 0, set_id = 0;

      /* Look for an overlapping vma to find the first overlay section.  */
      bfd_vma vma_start = 0;

      for (i = 1; i < n; i++)
	{
	  s = alloc_sec[i];
	  if (s->vma < ovl_end)
	    {
	      asection *s0 = alloc_sec[i - 1];
	      vma_start = s0->vma;
	      /* The cache area spans num_lines lines of line_size
		 bytes starting at the first overlapped section.  */
	      ovl_end = (s0->vma
			 + ((bfd_vma) 1
			    << (htab->num_lines_log2 + htab->line_size_log2)));
	      --i;
	      break;
	    }
	  else
	    ovl_end = s->vma + s->size;
	}

      /* Now find any sections within the cache area.  */
      for (ovl_index = 0, num_buf = 0; i < n; i++)
	{
	  s = alloc_sec[i];
	  if (s->vma >= ovl_end)
	    break;

	  /* A section in an overlay area called .ovl.init is not
	     an overlay, in the sense that it might be loaded in
	     by the overlay manager, but rather the initial
	     section contents for the overlay buffer.  */
	  if (strncmp (s->name, ".ovl.init", 9) != 0)
	    {
	      /* num_buf is the 1-based cache line; set_id counts
		 successive sections mapped to the same line.  */
	      num_buf = ((s->vma - vma_start) >> htab->line_size_log2) + 1;
	      set_id = (num_buf == prev_buf)? set_id + 1 : 0;
	      prev_buf = num_buf;

	      if ((s->vma - vma_start) & (htab->params->line_size - 1))
		{
		  info->callbacks->einfo (_("%X%P: overlay section %pA "
					    "does not start on a cache line\n"),
					  s);
		  bfd_set_error (bfd_error_bad_value);
		  return 0;
		}
	      else if (s->size > htab->params->line_size)
		{
		  info->callbacks->einfo (_("%X%P: overlay section %pA "
					    "is larger than a cache line\n"),
					  s);
		  bfd_set_error (bfd_error_bad_value);
		  return 0;
		}

	      alloc_sec[ovl_index++] = s;
	      spu_elf_section_data (s)->u.o.ovl_index
		= (set_id << htab->num_lines_log2) + num_buf;
	      spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
	    }
	}

      /* Ensure there are no more overlay sections.  */
      for ( ; i < n; i++)
	{
	  s = alloc_sec[i];
	  if (s->vma < ovl_end)
	    {
	      info->callbacks->einfo (_("%X%P: overlay section %pA "
					"is not in cache area\n"),
				      alloc_sec[i-1]);
	      bfd_set_error (bfd_error_bad_value);
	      return 0;
	    }
	  else
	    ovl_end = s->vma + s->size;
	}
    }
  else
    {
      /* Look for overlapping vmas.  Any with overlap must be overlays.
	 Count them.  Also count the number of overlay regions.  */
      for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
	{
	  s = alloc_sec[i];
	  if (s->vma < ovl_end)
	    {
	      asection *s0 = alloc_sec[i - 1];

	      /* The first section of an overlap region wasn't known
		 to be an overlay until now; mark it too.  */
	      if (spu_elf_section_data (s0)->u.o.ovl_index == 0)
		{
		  ++num_buf;
		  if (strncmp (s0->name, ".ovl.init", 9) != 0)
		    {
		      alloc_sec[ovl_index] = s0;
		      spu_elf_section_data (s0)->u.o.ovl_index = ++ovl_index;
		      spu_elf_section_data (s0)->u.o.ovl_buf = num_buf;
		    }
		  else
		    ovl_end = s->vma + s->size;
		}
	      if (strncmp (s->name, ".ovl.init", 9) != 0)
		{
		  alloc_sec[ovl_index] = s;
		  spu_elf_section_data (s)->u.o.ovl_index = ++ovl_index;
		  spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
		  if (s0->vma != s->vma)
		    {
		      /* xgettext:c-format */
		      info->callbacks->einfo (_("%X%P: overlay sections %pA "
						"and %pA do not start at the "
						"same address\n"),
					      s0, s);
		      bfd_set_error (bfd_error_bad_value);
		      return 0;
		    }
		  if (ovl_end < s->vma + s->size)
		    ovl_end = s->vma + s->size;
		}
	    }
	  else
	    ovl_end = s->vma + s->size;
	}
    }

  htab->num_overlays = ovl_index;
  htab->num_buf = num_buf;
  /* Keep the compacted list of overlay sections; ownership of
     alloc_sec passes to the hash table.  */
  htab->ovl_sec = alloc_sec;

  if (ovl_index == 0)
    return 1;

  /* Look up (creating undefined references if needed) the overlay
     manager entry symbols for the selected flavour.  */
  for (i = 0; i < 2; i++)
    {
      const char *name;
      struct elf_link_hash_entry *h;

      name = entry_names[i][htab->params->ovly_flavour];
      h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
      if (h == NULL)
	return 0;

      if (h->root.type == bfd_link_hash_new)
	{
	  h->root.type = bfd_link_hash_undefined;
	  h->ref_regular = 1;
	  h->ref_regular_nonweak = 1;
	  h->non_elf = 0;
	}
      htab->ovly_entry[i] = h;
    }

  return 2;
}
849
850 /* Non-zero to use bra in overlay stubs rather than br. */
851 #define BRA_STUBS 0
852
853 #define BRA 0x30000000
854 #define BRASL 0x31000000
855 #define BR 0x32000000
856 #define BRSL 0x33000000
857 #define NOP 0x40200000
858 #define LNOP 0x00200000
859 #define ILA 0x42000000
860
861 /* Return true for all relative and absolute branch instructions.
862 bra 00110000 0..
863 brasl 00110001 0..
864 br 00110010 0..
865 brsl 00110011 0..
866 brz 00100000 0..
867 brnz 00100001 0..
868 brhz 00100010 0..
869 brhnz 00100011 0.. */
870
871 static bfd_boolean
872 is_branch (const unsigned char *insn)
873 {
874 return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
875 }
876
877 /* Return true for all indirect branch instructions.
878 bi 00110101 000
879 bisl 00110101 001
880 iret 00110101 010
881 bisled 00110101 011
882 biz 00100101 000
883 binz 00100101 001
884 bihz 00100101 010
885 bihnz 00100101 011 */
886
887 static bfd_boolean
888 is_indirect_branch (const unsigned char *insn)
889 {
890 return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
891 }
892
893 /* Return true for branch hint instructions.
894 hbra 0001000..
895 hbrr 0001001.. */
896
897 static bfd_boolean
898 is_hint (const unsigned char *insn)
899 {
900 return (insn[0] & 0xfc) == 0x10;
901 }
902
903 /* True if INPUT_SECTION might need overlay stubs. */
904
905 static bfd_boolean
906 maybe_needs_stubs (asection *input_section)
907 {
908 /* No stubs for debug sections and suchlike. */
909 if ((input_section->flags & SEC_ALLOC) == 0)
910 return FALSE;
911
912 /* No stubs for link-once sections that will be discarded. */
913 if (input_section->output_section == bfd_abs_section_ptr)
914 return FALSE;
915
916 /* Don't create stubs for .eh_frame references. */
917 if (strcmp (input_section->name, ".eh_frame") == 0)
918 return FALSE;
919
920 return TRUE;
921 }
922
/* Kinds of overlay stub a reloc may require.  */
enum _stub_type
{
  /* No stub needed.  */
  no_stub,
  /* Stub for a function call.  */
  call_ovl_stub,
  /* Branch stubs; brXXX encodes the three "lrlive" bits extracted
     from the branch instruction (see needs_ovl_stub, which returns
     br000_ovl_stub + lrlive).  */
  br000_ovl_stub,
  br001_ovl_stub,
  br010_ovl_stub,
  br011_ovl_stub,
  br100_ovl_stub,
  br101_ovl_stub,
  br110_ovl_stub,
  br111_ovl_stub,
  /* Stub in the non-overlay area, for taking a function's address.  */
  nonovl_stub,
  /* Reading the instruction at the reloc failed.  */
  stub_error
};
938
939 /* Return non-zero if this reloc symbol should go via an overlay stub.
940 Return 2 if the stub must be in non-overlay area. */
941
static enum _stub_type
needs_ovl_stub (struct elf_link_hash_entry *h,
		Elf_Internal_Sym *sym,
		asection *sym_sec,
		asection *input_section,
		Elf_Internal_Rela *irela,
		bfd_byte *contents,
		struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  enum elf_spu_reloc_type r_type;
  unsigned int sym_type;
  bfd_boolean branch, hint, call;
  enum _stub_type ret = no_stub;
  bfd_byte insn[4];

  /* Undefined or discarded targets can't need a stub.  */
  if (sym_sec == NULL
      || sym_sec->output_section == bfd_abs_section_ptr
      || spu_elf_section_data (sym_sec->output_section) == NULL)
    return ret;

  if (h != NULL)
    {
      /* Ensure no stubs for user supplied overlay manager syms.  */
      if (h == htab->ovly_entry[0] || h == htab->ovly_entry[1])
	return ret;

      /* setjmp always goes via an overlay stub, because then the return
	 and hence the longjmp goes via __ovly_return.  That magically
	 makes setjmp/longjmp between overlays work.  */
      if (strncmp (h->root.root.string, "setjmp", 6) == 0
	  && (h->root.root.string[6] == '\0' || h->root.root.string[6] == '@'))
	ret = call_ovl_stub;
    }

  if (h != NULL)
    sym_type = h->type;
  else
    sym_type = ELF_ST_TYPE (sym->st_info);

  r_type = ELF32_R_TYPE (irela->r_info);
  branch = FALSE;
  hint = FALSE;
  call = FALSE;
  if (r_type == R_SPU_REL16 || r_type == R_SPU_ADDR16)
    {
      /* Fetch the instruction at the reloc so we can classify it.  */
      if (contents == NULL)
	{
	  contents = insn;
	  if (!bfd_get_section_contents (input_section->owner,
					 input_section,
					 contents,
					 irela->r_offset, 4))
	    return stub_error;
	}
      else
	contents += irela->r_offset;

      branch = is_branch (contents);
      hint = is_hint (contents);
      if (branch || hint)
	{
	  /* brsl (0x33) or brasl (0x31): a call.  */
	  call = (contents[0] & 0xfd) == 0x31;
	  if (call
	      && sym_type != STT_FUNC
	      && contents != insn)
	    {
	      /* It's common for people to write assembly and forget
		 to give function symbols the right type.  Handle
		 calls to such symbols, but warn so that (hopefully)
		 people will fix their code.  We need the symbol
		 type to be correct to distinguish function pointer
		 initialisation from other pointer initialisations.  */
	      const char *sym_name;

	      if (h != NULL)
		sym_name = h->root.root.string;
	      else
		{
		  Elf_Internal_Shdr *symtab_hdr;
		  symtab_hdr = &elf_tdata (input_section->owner)->symtab_hdr;
		  sym_name = bfd_elf_sym_name (input_section->owner,
					       symtab_hdr,
					       sym,
					       sym_sec);
		}
	      _bfd_error_handler
		/* xgettext:c-format */
		(_("warning: call to non-function symbol %s defined in %pB"),
		 sym_name, sym_sec->owner);

	    }
	}
    }

  /* Soft-icache only stubs branches; data references to non-function,
     non-code targets need no stub either.  */
  if ((!branch && htab->params->ovly_flavour == ovly_soft_icache)
      || (sym_type != STT_FUNC
	  && !(branch || hint)
	  && (sym_sec->flags & SEC_CODE) == 0))
    return no_stub;

  /* Usually, symbols in non-overlay sections don't need stubs.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index == 0
      && !htab->params->non_overlay_stubs)
    return ret;

  /* A reference from some other section to a symbol in an overlay
     section needs a stub.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index
       != spu_elf_section_data (input_section->output_section)->u.o.ovl_index)
    {
      unsigned int lrlive = 0;
      if (branch)
	/* The branch instruction's lrlive bits select the stub
	   variant.  */
	lrlive = (contents[1] & 0x70) >> 4;

      if (!lrlive && (call || sym_type == STT_FUNC))
	ret = call_ovl_stub;
      else
	ret = br000_ovl_stub + lrlive;
    }

  /* If this insn isn't a branch then we are possibly taking the
     address of a function and passing it out somehow.  Soft-icache code
     always generates inline code to do indirect branches.  */
  if (!(branch || hint)
      && sym_type == STT_FUNC
      && htab->params->ovly_flavour != ovly_soft_icache)
    ret = nonovl_stub;

  return ret;
}
1073
/* Record that a stub of STUB_TYPE is needed for the symbol (H or the
   local indexed by IRELA) referenced from ISEC, bumping the per-overlay
   stub counts.  Returns FALSE on allocation failure.  */

static bfd_boolean
count_stub (struct spu_link_hash_table *htab,
	    bfd *ibfd,
	    asection *isec,
	    enum _stub_type stub_type,
	    struct elf_link_hash_entry *h,
	    const Elf_Internal_Rela *irela)
{
  unsigned int ovl = 0;
  struct got_entry *g, **head;
  bfd_vma addend;

  /* If this instruction is a branch or call, we need a stub
     for it.  One stub per function per overlay.
     If it isn't a branch, then we are taking the address of
     this function so need a stub in the non-overlay area
     for it.  One stub per function.  */
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

  /* Stub lists hang off the hash entry for globals, or off a lazily
     created per-bfd array indexed by symbol number for locals.  */
  if (h != NULL)
    head = &h->got.glist;
  else
    {
      if (elf_local_got_ents (ibfd) == NULL)
	{
	  bfd_size_type amt = (elf_tdata (ibfd)->symtab_hdr.sh_info
			       * sizeof (*elf_local_got_ents (ibfd)));
	  elf_local_got_ents (ibfd) = bfd_zmalloc (amt);
	  if (elf_local_got_ents (ibfd) == NULL)
	    return FALSE;
	}
      head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
    }

  /* Soft-icache just counts; no per-target got_entry tracking.  */
  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      htab->stub_count[ovl] += 1;
      return TRUE;
    }

  addend = 0;
  if (irela != NULL)
    addend = irela->r_addend;

  if (ovl == 0)
    {
      /* A stub in the non-overlay area serves references from every
	 overlay, making per-overlay stubs for this target redundant.  */
      struct got_entry *gnext;

      for (g = *head; g != NULL; g = g->next)
	if (g->addend == addend && g->ovl == 0)
	  break;

      if (g == NULL)
	{
	  /* Need a new non-overlay area stub.  Zap other stubs.
	     NOTE(review): freed entries are not unlinked from *head,
	     leaving the list containing dangling pointers until the
	     new entry is prepended below -- confirm how later
	     traversals avoid the freed nodes.  */
	  for (g = *head; g != NULL; g = gnext)
	    {
	      gnext = g->next;
	      if (g->addend == addend)
		{
		  htab->stub_count[g->ovl] -= 1;
		  free (g);
		}
	    }
	}
    }
  else
    {
      /* An existing stub for this overlay, or one in the non-overlay
	 area (ovl 0), already covers this reference.  */
      for (g = *head; g != NULL; g = g->next)
	if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
	  break;
    }

  if (g == NULL)
    {
      g = bfd_malloc (sizeof *g);
      if (g == NULL)
	return FALSE;
      g->ovl = ovl;
      g->addend = addend;
      g->stub_addr = (bfd_vma) -1;
      g->next = *head;
      *head = g;

      htab->stub_count[ovl] += 1;
    }

  return TRUE;
}
1164
1165 /* Support two sizes of overlay stubs, a slower more compact stub of two
1166 instructions, and a faster stub of four instructions.
1167 Soft-icache stubs are four or eight words. */
1168
1169 static unsigned int
1170 ovl_stub_size (struct spu_elf_params *params)
1171 {
1172 return 16 << params->ovly_flavour >> params->compact_stub;
1173 }
1174
1175 static unsigned int
1176 ovl_stub_size_log2 (struct spu_elf_params *params)
1177 {
1178 return 4 + params->ovly_flavour - params->compact_stub;
1179 }
1180
1181 /* Two instruction overlay stubs look like:
1182
1183 brsl $75,__ovly_load
1184 .word target_ovl_and_address
1185
1186 ovl_and_address is a word with the overlay number in the top 14 bits
1187 and local store address in the bottom 18 bits.
1188
1189 Four instruction overlay stubs look like:
1190
1191 ila $78,ovl_number
1192 lnop
1193 ila $79,target_address
1194 br __ovly_load
1195
1196 Software icache stubs are:
1197
1198 .word target_index
1199 .word target_ia;
1200 .word lrlive_branchlocalstoreaddr;
1201 brasl $75,__icache_br_handler
1202 .quad xor_pattern
1203 */
1204
/* Emit an overlay stub for the reference described by ISEC/IRELA in
   IBFD (IRELA is NULL for _SPUEAR_ entry symbols), targeting DEST in
   DEST_SEC.  The stub is written at the current end of the stub
   section belonging to the referring overlay, and the matching
   got_entry (created earlier by count_stub, or created here for
   soft-icache) records the stub address.  Optionally also defines an
   "NNNNNNNN.ovl_call.<sym>" symbol on the stub.  Returns FALSE on
   error; branch-overflow problems set htab->stub_err instead.  */

static bfd_boolean
build_stub (struct bfd_link_info *info,
	    bfd *ibfd,
	    asection *isec,
	    enum _stub_type stub_type,
	    struct elf_link_hash_entry *h,
	    const Elf_Internal_Rela *irela,
	    bfd_vma dest,
	    asection *dest_sec)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  unsigned int ovl, dest_ovl, set_id;
  struct got_entry *g, **head;
  asection *sec;
  bfd_vma addend, from, to, br_dest, patt;
  unsigned int lrlive;

  ovl = 0;
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

  if (h != NULL)
    head = &h->got.glist;
  else
    head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);

  addend = 0;
  if (irela != NULL)
    addend = irela->r_addend;

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      /* Soft-icache: one fresh entry per branch; record where the
	 referring branch instruction lives.  */
      g = bfd_malloc (sizeof *g);
      if (g == NULL)
	return FALSE;
      g->ovl = ovl;
      g->br_addr = 0;
      if (irela != NULL)
	g->br_addr = (irela->r_offset
		      + isec->output_offset
		      + isec->output_section->vma);
      g->next = *head;
      *head = g;
    }
  else
    {
      /* Find the entry count_stub recorded; it must exist.  */
      for (g = *head; g != NULL; g = g->next)
	if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
	  break;
      if (g == NULL)
	abort ();

      /* A non-overlay stub serves overlay references too.  */
      if (g->ovl == 0 && ovl != 0)
	return TRUE;

      /* Already built.  */
      if (g->stub_addr != (bfd_vma) -1)
	return TRUE;
    }

  sec = htab->stub_sec[ovl];
  dest += dest_sec->output_offset + dest_sec->output_section->vma;
  from = sec->size + sec->output_offset + sec->output_section->vma;
  g->stub_addr = from;
  to = (htab->ovly_entry[0]->root.u.def.value
	+ htab->ovly_entry[0]->root.u.def.section->output_offset
	+ htab->ovly_entry[0]->root.u.def.section->output_section->vma);

  /* All addresses involved must be word aligned.  */
  if (((dest | to | from) & 3) != 0)
    {
      htab->stub_err = 1;
      return FALSE;
    }
  dest_ovl = spu_elf_section_data (dest_sec->output_section)->u.o.ovl_index;

  if (htab->params->ovly_flavour == ovly_normal
      && !htab->params->compact_stub)
    {
      /* Four-instruction stub: ila/lnop/ila/br[a].  */
      bfd_put_32 (sec->owner, ILA + ((dest_ovl << 7) & 0x01ffff80) + 78,
		  sec->contents + sec->size);
      bfd_put_32 (sec->owner, LNOP,
		  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, ILA + ((dest << 7) & 0x01ffff80) + 79,
		  sec->contents + sec->size + 8);
      if (!BRA_STUBS)
	bfd_put_32 (sec->owner, BR + (((to - (from + 12)) << 5) & 0x007fff80),
		    sec->contents + sec->size + 12);
      else
	bfd_put_32 (sec->owner, BRA + ((to << 5) & 0x007fff80),
		    sec->contents + sec->size + 12);
    }
  else if (htab->params->ovly_flavour == ovly_normal
	   && htab->params->compact_stub)
    {
      /* Two-word stub: br[a]sl + overlay/address word.  */
      if (!BRA_STUBS)
	bfd_put_32 (sec->owner, BRSL + (((to - from) << 5) & 0x007fff80) + 75,
		    sec->contents + sec->size);
      else
	bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
		    sec->contents + sec->size);
      bfd_put_32 (sec->owner, (dest & 0x3ffff) | (dest_ovl << 18),
		  sec->contents + sec->size + 4);
    }
  else if (htab->params->ovly_flavour == ovly_soft_icache
	   && htab->params->compact_stub)
    {
      /* Determine which link-register/stack state is live at the
	 branch, for the icache manager.  */
      lrlive = 0;
      if (stub_type == nonovl_stub)
	;
      else if (stub_type == call_ovl_stub)
	/* A brsl makes lr live and *(*sp+16) is live.
	   Tail calls have the same liveness.  */
	lrlive = 5;
      else if (!htab->params->lrlive_analysis)
	/* Assume stack frame and lr save.  */
	lrlive = 1;
      else if (irela != NULL)
	{
	  /* Analyse branch instructions.  */
	  struct function_info *caller;
	  bfd_vma off;

	  caller = find_function (isec, irela->r_offset, info);
	  if (caller->start == NULL)
	    off = irela->r_offset;
	  else
	    {
	      struct function_info *found = NULL;

	      /* Find the earliest piece of this function that
		 has frame adjusting instructions.  We might
		 see dynamic frame adjustment (eg. for alloca)
		 in some later piece, but functions using
		 alloca always set up a frame earlier.  Frame
		 setup instructions are always in one piece.  */
	      if (caller->lr_store != (bfd_vma) -1
		  || caller->sp_adjust != (bfd_vma) -1)
		found = caller;
	      while (caller->start != NULL)
		{
		  caller = caller->start;
		  if (caller->lr_store != (bfd_vma) -1
		      || caller->sp_adjust != (bfd_vma) -1)
		    found = caller;
		}
	      if (found != NULL)
		caller = found;
	      off = (bfd_vma) -1;
	    }

	  if (off > caller->sp_adjust)
	    {
	      if (off > caller->lr_store)
		/* Only *(*sp+16) is live.  */
		lrlive = 1;
	      else
		/* If no lr save, then we must be in a
		   leaf function with a frame.
		   lr is still live.  */
		lrlive = 4;
	    }
	  else if (off > caller->lr_store)
	    {
	      /* Between lr save and stack adjust.  */
	      lrlive = 3;
	      /* This should never happen since prologues won't
		 be split here.  */
	      BFD_ASSERT (0);
	    }
	  else
	    /* On entry to function.  */
	    lrlive = 5;

	  if (stub_type != br000_ovl_stub
	      && lrlive != stub_type - br000_ovl_stub)
	    /* xgettext:c-format */
	    info->callbacks->einfo (_("%pA:0x%v lrlive .brinfo (%u) differs "
				      "from analysis (%u)\n"),
				    isec, irela->r_offset, lrlive,
				    stub_type - br000_ovl_stub);
	}

      /* If given lrlive info via .brinfo, use it.  */
      if (stub_type > br000_ovl_stub)
	lrlive = stub_type - br000_ovl_stub;

      /* Calls from the non-overlay area go via the second overlay
	 manager entry point.  */
      if (ovl == 0)
	to = (htab->ovly_entry[1]->root.u.def.value
	      + htab->ovly_entry[1]->root.u.def.section->output_offset
	      + htab->ovly_entry[1]->root.u.def.section->output_section->vma);

      /* The branch that uses this stub goes to stub_addr + 4.  We'll
	 set up an xor pattern that can be used by the icache manager
	 to modify this branch to go directly to its destination.  */
      g->stub_addr += 4;
      br_dest = g->stub_addr;
      if (irela == NULL)
	{
	  /* Except in the case of _SPUEAR_ stubs, the branch in
	     question is the one in the stub itself.  */
	  BFD_ASSERT (stub_type == nonovl_stub);
	  g->br_addr = g->stub_addr;
	  br_dest = to;
	}

      set_id = ((dest_ovl - 1) >> htab->num_lines_log2) + 1;
      bfd_put_32 (sec->owner, (set_id << 18) | (dest & 0x3ffff),
		  sec->contents + sec->size);
      bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
		  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, (lrlive << 29) | (g->br_addr & 0x3ffff),
		  sec->contents + sec->size + 8);
      patt = dest ^ br_dest;
      if (irela != NULL && ELF32_R_TYPE (irela->r_info) == R_SPU_REL16)
	patt = (dest - g->br_addr) ^ (br_dest - g->br_addr);
      bfd_put_32 (sec->owner, (patt << 5) & 0x007fff80,
		  sec->contents + sec->size + 12);

      if (ovl == 0)
	/* Extra space for linked list entries.  */
	sec->size += 16;
    }
  else
    abort ();

  sec->size += ovl_stub_size (htab->params);

  if (htab->params->emit_stub_syms)
    {
      size_t len;
      char *name;
      int add;

      /* Name is "NNNNNNNN.ovl_call." followed by the symbol name, or
	 "secid:symndx" for local syms, optionally "+addend".  */
      len = 8 + sizeof (".ovl_call.") - 1;
      if (h != NULL)
	len += strlen (h->root.root.string);
      else
	len += 8 + 1 + 8;
      add = 0;
      if (irela != NULL)
	add = (int) irela->r_addend & 0xffffffff;
      if (add != 0)
	len += 1 + 8;
      name = bfd_malloc (len + 1);
      if (name == NULL)
	return FALSE;

      sprintf (name, "%08x.ovl_call.", g->ovl);
      if (h != NULL)
	strcpy (name + 8 + sizeof (".ovl_call.") - 1, h->root.root.string);
      else
	sprintf (name + 8 + sizeof (".ovl_call.") - 1, "%x:%x",
		 dest_sec->id & 0xffffffff,
		 (int) ELF32_R_SYM (irela->r_info) & 0xffffffff);
      if (add != 0)
	sprintf (name + len - 9, "+%x", add);

      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
      free (name);
      if (h == NULL)
	return FALSE;
      if (h->root.type == bfd_link_hash_new)
	{
	  h->root.type = bfd_link_hash_defined;
	  h->root.u.def.section = sec;
	  h->size = ovl_stub_size (htab->params);
	  h->root.u.def.value = sec->size - h->size;
	  h->type = STT_FUNC;
	  h->ref_regular = 1;
	  h->def_regular = 1;
	  h->ref_regular_nonweak = 1;
	  h->forced_local = 1;
	  h->non_elf = 0;
	}
    }

  return TRUE;
}
1482
1483 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
1484 symbols. */
1485
1486 static bfd_boolean
1487 allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1488 {
1489 /* Symbols starting with _SPUEAR_ need a stub because they may be
1490 invoked by the PPU. */
1491 struct bfd_link_info *info = inf;
1492 struct spu_link_hash_table *htab = spu_hash_table (info);
1493 asection *sym_sec;
1494
1495 if ((h->root.type == bfd_link_hash_defined
1496 || h->root.type == bfd_link_hash_defweak)
1497 && h->def_regular
1498 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
1499 && (sym_sec = h->root.u.def.section) != NULL
1500 && sym_sec->output_section != bfd_abs_section_ptr
1501 && spu_elf_section_data (sym_sec->output_section) != NULL
1502 && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
1503 || htab->params->non_overlay_stubs))
1504 {
1505 return count_stub (htab, NULL, NULL, nonovl_stub, h, NULL);
1506 }
1507
1508 return TRUE;
1509 }
1510
1511 static bfd_boolean
1512 build_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1513 {
1514 /* Symbols starting with _SPUEAR_ need a stub because they may be
1515 invoked by the PPU. */
1516 struct bfd_link_info *info = inf;
1517 struct spu_link_hash_table *htab = spu_hash_table (info);
1518 asection *sym_sec;
1519
1520 if ((h->root.type == bfd_link_hash_defined
1521 || h->root.type == bfd_link_hash_defweak)
1522 && h->def_regular
1523 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
1524 && (sym_sec = h->root.u.def.section) != NULL
1525 && sym_sec->output_section != bfd_abs_section_ptr
1526 && spu_elf_section_data (sym_sec->output_section) != NULL
1527 && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
1528 || htab->params->non_overlay_stubs))
1529 {
1530 return build_stub (info, NULL, NULL, nonovl_stub, h, NULL,
1531 h->root.u.def.value, sym_sec);
1532 }
1533
1534 return TRUE;
1535 }
1536
/* Size or build stubs.  Walks every reloc of every SPU input section;
   when BUILD is FALSE, counts required stubs via count_stub, when TRUE
   emits them via build_stub.  Returns FALSE on error.  */

static bfd_boolean
process_stubs (struct bfd_link_info *info, bfd_boolean build)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      extern const bfd_target spu_elf32_vec;
      Elf_Internal_Shdr *symtab_hdr;
      asection *isec;
      Elf_Internal_Sym *local_syms = NULL;

      /* Skip non-SPU input files.  */
      if (ibfd->xvec != &spu_elf32_vec)
	continue;

      /* We'll need the symbol table in a second.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      if (symtab_hdr->sh_info == 0)
	continue;

      /* Walk over each section attached to the input bfd.  */
      for (isec = ibfd->sections; isec != NULL; isec = isec->next)
	{
	  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;

	  /* If there aren't any relocs, then there's nothing more to do.  */
	  if ((isec->flags & SEC_RELOC) == 0
	      || isec->reloc_count == 0)
	    continue;

	  if (!maybe_needs_stubs (isec))
	    continue;

	  /* Get the relocs.  */
	  internal_relocs = _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
						       info->keep_memory);
	  if (internal_relocs == NULL)
	    goto error_ret_free_local;

	  /* Now examine each relocation.  */
	  irela = internal_relocs;
	  irelaend = irela + isec->reloc_count;
	  for (; irela < irelaend; irela++)
	    {
	      enum elf_spu_reloc_type r_type;
	      unsigned int r_indx;
	      asection *sym_sec;
	      Elf_Internal_Sym *sym;
	      struct elf_link_hash_entry *h;
	      enum _stub_type stub_type;

	      r_type = ELF32_R_TYPE (irela->r_info);
	      r_indx = ELF32_R_SYM (irela->r_info);

	      if (r_type >= R_SPU_max)
		{
		  bfd_set_error (bfd_error_bad_value);
		  /* NB: the two labels below are entered by fall
		     through as well as by goto; the first frees the
		     reloc buffer, the second the local syms.  */
		error_ret_free_internal:
		  if (elf_section_data (isec)->relocs != internal_relocs)
		    free (internal_relocs);
		error_ret_free_local:
		  if (local_syms != NULL
		      && (symtab_hdr->contents
			  != (unsigned char *) local_syms))
		    free (local_syms);
		  return FALSE;
		}

	      /* Determine the reloc target section.  */
	      if (!get_sym_h (&h, &sym, &sym_sec, &local_syms, r_indx, ibfd))
		goto error_ret_free_internal;

	      stub_type = needs_ovl_stub (h, sym, sym_sec, isec, irela,
					  NULL, info);
	      if (stub_type == no_stub)
		continue;
	      else if (stub_type == stub_error)
		goto error_ret_free_internal;

	      /* Lazily allocate the per-overlay stub counters.  */
	      if (htab->stub_count == NULL)
		{
		  bfd_size_type amt;
		  amt = (htab->num_overlays + 1) * sizeof (*htab->stub_count);
		  htab->stub_count = bfd_zmalloc (amt);
		  if (htab->stub_count == NULL)
		    goto error_ret_free_internal;
		}

	      if (!build)
		{
		  if (!count_stub (htab, ibfd, isec, stub_type, h, irela))
		    goto error_ret_free_internal;
		}
	      else
		{
		  bfd_vma dest;

		  if (h != NULL)
		    dest = h->root.u.def.value;
		  else
		    dest = sym->st_value;
		  dest += irela->r_addend;
		  if (!build_stub (info, ibfd, isec, stub_type, h, irela,
				   dest, sym_sec))
		    goto error_ret_free_internal;
		}
	    }

	  /* We're done with the internal relocs, free them.  */
	  if (elf_section_data (isec)->relocs != internal_relocs)
	    free (internal_relocs);
	}

      if (local_syms != NULL
	  && symtab_hdr->contents != (unsigned char *) local_syms)
	{
	  if (!info->keep_memory)
	    free (local_syms);
	  else
	    symtab_hdr->contents = (unsigned char *) local_syms;
	}
    }

  return TRUE;
}
1665
/* Allocate space for overlay call and return stubs.
   Return 0 on error, 1 if no overlays, 2 otherwise.
   Counts stubs (process_stubs with build==FALSE, plus _SPUEAR_
   symbols), then creates and sizes the .stub sections (one for the
   non-overlay area plus one per overlay), the overlay table section
   .ovtab, the soft-icache .ovini section when applicable, and the
   .toe section.  */

int
spu_elf_size_stubs (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab;
  bfd *ibfd;
  bfd_size_type amt;
  flagword flags;
  unsigned int i;
  asection *stub;

  if (!process_stubs (info, FALSE))
    return 0;

  htab = spu_hash_table (info);
  elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, info);
  if (htab->stub_err)
    return 0;

  /* New sections are attached to the first input bfd.  */
  ibfd = info->input_bfds;
  if (htab->stub_count != NULL)
    {
      amt = (htab->num_overlays + 1) * sizeof (*htab->stub_sec);
      htab->stub_sec = bfd_zmalloc (amt);
      if (htab->stub_sec == NULL)
	return 0;

      /* stub_sec[0] holds the non-overlay area stubs.  */
      flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
	       | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
      stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
      htab->stub_sec[0] = stub;
      if (stub == NULL
	  || !bfd_set_section_alignment (stub,
					 ovl_stub_size_log2 (htab->params)))
	return 0;
      stub->size = htab->stub_count[0] * ovl_stub_size (htab->params);
      if (htab->params->ovly_flavour == ovly_soft_icache)
	/* Extra space for linked list entries.  */
	stub->size += htab->stub_count[0] * 16;

      /* One stub section per overlay, indexed by overlay number.  */
      for (i = 0; i < htab->num_overlays; ++i)
	{
	  asection *osec = htab->ovl_sec[i];
	  unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
	  stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
	  htab->stub_sec[ovl] = stub;
	  if (stub == NULL
	      || !bfd_set_section_alignment (stub,
					     ovl_stub_size_log2 (htab->params)))
	    return 0;
	  stub->size = htab->stub_count[ovl] * ovl_stub_size (htab->params);
	}
    }

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      /* Space for icache manager tables.
	 a) Tag array, one quadword per cache line.
	 b) Rewrite "to" list, one quadword per cache line.
	 c) Rewrite "from" list, one byte per outgoing branch (rounded up to
	    a power-of-two number of full quadwords) per cache line.  */

      flags = SEC_ALLOC;
      htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
      if (htab->ovtab == NULL
	  || !bfd_set_section_alignment (htab->ovtab, 4))
	return 0;

      htab->ovtab->size = (16 + 16 + (16 << htab->fromelem_size_log2))
			  << htab->num_lines_log2;

      flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      htab->init = bfd_make_section_anyway_with_flags (ibfd, ".ovini", flags);
      if (htab->init == NULL
	  || !bfd_set_section_alignment (htab->init, 4))
	return 0;

      htab->init->size = 16;
    }
  else if (htab->stub_count == NULL)
    return 1;
  else
    {
      /* htab->ovtab consists of two arrays.
	 .	struct {
	 .	  u32 vma;
	 .	  u32 size;
	 .	  u32 file_off;
	 .	  u32 buf;
	 .	} _ovly_table[];
	 .
	 .	struct {
	 .	  u32 mapped;
	 .	} _ovly_buf_table[];
	 .  */

      flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
      if (htab->ovtab == NULL
	  || !bfd_set_section_alignment (htab->ovtab, 4))
	return 0;

      htab->ovtab->size = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
    }

  htab->toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
  if (htab->toe == NULL
      || !bfd_set_section_alignment (htab->toe, 4))
    return 0;
  htab->toe->size = 16;

  return 2;
}
1781
1782 /* Called from ld to place overlay manager data sections. This is done
1783 after the overlay manager itself is loaded, mainly so that the
1784 linker's htab->init section is placed after any other .ovl.init
1785 sections. */
1786
1787 void
1788 spu_elf_place_overlay_data (struct bfd_link_info *info)
1789 {
1790 struct spu_link_hash_table *htab = spu_hash_table (info);
1791 unsigned int i;
1792
1793 if (htab->stub_sec != NULL)
1794 {
1795 (*htab->params->place_spu_section) (htab->stub_sec[0], NULL, ".text");
1796
1797 for (i = 0; i < htab->num_overlays; ++i)
1798 {
1799 asection *osec = htab->ovl_sec[i];
1800 unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
1801 (*htab->params->place_spu_section) (htab->stub_sec[ovl], osec, NULL);
1802 }
1803 }
1804
1805 if (htab->params->ovly_flavour == ovly_soft_icache)
1806 (*htab->params->place_spu_section) (htab->init, NULL, ".ovl.init");
1807
1808 if (htab->ovtab != NULL)
1809 {
1810 const char *ovout = ".data";
1811 if (htab->params->ovly_flavour == ovly_soft_icache)
1812 ovout = ".bss";
1813 (*htab->params->place_spu_section) (htab->ovtab, NULL, ovout);
1814 }
1815
1816 if (htab->toe != NULL)
1817 (*htab->params->place_spu_section) (htab->toe, NULL, ".toe");
1818 }
1819
/* Functions to handle embedded spu_ovl.o object.  */

/* bfd_openr_iovec "open" callback: the overlay manager is an
   in-memory image, so there is nothing to open; hand the caller's
   stream descriptor straight through for use by ovl_mgr_pread and
   ovl_mgr_stat.  */

static void *
ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
{
  return stream;
}
1827
1828 static file_ptr
1829 ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
1830 void *stream,
1831 void *buf,
1832 file_ptr nbytes,
1833 file_ptr offset)
1834 {
1835 struct _ovl_stream *os;
1836 size_t count;
1837 size_t max;
1838
1839 os = (struct _ovl_stream *) stream;
1840 max = (const char *) os->end - (const char *) os->start;
1841
1842 if ((ufile_ptr) offset >= max)
1843 return 0;
1844
1845 count = nbytes;
1846 if (count > max - offset)
1847 count = max - offset;
1848
1849 memcpy (buf, (const char *) os->start + offset, count);
1850 return count;
1851 }
1852
1853 static int
1854 ovl_mgr_stat (struct bfd *abfd ATTRIBUTE_UNUSED,
1855 void *stream,
1856 struct stat *sb)
1857 {
1858 struct _ovl_stream *os = (struct _ovl_stream *) stream;
1859
1860 memset (sb, 0, sizeof (*sb));
1861 sb->st_size = (const char *) os->end - (const char *) os->start;
1862 return 0;
1863 }
1864
1865 bfd_boolean
1866 spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
1867 {
1868 *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
1869 "elf32-spu",
1870 ovl_mgr_open,
1871 (void *) stream,
1872 ovl_mgr_pread,
1873 NULL,
1874 ovl_mgr_stat);
1875 return *ovl_bfd != NULL;
1876 }
1877
1878 static unsigned int
1879 overlay_index (asection *sec)
1880 {
1881 if (sec == NULL
1882 || sec->output_section == bfd_abs_section_ptr)
1883 return 0;
1884 return spu_elf_section_data (sec->output_section)->u.o.ovl_index;
1885 }
1886
1887 /* Define an STT_OBJECT symbol. */
1888
1889 static struct elf_link_hash_entry *
1890 define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
1891 {
1892 struct elf_link_hash_entry *h;
1893
1894 h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
1895 if (h == NULL)
1896 return NULL;
1897
1898 if (h->root.type != bfd_link_hash_defined
1899 || !h->def_regular)
1900 {
1901 h->root.type = bfd_link_hash_defined;
1902 h->root.u.def.section = htab->ovtab;
1903 h->type = STT_OBJECT;
1904 h->ref_regular = 1;
1905 h->def_regular = 1;
1906 h->ref_regular_nonweak = 1;
1907 h->non_elf = 0;
1908 }
1909 else if (h->root.u.def.section->owner != NULL)
1910 {
1911 /* xgettext:c-format */
1912 _bfd_error_handler (_("%pB is not allowed to define %s"),
1913 h->root.u.def.section->owner,
1914 h->root.root.string);
1915 bfd_set_error (bfd_error_bad_value);
1916 return NULL;
1917 }
1918 else
1919 {
1920 _bfd_error_handler (_("you are not allowed to define %s in a script"),
1921 h->root.root.string);
1922 bfd_set_error (bfd_error_bad_value);
1923 return NULL;
1924 }
1925
1926 return h;
1927 }
1928
/* Fill in all stubs and the overlay tables.
   Verifies the overlay manager entry points live outside overlays,
   allocates and fills the stub sections (process_stubs with
   build==TRUE plus _SPUEAR_ stubs), checks the emitted sizes match
   the earlier sizing pass, then populates .ovtab: either the
   soft-icache manager symbols or the classic _ovly_table /
   _ovly_buf_table arrays.  Returns FALSE on error.  */

static bfd_boolean
spu_elf_build_stubs (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  struct elf_link_hash_entry *h;
  bfd_byte *p;
  asection *s;
  bfd *obfd;
  unsigned int i;

  if (htab->num_overlays != 0)
    {
      /* The overlay manager entry points must not themselves live in
	 an overlay.  */
      for (i = 0; i < 2; i++)
	{
	  h = htab->ovly_entry[i];
	  if (h != NULL
	      && (h->root.type == bfd_link_hash_defined
		  || h->root.type == bfd_link_hash_defweak)
	      && h->def_regular)
	    {
	      s = h->root.u.def.section->output_section;
	      if (spu_elf_section_data (s)->u.o.ovl_index)
		{
		  _bfd_error_handler (_("%s in overlay section"),
				      h->root.root.string);
		  bfd_set_error (bfd_error_bad_value);
		  return FALSE;
		}
	    }
	}
    }

  if (htab->stub_sec != NULL)
    {
      /* Allocate stub contents; stash the sized value in rawsize and
	 zero size so build_stub can use size as a fill pointer.  */
      for (i = 0; i <= htab->num_overlays; i++)
	if (htab->stub_sec[i]->size != 0)
	  {
	    htab->stub_sec[i]->contents = bfd_zalloc (htab->stub_sec[i]->owner,
						      htab->stub_sec[i]->size);
	    if (htab->stub_sec[i]->contents == NULL)
	      return FALSE;
	    htab->stub_sec[i]->rawsize = htab->stub_sec[i]->size;
	    htab->stub_sec[i]->size = 0;
	  }

      /* Fill in all the stubs.  */
      process_stubs (info, TRUE);
      if (!htab->stub_err)
	elf_link_hash_traverse (&htab->elf, build_spuear_stubs, info);

      if (htab->stub_err)
	{
	  _bfd_error_handler (_("overlay stub relocation overflow"));
	  bfd_set_error (bfd_error_bad_value);
	  return FALSE;
	}

      /* The build pass must emit exactly what the sizing pass
	 predicted.  */
      for (i = 0; i <= htab->num_overlays; i++)
	{
	  if (htab->stub_sec[i]->size != htab->stub_sec[i]->rawsize)
	    {
	      _bfd_error_handler (_("stubs don't match calculated size"));
	      bfd_set_error (bfd_error_bad_value);
	      return FALSE;
	    }
	  htab->stub_sec[i]->rawsize = 0;
	}
    }

  if (htab->ovtab == NULL || htab->ovtab->size == 0)
    return TRUE;

  htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
  if (htab->ovtab->contents == NULL)
    return FALSE;

  p = htab->ovtab->contents;
  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      /* Lay out the icache manager tables as absolute/ovtab symbols;
	 OFF tracks the running offset within .ovtab.  */
      bfd_vma off;

      h = define_ovtab_symbol (htab, "__icache_tag_array");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 0;
      h->size = 16 << htab->num_lines_log2;
      off = h->size;

      h = define_ovtab_symbol (htab, "__icache_tag_array_size");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 16 << htab->num_lines_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_rewrite_to");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = off;
      h->size = 16 << htab->num_lines_log2;
      off += h->size;

      h = define_ovtab_symbol (htab, "__icache_rewrite_to_size");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 16 << htab->num_lines_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_rewrite_from");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = off;
      h->size = 16 << (htab->fromelem_size_log2 + htab->num_lines_log2);
      off += h->size;

      h = define_ovtab_symbol (htab, "__icache_rewrite_from_size");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 16 << (htab->fromelem_size_log2
				   + htab->num_lines_log2);
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_log2_fromelemsize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->fromelem_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_base");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->ovl_sec[0]->vma;
      h->root.u.def.section = bfd_abs_section_ptr;
      h->size = htab->num_buf << htab->line_size_log2;

      h = define_ovtab_symbol (htab, "__icache_linesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 1 << htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_log2_linesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_neg_log2_linesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = -htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_cachesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 1 << (htab->num_lines_log2 + htab->line_size_log2);
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_log2_cachesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->num_lines_log2 + htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_neg_log2_cachesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = -(htab->num_lines_log2 + htab->line_size_log2);
      h->root.u.def.section = bfd_abs_section_ptr;

      if (htab->init != NULL && htab->init->size != 0)
	{
	  htab->init->contents = bfd_zalloc (htab->init->owner,
					     htab->init->size);
	  if (htab->init->contents == NULL)
	    return FALSE;

	  h = define_ovtab_symbol (htab, "__icache_fileoff");
	  if (h == NULL)
	    return FALSE;
	  h->root.u.def.value = 0;
	  h->root.u.def.section = htab->init;
	  h->size = 8;
	}
    }
  else
    {
      /* Write out _ovly_table.  */
      /* set low bit of .size to mark non-overlay area as present.  */
      p[7] = 1;
      obfd = htab->ovtab->output_section->owner;
      for (s = obfd->sections; s != NULL; s = s->next)
	{
	  unsigned int ovl_index = spu_elf_section_data (s)->u.o.ovl_index;

	  if (ovl_index != 0)
	    {
	      unsigned long off = ovl_index * 16;
	      unsigned int ovl_buf = spu_elf_section_data (s)->u.o.ovl_buf;

	      bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
	      bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16,
			  p + off + 4);
	      /* file_off written later in spu_elf_modify_headers.  */
	      bfd_put_32 (htab->ovtab->owner, ovl_buf, p + off + 12);
	    }
	}

      h = define_ovtab_symbol (htab, "_ovly_table");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 16;
      h->size = htab->num_overlays * 16;

      h = define_ovtab_symbol (htab, "_ovly_table_end");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->num_overlays * 16 + 16;
      h->size = 0;

      h = define_ovtab_symbol (htab, "_ovly_buf_table");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->num_overlays * 16 + 16;
      h->size = htab->num_buf * 4;

      h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
      h->size = 0;
    }

  h = define_ovtab_symbol (htab, "_EAR_");
  if (h == NULL)
    return FALSE;
  h->root.u.def.section = htab->toe;
  h->root.u.def.value = 0;
  h->size = 16;

  return TRUE;
}
2173
2174 /* Check that all loadable section VMAs lie in the range
2175 LO .. HI inclusive, and stash some parameters for --auto-overlay. */
2176
2177 asection *
2178 spu_elf_check_vma (struct bfd_link_info *info)
2179 {
2180 struct elf_segment_map *m;
2181 unsigned int i;
2182 struct spu_link_hash_table *htab = spu_hash_table (info);
2183 bfd *abfd = info->output_bfd;
2184 bfd_vma hi = htab->params->local_store_hi;
2185 bfd_vma lo = htab->params->local_store_lo;
2186
2187 htab->local_store = hi + 1 - lo;
2188
2189 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
2190 if (m->p_type == PT_LOAD)
2191 for (i = 0; i < m->count; i++)
2192 if (m->sections[i]->size != 0
2193 && (m->sections[i]->vma < lo
2194 || m->sections[i]->vma > hi
2195 || m->sections[i]->vma + m->sections[i]->size - 1 > hi))
2196 return m->sections[i];
2197
2198 return NULL;
2199 }
2200
2201 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
2202 Search for stack adjusting insns, and return the sp delta.
2203 If a store of lr is found save the instruction offset to *LR_STORE.
2204 If a stack adjusting instruction is found, save that offset to
2205 *SP_ADJUST. */
2206
2207 static int
2208 find_function_stack_adjust (asection *sec,
2209 bfd_vma offset,
2210 bfd_vma *lr_store,
2211 bfd_vma *sp_adjust)
2212 {
2213 int reg[128];
2214
2215 memset (reg, 0, sizeof (reg));
2216 for ( ; offset + 4 <= sec->size; offset += 4)
2217 {
2218 unsigned char buf[4];
2219 int rt, ra;
2220 int imm;
2221
2222 /* Assume no relocs on stack adjusing insns. */
2223 if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
2224 break;
2225
2226 rt = buf[3] & 0x7f;
2227 ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);
2228
2229 if (buf[0] == 0x24 /* stqd */)
2230 {
2231 if (rt == 0 /* lr */ && ra == 1 /* sp */)
2232 *lr_store = offset;
2233 continue;
2234 }
2235
2236 /* Partly decoded immediate field. */
2237 imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);
2238
2239 if (buf[0] == 0x1c /* ai */)
2240 {
2241 imm >>= 7;
2242 imm = (imm ^ 0x200) - 0x200;
2243 reg[rt] = reg[ra] + imm;
2244
2245 if (rt == 1 /* sp */)
2246 {
2247 if (reg[rt] > 0)
2248 break;
2249 *sp_adjust = offset;
2250 return reg[rt];
2251 }
2252 }
2253 else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
2254 {
2255 int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);
2256
2257 reg[rt] = reg[ra] + reg[rb];
2258 if (rt == 1)
2259 {
2260 if (reg[rt] > 0)
2261 break;
2262 *sp_adjust = offset;
2263 return reg[rt];
2264 }
2265 }
2266 else if (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */)
2267 {
2268 int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);
2269
2270 reg[rt] = reg[rb] - reg[ra];
2271 if (rt == 1)
2272 {
2273 if (reg[rt] > 0)
2274 break;
2275 *sp_adjust = offset;
2276 return reg[rt];
2277 }
2278 }
2279 else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
2280 {
2281 if (buf[0] >= 0x42 /* ila */)
2282 imm |= (buf[0] & 1) << 17;
2283 else
2284 {
2285 imm &= 0xffff;
2286
2287 if (buf[0] == 0x40 /* il */)
2288 {
2289 if ((buf[1] & 0x80) == 0)
2290 continue;
2291 imm = (imm ^ 0x8000) - 0x8000;
2292 }
2293 else if ((buf[1] & 0x80) == 0 /* ilhu */)
2294 imm <<= 16;
2295 }
2296 reg[rt] = imm;
2297 continue;
2298 }
2299 else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
2300 {
2301 reg[rt] |= imm & 0xffff;
2302 continue;
2303 }
2304 else if (buf[0] == 0x04 /* ori */)
2305 {
2306 imm >>= 7;
2307 imm = (imm ^ 0x200) - 0x200;
2308 reg[rt] = reg[ra] | imm;
2309 continue;
2310 }
2311 else if (buf[0] == 0x32 && (buf[1] & 0x80) != 0 /* fsmbi */)
2312 {
2313 reg[rt] = ( ((imm & 0x8000) ? 0xff000000 : 0)
2314 | ((imm & 0x4000) ? 0x00ff0000 : 0)
2315 | ((imm & 0x2000) ? 0x0000ff00 : 0)
2316 | ((imm & 0x1000) ? 0x000000ff : 0));
2317 continue;
2318 }
2319 else if (buf[0] == 0x16 /* andbi */)
2320 {
2321 imm >>= 7;
2322 imm &= 0xff;
2323 imm |= imm << 8;
2324 imm |= imm << 16;
2325 reg[rt] = reg[ra] & imm;
2326 continue;
2327 }
2328 else if (buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
2329 {
2330 /* Used in pic reg load. Say rt is trashed. Won't be used
2331 in stack adjust, but we need to continue past this branch. */
2332 reg[rt] = 0;
2333 continue;
2334 }
2335 else if (is_branch (buf) || is_indirect_branch (buf))
2336 /* If we hit a branch then we must be out of the prologue. */
2337 break;
2338 }
2339
2340 return 0;
2341 }
2342
2343 /* qsort predicate to sort symbols by section and value. */
2344
2345 static Elf_Internal_Sym *sort_syms_syms;
2346 static asection **sort_syms_psecs;
2347
2348 static int
2349 sort_syms (const void *a, const void *b)
2350 {
2351 Elf_Internal_Sym *const *s1 = a;
2352 Elf_Internal_Sym *const *s2 = b;
2353 asection *sec1,*sec2;
2354 bfd_signed_vma delta;
2355
2356 sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
2357 sec2 = sort_syms_psecs[*s2 - sort_syms_syms];
2358
2359 if (sec1 != sec2)
2360 return sec1->index - sec2->index;
2361
2362 delta = (*s1)->st_value - (*s2)->st_value;
2363 if (delta != 0)
2364 return delta < 0 ? -1 : 1;
2365
2366 delta = (*s2)->st_size - (*s1)->st_size;
2367 if (delta != 0)
2368 return delta < 0 ? -1 : 1;
2369
2370 return *s1 < *s2 ? -1 : 1;
2371 }
2372
2373 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
2374 entries for section SEC. */
2375
2376 static struct spu_elf_stack_info *
2377 alloc_stack_info (asection *sec, int max_fun)
2378 {
2379 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2380 bfd_size_type amt;
2381
2382 amt = sizeof (struct spu_elf_stack_info);
2383 amt += (max_fun - 1) * sizeof (struct function_info);
2384 sec_data->u.i.stack_info = bfd_zmalloc (amt);
2385 if (sec_data->u.i.stack_info != NULL)
2386 sec_data->u.i.stack_info->max_fun = max_fun;
2387 return sec_data->u.i.stack_info;
2388 }
2389
/* Add a new struct function_info describing a (part of a) function
   starting at SYM_H.  Keep the array sorted by address.
   SYM_H is an Elf_Internal_Sym * when !GLOBAL, or a
   struct elf_link_hash_entry * when GLOBAL.  IS_FUNC marks the entry
   as a known function.  Returns the new or matching existing entry,
   or NULL on allocation failure.  */

static struct function_info *
maybe_insert_function (asection *sec,
		       void *sym_h,
		       bfd_boolean global,
		       bfd_boolean is_func)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
  int i;
  bfd_vma off, size;

  /* Lazily create the per-section function array.  */
  if (sinfo == NULL)
    {
      sinfo = alloc_stack_info (sec, 20);
      if (sinfo == NULL)
	return NULL;
    }

  /* Extract start offset and size from the local sym or hash entry.  */
  if (!global)
    {
      Elf_Internal_Sym *sym = sym_h;
      off = sym->st_value;
      size = sym->st_size;
    }
  else
    {
      struct elf_link_hash_entry *h = sym_h;
      off = h->root.u.def.value;
      size = h->size;
    }

  /* Find the last existing entry starting at or before OFF.  */
  for (i = sinfo->num_fun; --i >= 0; )
    if (sinfo->fun[i].lo <= off)
      break;

  if (i >= 0)
    {
      /* Don't add another entry for an alias, but do update some
	 info.  */
      if (sinfo->fun[i].lo == off)
	{
	  /* Prefer globals over local syms.  */
	  if (global && !sinfo->fun[i].global)
	    {
	      sinfo->fun[i].global = TRUE;
	      sinfo->fun[i].u.h = sym_h;
	    }
	  if (is_func)
	    sinfo->fun[i].is_func = TRUE;
	  return &sinfo->fun[i];
	}
      /* Ignore a zero-size symbol inside an existing function.  */
      else if (sinfo->fun[i].hi > off && size == 0)
	return &sinfo->fun[i];
    }

  /* Grow the array (by 20 plus half again) when it is full.  */
  if (sinfo->num_fun >= sinfo->max_fun)
    {
      bfd_size_type amt = sizeof (struct spu_elf_stack_info);
      bfd_size_type old = amt;

      old += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
      amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo = bfd_realloc (sinfo, amt);
      if (sinfo == NULL)
	return NULL;
      /* Zero the newly added tail of the array.  */
      memset ((char *) sinfo + old, 0, amt - old);
      sec_data->u.i.stack_info = sinfo;
    }

  /* Insert at I + 1, shifting any later entries up to keep the array
     sorted by LO.  */
  if (++i < sinfo->num_fun)
    memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
	     (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
  sinfo->fun[i].is_func = is_func;
  sinfo->fun[i].global = global;
  sinfo->fun[i].sec = sec;
  if (global)
    sinfo->fun[i].u.h = sym_h;
  else
    sinfo->fun[i].u.sym = sym_h;
  sinfo->fun[i].lo = off;
  sinfo->fun[i].hi = off + size;
  sinfo->fun[i].lr_store = -1;
  sinfo->fun[i].sp_adjust = -1;
  /* Scan the prologue for the frame size and lr save location.  */
  sinfo->fun[i].stack = -find_function_stack_adjust (sec, off,
						     &sinfo->fun[i].lr_store,
						     &sinfo->fun[i].sp_adjust);
  sinfo->num_fun += 1;
  return &sinfo->fun[i];
}
2484
2485 /* Return the name of FUN. */
2486
2487 static const char *
2488 func_name (struct function_info *fun)
2489 {
2490 asection *sec;
2491 bfd *ibfd;
2492 Elf_Internal_Shdr *symtab_hdr;
2493
2494 while (fun->start != NULL)
2495 fun = fun->start;
2496
2497 if (fun->global)
2498 return fun->u.h->root.root.string;
2499
2500 sec = fun->sec;
2501 if (fun->u.sym->st_name == 0)
2502 {
2503 size_t len = strlen (sec->name);
2504 char *name = bfd_malloc (len + 10);
2505 if (name == NULL)
2506 return "(null)";
2507 sprintf (name, "%s+%lx", sec->name,
2508 (unsigned long) fun->u.sym->st_value & 0xffffffff);
2509 return name;
2510 }
2511 ibfd = sec->owner;
2512 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2513 return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
2514 }
2515
2516 /* Read the instruction at OFF in SEC. Return true iff the instruction
2517 is a nop, lnop, or stop 0 (all zero insn). */
2518
2519 static bfd_boolean
2520 is_nop (asection *sec, bfd_vma off)
2521 {
2522 unsigned char insn[4];
2523
2524 if (off + 4 > sec->size
2525 || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
2526 return FALSE;
2527 if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
2528 return TRUE;
2529 if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
2530 return TRUE;
2531 return FALSE;
2532 }
2533
2534 /* Extend the range of FUN to cover nop padding up to LIMIT.
2535 Return TRUE iff some instruction other than a NOP was found. */
2536
2537 static bfd_boolean
2538 insns_at_end (struct function_info *fun, bfd_vma limit)
2539 {
2540 bfd_vma off = (fun->hi + 3) & -4;
2541
2542 while (off < limit && is_nop (fun->sec, off))
2543 off += 4;
2544 if (off < limit)
2545 {
2546 fun->hi = off;
2547 return TRUE;
2548 }
2549 fun->hi = limit;
2550 return FALSE;
2551 }
2552
/* Check and fix overlapping function ranges.  Return TRUE iff there
   are gaps in the current info we have about functions in SEC.  */

static bfd_boolean
check_function_ranges (asection *sec, struct bfd_link_info *info)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
  int i;
  bfd_boolean gaps = FALSE;

  /* No function info collected for this section yet.  */
  if (sinfo == NULL)
    return FALSE;

  for (i = 1; i < sinfo->num_fun; i++)
    if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
      {
	/* Fix overlapping symbols.  */
	const char *f1 = func_name (&sinfo->fun[i - 1]);
	const char *f2 = func_name (&sinfo->fun[i]);

	/* xgettext:c-format */
	info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
	sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
      }
    /* Real (non-nop) insns between the end of one function and the
       start of the next mean our info is incomplete.  */
    else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
      gaps = TRUE;

  if (sinfo->num_fun == 0)
    gaps = TRUE;
  else
    {
      /* Check coverage at the start and end of the section too.  */
      if (sinfo->fun[0].lo != 0)
	gaps = TRUE;
      if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
	{
	  const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);

	  info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
	  sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
	}
      else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
	gaps = TRUE;
    }
  return gaps;
}
2599
2600 /* Search current function info for a function that contains address
2601 OFFSET in section SEC. */
2602
2603 static struct function_info *
2604 find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
2605 {
2606 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2607 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2608 int lo, hi, mid;
2609
2610 lo = 0;
2611 hi = sinfo->num_fun;
2612 while (lo < hi)
2613 {
2614 mid = (lo + hi) / 2;
2615 if (offset < sinfo->fun[mid].lo)
2616 hi = mid;
2617 else if (offset >= sinfo->fun[mid].hi)
2618 lo = mid + 1;
2619 else
2620 return &sinfo->fun[mid];
2621 }
2622 /* xgettext:c-format */
2623 info->callbacks->einfo (_("%pA:0x%v not found in function table\n"),
2624 sec, offset);
2625 bfd_set_error (bfd_error_bad_value);
2626 return NULL;
2627 }
2628
2629 /* Add CALLEE to CALLER call list if not already present. Return TRUE
2630 if CALLEE was new. If this function return FALSE, CALLEE should
2631 be freed. */
2632
2633 static bfd_boolean
2634 insert_callee (struct function_info *caller, struct call_info *callee)
2635 {
2636 struct call_info **pp, *p;
2637
2638 for (pp = &caller->call_list; (p = *pp) != NULL; pp = &p->next)
2639 if (p->fun == callee->fun)
2640 {
2641 /* Tail calls use less stack than normal calls. Retain entry
2642 for normal call over one for tail call. */
2643 p->is_tail &= callee->is_tail;
2644 if (!p->is_tail)
2645 {
2646 p->fun->start = NULL;
2647 p->fun->is_func = TRUE;
2648 }
2649 p->count += callee->count;
2650 /* Reorder list so most recent call is first. */
2651 *pp = p->next;
2652 p->next = caller->call_list;
2653 caller->call_list = p;
2654 return FALSE;
2655 }
2656 callee->next = caller->call_list;
2657 caller->call_list = callee;
2658 return TRUE;
2659 }
2660
2661 /* Copy CALL and insert the copy into CALLER. */
2662
2663 static bfd_boolean
2664 copy_callee (struct function_info *caller, const struct call_info *call)
2665 {
2666 struct call_info *callee;
2667 callee = bfd_malloc (sizeof (*callee));
2668 if (callee == NULL)
2669 return FALSE;
2670 *callee = *call;
2671 if (!insert_callee (caller, callee))
2672 free (callee);
2673 return TRUE;
2674 }
2675
2676 /* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2677 overlay stub sections. */
2678
2679 static bfd_boolean
2680 interesting_section (asection *s)
2681 {
2682 return (s->output_section != bfd_abs_section_ptr
2683 && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_IN_MEMORY))
2684 == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2685 && s->size != 0);
2686 }
2687
/* Rummage through the relocs for SEC, looking for function calls.
   If CALL_TREE is true, fill in call graph.  If CALL_TREE is false,
   mark destination symbols on calls as being functions.  Also
   look at branches, which may be tail calls or go to hot/cold
   section part of same function.  */

static bfd_boolean
mark_functions_via_relocs (asection *sec,
			   struct bfd_link_info *info,
			   int call_tree)
{
  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
  Elf_Internal_Shdr *symtab_hdr;
  void *psyms;
  unsigned int priority = 0;
  /* Static so the non-code-section warning is only issued once per
     link, not once per offending reloc.  */
  static bfd_boolean warned;

  if (!interesting_section (sec)
      || sec->reloc_count == 0)
    return TRUE;

  internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
					       info->keep_memory);
  if (internal_relocs == NULL)
    return FALSE;

  symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
  psyms = &symtab_hdr->contents;
  irela = internal_relocs;
  irelaend = irela + sec->reloc_count;
  for (; irela < irelaend; irela++)
    {
      enum elf_spu_reloc_type r_type;
      unsigned int r_indx;
      asection *sym_sec;
      Elf_Internal_Sym *sym;
      struct elf_link_hash_entry *h;
      bfd_vma val;
      bfd_boolean nonbranch, is_call;
      struct function_info *caller;
      struct call_info *callee;

      r_type = ELF32_R_TYPE (irela->r_info);
      /* Only REL16/ADDR16 relocs can sit on branch instructions.  */
      nonbranch = r_type != R_SPU_REL16 && r_type != R_SPU_ADDR16;

      r_indx = ELF32_R_SYM (irela->r_info);
      if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
	return FALSE;

      if (sym_sec == NULL
	  || sym_sec->output_section == bfd_abs_section_ptr)
	continue;

      is_call = FALSE;
      if (!nonbranch)
	{
	  unsigned char insn[4];

	  if (!bfd_get_section_contents (sec->owner, sec, insn,
					 irela->r_offset, 4))
	    return FALSE;
	  if (is_branch (insn))
	    {
	      /* br[a]sl opcodes indicate a call rather than a jump.  */
	      is_call = (insn[0] & 0xfd) == 0x31;
	      /* Extract the branch hint priority bits from the insn.  */
	      priority = insn[1] & 0x0f;
	      priority <<= 8;
	      priority |= insn[2];
	      priority <<= 8;
	      priority |= insn[3];
	      priority >>= 7;
	      if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
		  != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
		{
		  if (!warned)
		    info->callbacks->einfo
		      /* xgettext:c-format */
		      (_("%pB(%pA+0x%v): call to non-code section"
			 " %pB(%pA), analysis incomplete\n"),
		       sec->owner, sec, irela->r_offset,
		       sym_sec->owner, sym_sec);
		  warned = TRUE;
		  continue;
		}
	    }
	  else
	    {
	      /* A branch-type reloc on a non-branch insn: treat it as
		 a data reference below, but skip hint insns.  */
	      nonbranch = TRUE;
	      if (is_hint (insn))
		continue;
	    }
	}

      if (nonbranch)
	{
	  /* For --auto-overlay, count possible stubs we need for
	     function pointer references.  */
	  unsigned int sym_type;
	  if (h)
	    sym_type = h->type;
	  else
	    sym_type = ELF_ST_TYPE (sym->st_info);
	  if (sym_type == STT_FUNC)
	    {
	      if (call_tree && spu_hash_table (info)->params->auto_overlay)
		spu_hash_table (info)->non_ovly_stub += 1;
	      /* If the symbol type is STT_FUNC then this must be a
		 function pointer initialisation.  */
	      continue;
	    }
	  /* Ignore data references.  */
	  if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
	      != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
	    continue;
	  /* Otherwise we probably have a jump table reloc for
	     a switch statement or some other reference to a
	     code label.  */
	}

      /* Compute the destination address of the branch/reference.  */
      if (h)
	val = h->root.u.def.value;
      else
	val = sym->st_value;
      val += irela->r_addend;

      if (!call_tree)
	{
	  /* First pass: just record the destination as a function
	     start.  A non-zero addend targets the middle of a symbol,
	     so fabricate a symbol at the exact address.  */
	  struct function_info *fun;

	  if (irela->r_addend != 0)
	    {
	      Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
	      if (fake == NULL)
		return FALSE;
	      fake->st_value = val;
	      fake->st_shndx
		= _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
	      sym = fake;
	    }
	  if (sym)
	    fun = maybe_insert_function (sym_sec, sym, FALSE, is_call);
	  else
	    fun = maybe_insert_function (sym_sec, h, TRUE, is_call);
	  if (fun == NULL)
	    return FALSE;
	  /* Free the fake sym unless maybe_insert_function kept it.  */
	  if (irela->r_addend != 0
	      && fun->u.sym != sym)
	    free (sym);
	  continue;
	}

      /* Second pass: build the call graph edge CALLER -> CALLEE.  */
      caller = find_function (sec, irela->r_offset, info);
      if (caller == NULL)
	return FALSE;
      callee = bfd_malloc (sizeof *callee);
      if (callee == NULL)
	return FALSE;

      callee->fun = find_function (sym_sec, val, info);
      if (callee->fun == NULL)
	return FALSE;
      callee->is_tail = !is_call;
      callee->is_pasted = FALSE;
      callee->broken_cycle = FALSE;
      callee->priority = priority;
      callee->count = nonbranch? 0 : 1;
      if (callee->fun->last_caller != sec)
	{
	  callee->fun->last_caller = sec;
	  callee->fun->call_count += 1;
	}
      if (!insert_callee (caller, callee))
	free (callee);
      else if (!is_call
	       && !callee->fun->is_func
	       && callee->fun->stack == 0)
	{
	  /* This is either a tail call or a branch from one part of
	     the function to another, ie. hot/cold section.  If the
	     destination has been called by some other function then
	     it is a separate function.  We also assume that functions
	     are not split across input files.  */
	  if (sec->owner != sym_sec->owner)
	    {
	      callee->fun->start = NULL;
	      callee->fun->is_func = TRUE;
	    }
	  else if (callee->fun->start == NULL)
	    {
	      /* Link the destination to the root of the caller's
		 chain of function parts.  */
	      struct function_info *caller_start = caller;
	      while (caller_start->start)
		caller_start = caller_start->start;

	      if (caller_start != callee->fun)
		callee->fun->start = caller_start;
	    }
	  else
	    {
	      /* Destination already belongs to a chain; if it is a
		 different chain from the caller's, it must really be
		 a separate function.  */
	      struct function_info *callee_start;
	      struct function_info *caller_start;
	      callee_start = callee->fun;
	      while (callee_start->start)
		callee_start = callee_start->start;
	      caller_start = caller;
	      while (caller_start->start)
		caller_start = caller_start->start;
	      if (caller_start != callee_start)
		{
		  callee->fun->start = NULL;
		  callee->fun->is_func = TRUE;
		}
	    }
	}
    }

  return TRUE;
}
2904
/* Handle something like .init or .fini, which has a piece of a function.
   These sections are pasted together to form a single function.
   Creates a function_info covering all of SEC and, when a function
   part precedes SEC in the output section's link order, chains SEC's
   part onto it via a pasted call edge.  */

static bfd_boolean
pasted_function (asection *sec)
{
  struct bfd_link_order *l;
  struct _spu_elf_section_data *sec_data;
  struct spu_elf_stack_info *sinfo;
  Elf_Internal_Sym *fake;
  struct function_info *fun, *fun_start;

  /* Fabricate a local symbol covering the whole section.  */
  fake = bfd_zmalloc (sizeof (*fake));
  if (fake == NULL)
    return FALSE;
  fake->st_value = 0;
  fake->st_size = sec->size;
  fake->st_shndx
    = _bfd_elf_section_from_bfd_section (sec->owner, sec);
  fun = maybe_insert_function (sec, fake, FALSE, FALSE);
  if (!fun)
    return FALSE;

  /* Find a function immediately preceding this section.  */
  fun_start = NULL;
  for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
    {
      if (l->u.indirect.section == sec)
	{
	  if (fun_start != NULL)
	    {
	      /* Paste this section's function part onto the last
		 function of the preceding section.  */
	      struct call_info *callee = bfd_malloc (sizeof *callee);
	      if (callee == NULL)
		return FALSE;

	      fun->start = fun_start;
	      callee->fun = fun;
	      callee->is_tail = TRUE;
	      callee->is_pasted = TRUE;
	      callee->broken_cycle = FALSE;
	      callee->priority = 0;
	      callee->count = 1;
	      if (!insert_callee (fun_start, callee))
		free (callee);
	      return TRUE;
	    }
	  break;
	}
      /* Track the last known function of each preceding section.  */
      if (l->type == bfd_indirect_link_order
	  && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
	  && (sinfo = sec_data->u.i.stack_info) != NULL
	  && sinfo->num_fun != 0)
	fun_start = &sinfo->fun[sinfo->num_fun - 1];
    }

  /* Don't return an error if we did not find a function preceding this
     section.  The section may have incorrect flags.  */
  return TRUE;
}
2964
/* Map address ranges in code sections to functions.
   Pass 1 installs properly typed function symbols; if coverage gaps
   remain, pass 2 discovers more function starts from relocations and
   global symbols, then extends zero-size entries to fill sections.  */

static bfd_boolean
discover_functions (struct bfd_link_info *info)
{
  bfd *ibfd;
  int bfd_idx;
  /* Per-input-bfd arrays of sorted symbol pointers and the section
     each symbol lives in, kept for the second pass.  */
  Elf_Internal_Sym ***psym_arr;
  asection ***sec_arr;
  bfd_boolean gaps = FALSE;

  /* Count the input bfds to size the per-bfd arrays.  */
  bfd_idx = 0;
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    bfd_idx++;

  psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
  if (psym_arr == NULL)
    return FALSE;
  sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
  if (sec_arr == NULL)
    return FALSE;

  for (ibfd = info->input_bfds, bfd_idx = 0;
       ibfd != NULL;
       ibfd = ibfd->link.next, bfd_idx++)
    {
      extern const bfd_target spu_elf32_vec;
      Elf_Internal_Shdr *symtab_hdr;
      asection *sec;
      size_t symcount;
      Elf_Internal_Sym *syms, *sy, **psyms, **psy;
      asection **psecs, **p;

      if (ibfd->xvec != &spu_elf32_vec)
	continue;

      /* Read all the symbols.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
      if (symcount == 0)
	{
	  /* A symbol-less bfd with interesting code sections still
	     counts as having coverage gaps.  */
	  if (!gaps)
	    for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	      if (interesting_section (sec))
		{
		  gaps = TRUE;
		  break;
		}
	  continue;
	}

      if (symtab_hdr->contents != NULL)
	{
	  /* Don't use cached symbols since the generic ELF linker
	     code only reads local symbols, and we need globals too.  */
	  free (symtab_hdr->contents);
	  symtab_hdr->contents = NULL;
	}
      syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
				   NULL, NULL, NULL);
      symtab_hdr->contents = (void *) syms;
      if (syms == NULL)
	return FALSE;

      /* Select defined function symbols that are going to be output.  */
      psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
      if (psyms == NULL)
	return FALSE;
      psym_arr[bfd_idx] = psyms;
      psecs = bfd_malloc (symcount * sizeof (*psecs));
      if (psecs == NULL)
	return FALSE;
      sec_arr[bfd_idx] = psecs;
      for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
	if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
	    || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
	  {
	    asection *s;

	    *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
	    if (s != NULL && interesting_section (s))
	      *psy++ = sy;
	  }
      symcount = psy - psyms;
      /* NULL-terminate the selected symbol array for pass 2.  */
      *psy = NULL;

      /* Sort them by section and offset within section.  */
      sort_syms_syms = syms;
      sort_syms_psecs = psecs;
      qsort (psyms, symcount, sizeof (*psyms), sort_syms);

      /* Now inspect the function symbols.  */
      for (psy = psyms; psy < psyms + symcount; )
	{
	  asection *s = psecs[*psy - syms];
	  Elf_Internal_Sym **psy2;

	  /* Find the run of symbols belonging to section S, and
	     pre-size its function array accordingly.  */
	  for (psy2 = psy; ++psy2 < psyms + symcount; )
	    if (psecs[*psy2 - syms] != s)
	      break;

	  if (!alloc_stack_info (s, psy2 - psy))
	    return FALSE;
	  psy = psy2;
	}

      /* First install info about properly typed and sized functions.
	 In an ideal world this will cover all code sections, except
	 when partitioning functions into hot and cold sections,
	 and the horrible pasted together .init and .fini functions.  */
      for (psy = psyms; psy < psyms + symcount; ++psy)
	{
	  sy = *psy;
	  if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
	    {
	      asection *s = psecs[sy - syms];
	      if (!maybe_insert_function (s, sy, FALSE, TRUE))
		return FALSE;
	    }
	}

      for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	if (interesting_section (sec))
	  gaps |= check_function_ranges (sec, info);
    }

  if (gaps)
    {
      /* See if we can discover more function symbols by looking at
	 relocations.  */
      for (ibfd = info->input_bfds, bfd_idx = 0;
	   ibfd != NULL;
	   ibfd = ibfd->link.next, bfd_idx++)
	{
	  asection *sec;

	  if (psym_arr[bfd_idx] == NULL)
	    continue;

	  for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	    if (!mark_functions_via_relocs (sec, info, FALSE))
	      return FALSE;
	}

      for (ibfd = info->input_bfds, bfd_idx = 0;
	   ibfd != NULL;
	   ibfd = ibfd->link.next, bfd_idx++)
	{
	  Elf_Internal_Shdr *symtab_hdr;
	  asection *sec;
	  Elf_Internal_Sym *syms, *sy, **psyms, **psy;
	  asection **psecs;

	  if ((psyms = psym_arr[bfd_idx]) == NULL)
	    continue;

	  psecs = sec_arr[bfd_idx];

	  symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
	  syms = (Elf_Internal_Sym *) symtab_hdr->contents;

	  /* Re-check coverage after the reloc-based discovery.  */
	  gaps = FALSE;
	  for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	    if (interesting_section (sec))
	      gaps |= check_function_ranges (sec, info);
	  if (!gaps)
	    continue;

	  /* Finally, install all globals.  */
	  for (psy = psyms; (sy = *psy) != NULL; ++psy)
	    {
	      asection *s;

	      s = psecs[sy - syms];

	      /* Global syms might be improperly typed functions.  */
	      if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
		  && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
		{
		  if (!maybe_insert_function (s, sy, FALSE, FALSE))
		    return FALSE;
		}
	    }
	}

      for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
	{
	  extern const bfd_target spu_elf32_vec;
	  asection *sec;

	  if (ibfd->xvec != &spu_elf32_vec)
	    continue;

	  /* Some of the symbols we've installed as marking the
	     beginning of functions may have a size of zero.  Extend
	     the range of such functions to the beginning of the
	     next symbol of interest.  */
	  for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	    if (interesting_section (sec))
	      {
		struct _spu_elf_section_data *sec_data;
		struct spu_elf_stack_info *sinfo;

		sec_data = spu_elf_section_data (sec);
		sinfo = sec_data->u.i.stack_info;
		if (sinfo != NULL && sinfo->num_fun != 0)
		  {
		    int fun_idx;
		    bfd_vma hi = sec->size;

		    /* Walk backwards so each function ends where the
		       next one begins.  */
		    for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
		      {
			sinfo->fun[fun_idx].hi = hi;
			hi = sinfo->fun[fun_idx].lo;
		      }

		    sinfo->fun[0].lo = 0;
		  }
		/* No symbols in this section.  Must be .init or .fini
		   or something similar.  */
		else if (!pasted_function (sec))
		  return FALSE;
	      }
	}
    }

  /* Release the per-bfd scratch arrays.  */
  for (ibfd = info->input_bfds, bfd_idx = 0;
       ibfd != NULL;
       ibfd = ibfd->link.next, bfd_idx++)
    {
      if (psym_arr[bfd_idx] == NULL)
	continue;

      free (psym_arr[bfd_idx]);
      free (sec_arr[bfd_idx]);
    }

  free (psym_arr);
  free (sec_arr);

  return TRUE;
}
3207
3208 /* Iterate over all function_info we have collected, calling DOIT on
3209 each node if ROOT_ONLY is false. Only call DOIT on root nodes
3210 if ROOT_ONLY. */
3211
3212 static bfd_boolean
3213 for_each_node (bfd_boolean (*doit) (struct function_info *,
3214 struct bfd_link_info *,
3215 void *),
3216 struct bfd_link_info *info,
3217 void *param,
3218 int root_only)
3219 {
3220 bfd *ibfd;
3221
3222 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3223 {
3224 extern const bfd_target spu_elf32_vec;
3225 asection *sec;
3226
3227 if (ibfd->xvec != &spu_elf32_vec)
3228 continue;
3229
3230 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3231 {
3232 struct _spu_elf_section_data *sec_data;
3233 struct spu_elf_stack_info *sinfo;
3234
3235 if ((sec_data = spu_elf_section_data (sec)) != NULL
3236 && (sinfo = sec_data->u.i.stack_info) != NULL)
3237 {
3238 int i;
3239 for (i = 0; i < sinfo->num_fun; ++i)
3240 if (!root_only || !sinfo->fun[i].non_root)
3241 if (!doit (&sinfo->fun[i], info, param))
3242 return FALSE;
3243 }
3244 }
3245 }
3246 return TRUE;
3247 }
3248
3249 /* Transfer call info attached to struct function_info entries for
3250 all of a given function's sections to the first entry. */
3251
3252 static bfd_boolean
3253 transfer_calls (struct function_info *fun,
3254 struct bfd_link_info *info ATTRIBUTE_UNUSED,
3255 void *param ATTRIBUTE_UNUSED)
3256 {
3257 struct function_info *start = fun->start;
3258
3259 if (start != NULL)
3260 {
3261 struct call_info *call, *call_next;
3262
3263 while (start->start != NULL)
3264 start = start->start;
3265 for (call = fun->call_list; call != NULL; call = call_next)
3266 {
3267 call_next = call->next;
3268 if (!insert_callee (start, call))
3269 free (call);
3270 }
3271 fun->call_list = NULL;
3272 }
3273 return TRUE;
3274 }
3275
3276 /* Mark nodes in the call graph that are called by some other node. */
3277
3278 static bfd_boolean
3279 mark_non_root (struct function_info *fun,
3280 struct bfd_link_info *info ATTRIBUTE_UNUSED,
3281 void *param ATTRIBUTE_UNUSED)
3282 {
3283 struct call_info *call;
3284
3285 if (fun->visit1)
3286 return TRUE;
3287 fun->visit1 = TRUE;
3288 for (call = fun->call_list; call; call = call->next)
3289 {
3290 call->fun->non_root = TRUE;
3291 mark_non_root (call->fun, 0, 0);
3292 }
3293 return TRUE;
3294 }
3295
/* Remove cycles from the call graph.  Set depth of nodes.
   PARAM points to the depth of FUN on entry, and is updated to the
   maximum depth found below FUN on return.  An edge closing a cycle
   is flagged broken_cycle rather than removed.  */

static bfd_boolean
remove_cycles (struct function_info *fun,
	       struct bfd_link_info *info,
	       void *param)
{
  struct call_info **callp, *call;
  unsigned int depth = *(unsigned int *) param;
  unsigned int max_depth = depth;

  fun->depth = depth;
  fun->visit2 = TRUE;
  /* MARKING is set only while FUN is on the current DFS path, so a
     marked callee means the edge closes a cycle.  */
  fun->marking = TRUE;

  callp = &fun->call_list;
  while ((call = *callp) != NULL)
    {
      /* Pasted parts are the same function, so don't deepen.  */
      call->max_depth = depth + !call->is_pasted;
      if (!call->fun->visit2)
	{
	  if (!remove_cycles (call->fun, info, &call->max_depth))
	    return FALSE;
	  if (max_depth < call->max_depth)
	    max_depth = call->max_depth;
	}
      else if (call->fun->marking)
	{
	  struct spu_link_hash_table *htab = spu_hash_table (info);

	  if (!htab->params->auto_overlay
	      && htab->params->stack_analysis)
	    {
	      const char *f1 = func_name (fun);
	      const char *f2 = func_name (call->fun);

	      /* xgettext:c-format */
	      info->callbacks->info (_("stack analysis will ignore the call "
				       "from %s to %s\n"),
				     f1, f2);
	    }

	  call->broken_cycle = TRUE;
	}
      callp = &call->next;
    }
  fun->marking = FALSE;
  *(unsigned int *) param = max_depth;
  return TRUE;
}
3346
3347 /* Check that we actually visited all nodes in remove_cycles. If we
3348 didn't, then there is some cycle in the call graph not attached to
3349 any root node. Arbitrarily choose a node in the cycle as a new
3350 root and break the cycle. */
3351
3352 static bfd_boolean
3353 mark_detached_root (struct function_info *fun,
3354 struct bfd_link_info *info,
3355 void *param)
3356 {
3357 if (fun->visit2)
3358 return TRUE;
3359 fun->non_root = FALSE;
3360 *(unsigned int *) param = 0;
3361 return remove_cycles (fun, info, param);
3362 }
3363
3364 /* Populate call_list for each function. */
3365
3366 static bfd_boolean
3367 build_call_tree (struct bfd_link_info *info)
3368 {
3369 bfd *ibfd;
3370 unsigned int depth;
3371
3372 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3373 {
3374 extern const bfd_target spu_elf32_vec;
3375 asection *sec;
3376
3377 if (ibfd->xvec != &spu_elf32_vec)
3378 continue;
3379
3380 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3381 if (!mark_functions_via_relocs (sec, info, TRUE))
3382 return FALSE;
3383 }
3384
3385 /* Transfer call info from hot/cold section part of function
3386 to main entry. */
3387 if (!spu_hash_table (info)->params->auto_overlay
3388 && !for_each_node (transfer_calls, info, 0, FALSE))
3389 return FALSE;
3390
3391 /* Find the call graph root(s). */
3392 if (!for_each_node (mark_non_root, info, 0, FALSE))
3393 return FALSE;
3394
3395 /* Remove cycles from the call graph. We start from the root node(s)
3396 so that we break cycles in a reasonable place. */
3397 depth = 0;
3398 if (!for_each_node (remove_cycles, info, &depth, TRUE))
3399 return FALSE;
3400
3401 return for_each_node (mark_detached_root, info, &depth, FALSE);
3402 }
3403
3404 /* qsort predicate to sort calls by priority, max_depth then count. */
3405
3406 static int
3407 sort_calls (const void *a, const void *b)
3408 {
3409 struct call_info *const *c1 = a;
3410 struct call_info *const *c2 = b;
3411 int delta;
3412
3413 delta = (*c2)->priority - (*c1)->priority;
3414 if (delta != 0)
3415 return delta;
3416
3417 delta = (*c2)->max_depth - (*c1)->max_depth;
3418 if (delta != 0)
3419 return delta;
3420
3421 delta = (*c2)->count - (*c1)->count;
3422 if (delta != 0)
3423 return delta;
3424
3425 return (char *) c1 - (char *) c2;
3426 }
3427
struct _mos_param {
  /* Largest size in bytes of any single function (text plus any
     associated rodata) marked for placement in an overlay.  */
  unsigned int max_overlay_size;
};
3431
3432 /* Set linker_mark and gc_mark on any sections that we will put in
3433 overlays. These flags are used by the generic ELF linker, but we
3434 won't be continuing on to bfd_elf_final_link so it is OK to use
3435 them. linker_mark is clear before we get here. Set segment_mark
3436 on sections that are part of a pasted function (excluding the last
3437 section).
3438
   Set up function rodata section if --overlay-rodata.  We don't
   currently include merged string constant rodata sections.
3441
3442 Sort the call graph so that the deepest nodes will be visited
3443 first. */
3444
static bfd_boolean
mark_overlay_section (struct function_info *fun,
		      struct bfd_link_info *info,
		      void *param)
{
  struct call_info *call;
  unsigned int count;
  struct _mos_param *mos_param = param;
  struct spu_link_hash_table *htab = spu_hash_table (info);

  if (fun->visit4)
    return TRUE;

  fun->visit4 = TRUE;
  /* For soft-icache, only sections the user allows (or .init/.fini
     and explicit .text.ia.* sections) may go into the icache area.  */
  if (!fun->sec->linker_mark
      && (htab->params->ovly_flavour != ovly_soft_icache
	  || htab->params->non_ia_text
	  || strncmp (fun->sec->name, ".text.ia.", 9) == 0
	  || strcmp (fun->sec->name, ".init") == 0
	  || strcmp (fun->sec->name, ".fini") == 0))
    {
      unsigned int size;

      fun->sec->linker_mark = 1;
      fun->sec->gc_mark = 1;
      fun->sec->segment_mark = 0;
      /* Ensure SEC_CODE is set on this text section (it ought to
	 be!), and SEC_CODE is clear on rodata sections.  We use
	 this flag to differentiate the two overlay section types.  */
      fun->sec->flags |= SEC_CODE;

      size = fun->sec->size;
      if (htab->params->auto_overlay & OVERLAY_RODATA)
	{
	  char *name = NULL;

	  /* Find the rodata section corresponding to this function's
	     text section.  Build the expected rodata section name:
	     ".text" -> ".rodata", ".text.FOO" -> ".rodata.FOO",
	     ".gnu.linkonce.t.FOO" -> ".gnu.linkonce.r.FOO".  */
	  if (strcmp (fun->sec->name, ".text") == 0)
	    {
	      name = bfd_malloc (sizeof (".rodata"));
	      if (name == NULL)
		return FALSE;
	      memcpy (name, ".rodata", sizeof (".rodata"));
	    }
	  else if (strncmp (fun->sec->name, ".text.", 6) == 0)
	    {
	      /* ".rodata" is two chars longer than ".text", so
		 len + 3 covers the suffix plus terminator; copying
		 from name + 5 takes ".FOO" and its NUL, len - 4
		 bytes in all.  */
	      size_t len = strlen (fun->sec->name);
	      name = bfd_malloc (len + 3);
	      if (name == NULL)
		return FALSE;
	      memcpy (name, ".rodata", sizeof (".rodata"));
	      memcpy (name + 7, fun->sec->name + 5, len - 4);
	    }
	  else if (strncmp (fun->sec->name, ".gnu.linkonce.t.", 16) == 0)
	    {
	      size_t len = strlen (fun->sec->name) + 1;
	      name = bfd_malloc (len);
	      if (name == NULL)
		return FALSE;
	      memcpy (name, fun->sec->name, len);
	      /* Index 14 is the 't' in ".gnu.linkonce.t.".  */
	      name[14] = 'r';
	    }

	  if (name != NULL)
	    {
	      asection *rodata = NULL;
	      /* For grouped (COMDAT) sections look only within the
		 group; otherwise search the whole input bfd.  */
	      asection *group_sec = elf_section_data (fun->sec)->next_in_group;
	      if (group_sec == NULL)
		rodata = bfd_get_section_by_name (fun->sec->owner, name);
	      else
		while (group_sec != NULL && group_sec != fun->sec)
		  {
		    if (strcmp (group_sec->name, name) == 0)
		      {
			rodata = group_sec;
			break;
		      }
		    group_sec = elf_section_data (group_sec)->next_in_group;
		  }
	      fun->rodata = rodata;
	      if (fun->rodata)
		{
		  size += fun->rodata->size;
		  /* Drop the rodata pairing if it would overflow a
		     soft-icache line.  */
		  if (htab->params->line_size != 0
		      && size > htab->params->line_size)
		    {
		      size -= fun->rodata->size;
		      fun->rodata = NULL;
		    }
		  else
		    {
		      fun->rodata->linker_mark = 1;
		      fun->rodata->gc_mark = 1;
		      fun->rodata->flags &= ~SEC_CODE;
		    }
		}
	      free (name);
	    }
	}
      /* Track the largest single overlay candidate.  */
      if (mos_param->max_overlay_size < size)
	mos_param->max_overlay_size = size;
    }

  /* Sort this function's outgoing calls (see sort_calls) so the
     deepest/highest priority callees are visited first.  */
  for (count = 0, call = fun->call_list; call != NULL; call = call->next)
    count += 1;

  if (count > 1)
    {
      struct call_info **calls = bfd_malloc (count * sizeof (*calls));
      if (calls == NULL)
	return FALSE;

      for (count = 0, call = fun->call_list; call != NULL; call = call->next)
	calls[count++] = call;

      qsort (calls, count, sizeof (*calls), sort_calls);

      /* Rebuild the singly linked call_list in sorted order.  */
      fun->call_list = NULL;
      while (count != 0)
	{
	  --count;
	  calls[count]->next = fun->call_list;
	  fun->call_list = calls[count];
	}
      free (calls);
    }

  for (call = fun->call_list; call != NULL; call = call->next)
    {
      if (call->is_pasted)
	{
	  /* There can only be one is_pasted call per function_info.  */
	  BFD_ASSERT (!fun->sec->segment_mark);
	  fun->sec->segment_mark = 1;
	}
      if (!call->broken_cycle
	  && !mark_overlay_section (call->fun, info, param))
	return FALSE;
    }

  /* Don't put entry code into an overlay.  The overlay manager needs
     a stack!  Also, don't mark .ovl.init as an overlay.  */
  if (fun->lo + fun->sec->output_offset + fun->sec->output_section->vma
      == info->output_bfd->start_address
      || strncmp (fun->sec->output_section->name, ".ovl.init", 9) == 0)
    {
      fun->sec->linker_mark = 0;
      if (fun->rodata != NULL)
	fun->rodata->linker_mark = 0;
    }
  return TRUE;
}
3598
/* If non-zero then unmark functions called from those within sections
   that we need to unmark.  Unfortunately this isn't reliable since the
   call graph cannot know the destination of function pointer calls.  */
#define RECURSE_UNMARK 0

struct _uos_param {
  /* Input section that must not go into an overlay, if any.  */
  asection *exclude_input_section;
  /* Output section that must not go into an overlay, if any.  */
  asection *exclude_output_section;
  /* Recursion count, used only when RECURSE_UNMARK is non-zero.  */
  unsigned long clearing;
};
3609
3610 /* Undo some of mark_overlay_section's work. */
3611
3612 static bfd_boolean
3613 unmark_overlay_section (struct function_info *fun,
3614 struct bfd_link_info *info,
3615 void *param)
3616 {
3617 struct call_info *call;
3618 struct _uos_param *uos_param = param;
3619 unsigned int excluded = 0;
3620
3621 if (fun->visit5)
3622 return TRUE;
3623
3624 fun->visit5 = TRUE;
3625
3626 excluded = 0;
3627 if (fun->sec == uos_param->exclude_input_section
3628 || fun->sec->output_section == uos_param->exclude_output_section)
3629 excluded = 1;
3630
3631 if (RECURSE_UNMARK)
3632 uos_param->clearing += excluded;
3633
3634 if (RECURSE_UNMARK ? uos_param->clearing : excluded)
3635 {
3636 fun->sec->linker_mark = 0;
3637 if (fun->rodata)
3638 fun->rodata->linker_mark = 0;
3639 }
3640
3641 for (call = fun->call_list; call != NULL; call = call->next)
3642 if (!call->broken_cycle
3643 && !unmark_overlay_section (call->fun, info, param))
3644 return FALSE;
3645
3646 if (RECURSE_UNMARK)
3647 uos_param->clearing -= excluded;
3648 return TRUE;
3649 }
3650
struct _cl_param {
  /* Bytes of local store still available for non-overlay "library"
     functions.  */
  unsigned int lib_size;
  /* Next free slot in an array of (text, rodata) section pairs.  */
  asection **lib_sections;
};
3655
3656 /* Add sections we have marked as belonging to overlays to an array
3657 for consideration as non-overlay sections. The array consist of
3658 pairs of sections, (text,rodata), for functions in the call graph. */
3659
3660 static bfd_boolean
3661 collect_lib_sections (struct function_info *fun,
3662 struct bfd_link_info *info,
3663 void *param)
3664 {
3665 struct _cl_param *lib_param = param;
3666 struct call_info *call;
3667 unsigned int size;
3668
3669 if (fun->visit6)
3670 return TRUE;
3671
3672 fun->visit6 = TRUE;
3673 if (!fun->sec->linker_mark || !fun->sec->gc_mark || fun->sec->segment_mark)
3674 return TRUE;
3675
3676 size = fun->sec->size;
3677 if (fun->rodata)
3678 size += fun->rodata->size;
3679
3680 if (size <= lib_param->lib_size)
3681 {
3682 *lib_param->lib_sections++ = fun->sec;
3683 fun->sec->gc_mark = 0;
3684 if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
3685 {
3686 *lib_param->lib_sections++ = fun->rodata;
3687 fun->rodata->gc_mark = 0;
3688 }
3689 else
3690 *lib_param->lib_sections++ = NULL;
3691 }
3692
3693 for (call = fun->call_list; call != NULL; call = call->next)
3694 if (!call->broken_cycle)
3695 collect_lib_sections (call->fun, info, param);
3696
3697 return TRUE;
3698 }
3699
3700 /* qsort predicate to sort sections by call count. */
3701
3702 static int
3703 sort_lib (const void *a, const void *b)
3704 {
3705 asection *const *s1 = a;
3706 asection *const *s2 = b;
3707 struct _spu_elf_section_data *sec_data;
3708 struct spu_elf_stack_info *sinfo;
3709 int delta;
3710
3711 delta = 0;
3712 if ((sec_data = spu_elf_section_data (*s1)) != NULL
3713 && (sinfo = sec_data->u.i.stack_info) != NULL)
3714 {
3715 int i;
3716 for (i = 0; i < sinfo->num_fun; ++i)
3717 delta -= sinfo->fun[i].call_count;
3718 }
3719
3720 if ((sec_data = spu_elf_section_data (*s2)) != NULL
3721 && (sinfo = sec_data->u.i.stack_info) != NULL)
3722 {
3723 int i;
3724 for (i = 0; i < sinfo->num_fun; ++i)
3725 delta += sinfo->fun[i].call_count;
3726 }
3727
3728 if (delta != 0)
3729 return delta;
3730
3731 return s1 - s2;
3732 }
3733
3734 /* Remove some sections from those marked to be in overlays. Choose
3735 those that are called from many places, likely library functions. */
3736
3737 static unsigned int
3738 auto_ovl_lib_functions (struct bfd_link_info *info, unsigned int lib_size)
3739 {
3740 bfd *ibfd;
3741 asection **lib_sections;
3742 unsigned int i, lib_count;
3743 struct _cl_param collect_lib_param;
3744 struct function_info dummy_caller;
3745 struct spu_link_hash_table *htab;
3746
3747 memset (&dummy_caller, 0, sizeof (dummy_caller));
3748 lib_count = 0;
3749 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3750 {
3751 extern const bfd_target spu_elf32_vec;
3752 asection *sec;
3753
3754 if (ibfd->xvec != &spu_elf32_vec)
3755 continue;
3756
3757 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3758 if (sec->linker_mark
3759 && sec->size < lib_size
3760 && (sec->flags & SEC_CODE) != 0)
3761 lib_count += 1;
3762 }
3763 lib_sections = bfd_malloc (lib_count * 2 * sizeof (*lib_sections));
3764 if (lib_sections == NULL)
3765 return (unsigned int) -1;
3766 collect_lib_param.lib_size = lib_size;
3767 collect_lib_param.lib_sections = lib_sections;
3768 if (!for_each_node (collect_lib_sections, info, &collect_lib_param,
3769 TRUE))
3770 return (unsigned int) -1;
3771 lib_count = (collect_lib_param.lib_sections - lib_sections) / 2;
3772
3773 /* Sort sections so that those with the most calls are first. */
3774 if (lib_count > 1)
3775 qsort (lib_sections, lib_count, 2 * sizeof (*lib_sections), sort_lib);
3776
3777 htab = spu_hash_table (info);
3778 for (i = 0; i < lib_count; i++)
3779 {
3780 unsigned int tmp, stub_size;
3781 asection *sec;
3782 struct _spu_elf_section_data *sec_data;
3783 struct spu_elf_stack_info *sinfo;
3784
3785 sec = lib_sections[2 * i];
3786 /* If this section is OK, its size must be less than lib_size. */
3787 tmp = sec->size;
3788 /* If it has a rodata section, then add that too. */
3789 if (lib_sections[2 * i + 1])
3790 tmp += lib_sections[2 * i + 1]->size;
3791 /* Add any new overlay call stubs needed by the section. */
3792 stub_size = 0;
3793 if (tmp < lib_size
3794 && (sec_data = spu_elf_section_data (sec)) != NULL
3795 && (sinfo = sec_data->u.i.stack_info) != NULL)
3796 {
3797 int k;
3798 struct call_info *call;
3799
3800 for (k = 0; k < sinfo->num_fun; ++k)
3801 for (call = sinfo->fun[k].call_list; call; call = call->next)
3802 if (call->fun->sec->linker_mark)
3803 {
3804 struct call_info *p;
3805 for (p = dummy_caller.call_list; p; p = p->next)
3806 if (p->fun == call->fun)
3807 break;
3808 if (!p)
3809 stub_size += ovl_stub_size (htab->params);
3810 }
3811 }
3812 if (tmp + stub_size < lib_size)
3813 {
3814 struct call_info **pp, *p;
3815
3816 /* This section fits. Mark it as non-overlay. */
3817 lib_sections[2 * i]->linker_mark = 0;
3818 if (lib_sections[2 * i + 1])
3819 lib_sections[2 * i + 1]->linker_mark = 0;
3820 lib_size -= tmp + stub_size;
3821 /* Call stubs to the section we just added are no longer
3822 needed. */
3823 pp = &dummy_caller.call_list;
3824 while ((p = *pp) != NULL)
3825 if (!p->fun->sec->linker_mark)
3826 {
3827 lib_size += ovl_stub_size (htab->params);
3828 *pp = p->next;
3829 free (p);
3830 }
3831 else
3832 pp = &p->next;
3833 /* Add new call stubs to dummy_caller. */
3834 if ((sec_data = spu_elf_section_data (sec)) != NULL
3835 && (sinfo = sec_data->u.i.stack_info) != NULL)
3836 {
3837 int k;
3838 struct call_info *call;
3839
3840 for (k = 0; k < sinfo->num_fun; ++k)
3841 for (call = sinfo->fun[k].call_list;
3842 call;
3843 call = call->next)
3844 if (call->fun->sec->linker_mark)
3845 {
3846 struct call_info *callee;
3847 callee = bfd_malloc (sizeof (*callee));
3848 if (callee == NULL)
3849 return (unsigned int) -1;
3850 *callee = *call;
3851 if (!insert_callee (&dummy_caller, callee))
3852 free (callee);
3853 }
3854 }
3855 }
3856 }
3857 while (dummy_caller.call_list != NULL)
3858 {
3859 struct call_info *call = dummy_caller.call_list;
3860 dummy_caller.call_list = call->next;
3861 free (call);
3862 }
3863 for (i = 0; i < 2 * lib_count; i++)
3864 if (lib_sections[i])
3865 lib_sections[i]->gc_mark = 1;
3866 free (lib_sections);
3867 return lib_size;
3868 }
3869
3870 /* Build an array of overlay sections. The deepest node's section is
3871 added first, then its parent node's section, then everything called
3872 from the parent section. The idea being to group sections to
3873 minimise calls between different overlays. */
3874
static bfd_boolean
collect_overlays (struct function_info *fun,
		  struct bfd_link_info *info,
		  void *param)
{
  struct call_info *call;
  bfd_boolean added_fun;
  asection ***ovly_sections = param;

  if (fun->visit7)
    return TRUE;

  fun->visit7 = TRUE;
  /* First recurse into the deepest (first after sorting) real callee,
     so the deepest node's section lands in the array before ours.  */
  for (call = fun->call_list; call != NULL; call = call->next)
    if (!call->is_pasted && !call->broken_cycle)
      {
	if (!collect_overlays (call->fun, info, ovly_sections))
	  return FALSE;
	break;
      }

  added_fun = FALSE;
  if (fun->sec->linker_mark && fun->sec->gc_mark)
    {
      /* Append the (text, rodata) pair, clearing gc_mark so each
	 section is emitted only once.  */
      fun->sec->gc_mark = 0;
      *(*ovly_sections)++ = fun->sec;
      if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
	{
	  fun->rodata->gc_mark = 0;
	  *(*ovly_sections)++ = fun->rodata;
	}
      else
	*(*ovly_sections)++ = NULL;
      added_fun = TRUE;

      /* Pasted sections must stay with the first section.  We don't
	 put pasted sections in the array, just the first section.
	 Mark subsequent sections as already considered.  */
      if (fun->sec->segment_mark)
	{
	  struct function_info *call_fun = fun;
	  do
	    {
	      /* Follow the chain of is_pasted calls, clearing gc_mark
		 on each continuation section.  */
	      for (call = call_fun->call_list; call != NULL; call = call->next)
		if (call->is_pasted)
		  {
		    call_fun = call->fun;
		    call_fun->sec->gc_mark = 0;
		    if (call_fun->rodata)
		      call_fun->rodata->gc_mark = 0;
		    break;
		  }
	      /* segment_mark promised a pasted continuation.  */
	      if (call == NULL)
		abort ();
	    }
	  while (call_fun->sec->segment_mark);
	}
    }

  /* Now visit the rest of this function's callees.  */
  for (call = fun->call_list; call != NULL; call = call->next)
    if (!call->broken_cycle
	&& !collect_overlays (call->fun, info, param))
      return FALSE;

  if (added_fun)
    {
      struct _spu_elf_section_data *sec_data;
      struct spu_elf_stack_info *sinfo;

      /* Also visit any other functions that live in the same section
	 as FUN, so their callees group into nearby overlays.  */
      if ((sec_data = spu_elf_section_data (fun->sec)) != NULL
	  && (sinfo = sec_data->u.i.stack_info) != NULL)
	{
	  int i;
	  for (i = 0; i < sinfo->num_fun; ++i)
	    if (!collect_overlays (&sinfo->fun[i], info, ovly_sections))
	      return FALSE;
	}
    }

  return TRUE;
}
3956
struct _sum_stack_param {
  /* Out: cumulative stack of the function just visited (its local
     stack plus the deepest callee chain).  */
  size_t cum_stack;
  /* Largest cumulative stack seen so far for a call graph root.  */
  size_t overall_stack;
  /* Non-zero to define a __stack_* absolute symbol per function.  */
  bfd_boolean emit_stack_syms;
};
3962
3963 /* Descend the call graph for FUN, accumulating total stack required. */
3964
static bfd_boolean
sum_stack (struct function_info *fun,
	   struct bfd_link_info *info,
	   void *param)
{
  struct call_info *call;
  struct function_info *max;
  size_t stack, cum_stack;
  const char *f1;
  bfd_boolean has_call;
  struct _sum_stack_param *sum_stack_param = param;
  struct spu_link_hash_table *htab;

  /* On second and later visits fun->stack already holds the
     cumulative figure (see below), so just report it.  */
  cum_stack = fun->stack;
  sum_stack_param->cum_stack = cum_stack;
  if (fun->visit3)
    return TRUE;

  has_call = FALSE;
  /* MAX records the callee on the deepest stack path, for the report.  */
  max = NULL;
  for (call = fun->call_list; call; call = call->next)
    {
      if (call->broken_cycle)
	continue;
      if (!call->is_pasted)
	has_call = TRUE;
      if (!sum_stack (call->fun, info, sum_stack_param))
	return FALSE;
      stack = sum_stack_param->cum_stack;
      /* Include caller stack for normal calls, don't do so for
	 tail calls.  fun->stack here is local stack usage for
	 this function.  */
      if (!call->is_tail || call->is_pasted || call->fun->start != NULL)
	stack += fun->stack;
      if (cum_stack < stack)
	{
	  cum_stack = stack;
	  max = call->fun;
	}
    }

  sum_stack_param->cum_stack = cum_stack;
  stack = fun->stack;
  /* Now fun->stack holds cumulative stack.  */
  fun->stack = cum_stack;
  fun->visit3 = TRUE;

  /* Only root functions contribute to the program's overall maximum.  */
  if (!fun->non_root
      && sum_stack_param->overall_stack < cum_stack)
    sum_stack_param->overall_stack = cum_stack;

  htab = spu_hash_table (info);
  if (htab->params->auto_overlay)
    return TRUE;

  f1 = func_name (fun);
  if (htab->params->stack_analysis)
    {
      if (!fun->non_root)
	info->callbacks->info (" %s: 0x%v\n", f1, (bfd_vma) cum_stack);
      info->callbacks->minfo ("%s: 0x%v 0x%v\n",
			      f1, (bfd_vma) stack, (bfd_vma) cum_stack);

      if (has_call)
	{
	  info->callbacks->minfo (_(" calls:\n"));
	  for (call = fun->call_list; call; call = call->next)
	    if (!call->is_pasted && !call->broken_cycle)
	      {
		const char *f2 = func_name (call->fun);
		/* "*" marks the deepest-stack callee, "t" a tail call.  */
		const char *ann1 = call->fun == max ? "*" : " ";
		const char *ann2 = call->is_tail ? "t" : " ";

		info->callbacks->minfo (" %s%s %s\n", ann1, ann2, f2);
	      }
	}
    }

  if (sum_stack_param->emit_stack_syms)
    {
      /* 18 = strlen ("__stack_") + 8 hex digits + '_' + NUL.  */
      char *name = bfd_malloc (18 + strlen (f1));
      struct elf_link_hash_entry *h;

      if (name == NULL)
	return FALSE;

      /* Local symbols get the section id folded in to keep the
	 generated names unique.  */
      if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
	sprintf (name, "__stack_%s", f1);
      else
	sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);

      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
      free (name);
      /* Define the symbol as an absolute value equal to the
	 cumulative stack, unless the user already defined it.  */
      if (h != NULL
	  && (h->root.type == bfd_link_hash_new
	      || h->root.type == bfd_link_hash_undefined
	      || h->root.type == bfd_link_hash_undefweak))
	{
	  h->root.type = bfd_link_hash_defined;
	  h->root.u.def.section = bfd_abs_section_ptr;
	  h->root.u.def.value = cum_stack;
	  h->size = 0;
	  h->type = 0;
	  h->ref_regular = 1;
	  h->def_regular = 1;
	  h->ref_regular_nonweak = 1;
	  h->forced_local = 1;
	  h->non_elf = 0;
	}
    }

  return TRUE;
}
4078
4079 /* SEC is part of a pasted function. Return the call_info for the
4080 next section of this function. */
4081
4082 static struct call_info *
4083 find_pasted_call (asection *sec)
4084 {
4085 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
4086 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
4087 struct call_info *call;
4088 int k;
4089
4090 for (k = 0; k < sinfo->num_fun; ++k)
4091 for (call = sinfo->fun[k].call_list; call != NULL; call = call->next)
4092 if (call->is_pasted)
4093 return call;
4094 abort ();
4095 return 0;
4096 }
4097
4098 /* qsort predicate to sort bfds by file name. */
4099
4100 static int
4101 sort_bfds (const void *a, const void *b)
4102 {
4103 bfd *const *abfd1 = a;
4104 bfd *const *abfd2 = b;
4105
4106 return filename_cmp ((*abfd1)->filename, (*abfd2)->filename);
4107 }
4108
4109 static unsigned int
4110 print_one_overlay_section (FILE *script,
4111 unsigned int base,
4112 unsigned int count,
4113 unsigned int ovlynum,
4114 unsigned int *ovly_map,
4115 asection **ovly_sections,
4116 struct bfd_link_info *info)
4117 {
4118 unsigned int j;
4119
4120 for (j = base; j < count && ovly_map[j] == ovlynum; j++)
4121 {
4122 asection *sec = ovly_sections[2 * j];
4123
4124 if (fprintf (script, " %s%c%s (%s)\n",
4125 (sec->owner->my_archive != NULL
4126 ? sec->owner->my_archive->filename : ""),
4127 info->path_separator,
4128 sec->owner->filename,
4129 sec->name) <= 0)
4130 return -1;
4131 if (sec->segment_mark)
4132 {
4133 struct call_info *call = find_pasted_call (sec);
4134 while (call != NULL)
4135 {
4136 struct function_info *call_fun = call->fun;
4137 sec = call_fun->sec;
4138 if (fprintf (script, " %s%c%s (%s)\n",
4139 (sec->owner->my_archive != NULL
4140 ? sec->owner->my_archive->filename : ""),
4141 info->path_separator,
4142 sec->owner->filename,
4143 sec->name) <= 0)
4144 return -1;
4145 for (call = call_fun->call_list; call; call = call->next)
4146 if (call->is_pasted)
4147 break;
4148 }
4149 }
4150 }
4151
4152 for (j = base; j < count && ovly_map[j] == ovlynum; j++)
4153 {
4154 asection *sec = ovly_sections[2 * j + 1];
4155 if (sec != NULL
4156 && fprintf (script, " %s%c%s (%s)\n",
4157 (sec->owner->my_archive != NULL
4158 ? sec->owner->my_archive->filename : ""),
4159 info->path_separator,
4160 sec->owner->filename,
4161 sec->name) <= 0)
4162 return -1;
4163
4164 sec = ovly_sections[2 * j];
4165 if (sec->segment_mark)
4166 {
4167 struct call_info *call = find_pasted_call (sec);
4168 while (call != NULL)
4169 {
4170 struct function_info *call_fun = call->fun;
4171 sec = call_fun->rodata;
4172 if (sec != NULL
4173 && fprintf (script, " %s%c%s (%s)\n",
4174 (sec->owner->my_archive != NULL
4175 ? sec->owner->my_archive->filename : ""),
4176 info->path_separator,
4177 sec->owner->filename,
4178 sec->name) <= 0)
4179 return -1;
4180 for (call = call_fun->call_list; call; call = call->next)
4181 if (call->is_pasted)
4182 break;
4183 }
4184 }
4185 }
4186
4187 return j;
4188 }
4189
4190 /* Handle --auto-overlay. */
4191
4192 static void
4193 spu_elf_auto_overlay (struct bfd_link_info *info)
4194 {
4195 bfd *ibfd;
4196 bfd **bfd_arr;
4197 struct elf_segment_map *m;
4198 unsigned int fixed_size, lo, hi;
4199 unsigned int reserved;
4200 struct spu_link_hash_table *htab;
4201 unsigned int base, i, count, bfd_count;
4202 unsigned int region, ovlynum;
4203 asection **ovly_sections, **ovly_p;
4204 unsigned int *ovly_map;
4205 FILE *script;
4206 unsigned int total_overlay_size, overlay_size;
4207 const char *ovly_mgr_entry;
4208 struct elf_link_hash_entry *h;
4209 struct _mos_param mos_param;
4210 struct _uos_param uos_param;
4211 struct function_info dummy_caller;
4212
4213 /* Find the extents of our loadable image. */
4214 lo = (unsigned int) -1;
4215 hi = 0;
4216 for (m = elf_seg_map (info->output_bfd); m != NULL; m = m->next)
4217 if (m->p_type == PT_LOAD)
4218 for (i = 0; i < m->count; i++)
4219 if (m->sections[i]->size != 0)
4220 {
4221 if (m->sections[i]->vma < lo)
4222 lo = m->sections[i]->vma;
4223 if (m->sections[i]->vma + m->sections[i]->size - 1 > hi)
4224 hi = m->sections[i]->vma + m->sections[i]->size - 1;
4225 }
4226 fixed_size = hi + 1 - lo;
4227
4228 if (!discover_functions (info))
4229 goto err_exit;
4230
4231 if (!build_call_tree (info))
4232 goto err_exit;
4233
4234 htab = spu_hash_table (info);
4235 reserved = htab->params->auto_overlay_reserved;
4236 if (reserved == 0)
4237 {
4238 struct _sum_stack_param sum_stack_param;
4239
4240 sum_stack_param.emit_stack_syms = 0;
4241 sum_stack_param.overall_stack = 0;
4242 if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
4243 goto err_exit;
4244 reserved = (sum_stack_param.overall_stack
4245 + htab->params->extra_stack_space);
4246 }
4247
4248 /* No need for overlays if everything already fits. */
4249 if (fixed_size + reserved <= htab->local_store
4250 && htab->params->ovly_flavour != ovly_soft_icache)
4251 {
4252 htab->params->auto_overlay = 0;
4253 return;
4254 }
4255
4256 uos_param.exclude_input_section = 0;
4257 uos_param.exclude_output_section
4258 = bfd_get_section_by_name (info->output_bfd, ".interrupt");
4259
4260 ovly_mgr_entry = "__ovly_load";
4261 if (htab->params->ovly_flavour == ovly_soft_icache)
4262 ovly_mgr_entry = "__icache_br_handler";
4263 h = elf_link_hash_lookup (&htab->elf, ovly_mgr_entry,
4264 FALSE, FALSE, FALSE);
4265 if (h != NULL
4266 && (h->root.type == bfd_link_hash_defined
4267 || h->root.type == bfd_link_hash_defweak)
4268 && h->def_regular)
4269 {
4270 /* We have a user supplied overlay manager. */
4271 uos_param.exclude_input_section = h->root.u.def.section;
4272 }
4273 else
4274 {
4275 /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
4276 builtin version to .text, and will adjust .text size. */
4277 fixed_size += (*htab->params->spu_elf_load_ovl_mgr) ();
4278 }
4279
4280 /* Mark overlay sections, and find max overlay section size. */
4281 mos_param.max_overlay_size = 0;
4282 if (!for_each_node (mark_overlay_section, info, &mos_param, TRUE))
4283 goto err_exit;
4284
4285 /* We can't put the overlay manager or interrupt routines in
4286 overlays. */
4287 uos_param.clearing = 0;
4288 if ((uos_param.exclude_input_section
4289 || uos_param.exclude_output_section)
4290 && !for_each_node (unmark_overlay_section, info, &uos_param, TRUE))
4291 goto err_exit;
4292
4293 bfd_count = 0;
4294 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
4295 ++bfd_count;
4296 bfd_arr = bfd_malloc (bfd_count * sizeof (*bfd_arr));
4297 if (bfd_arr == NULL)
4298 goto err_exit;
4299
4300 /* Count overlay sections, and subtract their sizes from "fixed_size". */
4301 count = 0;
4302 bfd_count = 0;
4303 total_overlay_size = 0;
4304 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
4305 {
4306 extern const bfd_target spu_elf32_vec;
4307 asection *sec;
4308 unsigned int old_count;
4309
4310 if (ibfd->xvec != &spu_elf32_vec)
4311 continue;
4312
4313 old_count = count;
4314 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
4315 if (sec->linker_mark)
4316 {
4317 if ((sec->flags & SEC_CODE) != 0)
4318 count += 1;
4319 fixed_size -= sec->size;
4320 total_overlay_size += sec->size;
4321 }
4322 else if ((sec->flags & (SEC_ALLOC | SEC_LOAD)) == (SEC_ALLOC | SEC_LOAD)
4323 && sec->output_section->owner == info->output_bfd
4324 && strncmp (sec->output_section->name, ".ovl.init", 9) == 0)
4325 fixed_size -= sec->size;
4326 if (count != old_count)
4327 bfd_arr[bfd_count++] = ibfd;
4328 }
4329
4330 /* Since the overlay link script selects sections by file name and
4331 section name, ensure that file names are unique. */
4332 if (bfd_count > 1)
4333 {
4334 bfd_boolean ok = TRUE;
4335
4336 qsort (bfd_arr, bfd_count, sizeof (*bfd_arr), sort_bfds);
4337 for (i = 1; i < bfd_count; ++i)
4338 if (filename_cmp (bfd_arr[i - 1]->filename, bfd_arr[i]->filename) == 0)
4339 {
4340 if (bfd_arr[i - 1]->my_archive == bfd_arr[i]->my_archive)
4341 {
4342 if (bfd_arr[i - 1]->my_archive && bfd_arr[i]->my_archive)
4343 /* xgettext:c-format */
4344 info->callbacks->einfo (_("%s duplicated in %s\n"),
4345 bfd_arr[i]->filename,
4346 bfd_arr[i]->my_archive->filename);
4347 else
4348 info->callbacks->einfo (_("%s duplicated\n"),
4349 bfd_arr[i]->filename);
4350 ok = FALSE;
4351 }
4352 }
4353 if (!ok)
4354 {
4355 info->callbacks->einfo (_("sorry, no support for duplicate "
4356 "object files in auto-overlay script\n"));
4357 bfd_set_error (bfd_error_bad_value);
4358 goto err_exit;
4359 }
4360 }
4361 free (bfd_arr);
4362
4363 fixed_size += reserved;
4364 fixed_size += htab->non_ovly_stub * ovl_stub_size (htab->params);
4365 if (fixed_size + mos_param.max_overlay_size <= htab->local_store)
4366 {
4367 if (htab->params->ovly_flavour == ovly_soft_icache)
4368 {
4369 /* Stubs in the non-icache area are bigger. */
4370 fixed_size += htab->non_ovly_stub * 16;
4371 /* Space for icache manager tables.
4372 a) Tag array, one quadword per cache line.
4373 - word 0: ia address of present line, init to zero. */
4374 fixed_size += 16 << htab->num_lines_log2;
4375 /* b) Rewrite "to" list, one quadword per cache line. */
4376 fixed_size += 16 << htab->num_lines_log2;
4377 /* c) Rewrite "from" list, one byte per outgoing branch (rounded up
4378 to a power-of-two number of full quadwords) per cache line. */
4379 fixed_size += 16 << (htab->fromelem_size_log2
4380 + htab->num_lines_log2);
4381 /* d) Pointer to __ea backing store (toe), 1 quadword. */
4382 fixed_size += 16;
4383 }
4384 else
4385 {
4386 /* Guess number of overlays. Assuming overlay buffer is on
4387 average only half full should be conservative. */
4388 ovlynum = (total_overlay_size * 2 * htab->params->num_lines
4389 / (htab->local_store - fixed_size));
4390 /* Space for _ovly_table[], _ovly_buf_table[] and toe. */
4391 fixed_size += ovlynum * 16 + 16 + 4 + 16;
4392 }
4393 }
4394
4395 if (fixed_size + mos_param.max_overlay_size > htab->local_store)
4396 /* xgettext:c-format */
4397 info->callbacks->einfo (_("non-overlay size of 0x%v plus maximum overlay "
4398 "size of 0x%v exceeds local store\n"),
4399 (bfd_vma) fixed_size,
4400 (bfd_vma) mos_param.max_overlay_size);
4401
4402 /* Now see if we should put some functions in the non-overlay area. */
4403 else if (fixed_size < htab->params->auto_overlay_fixed)
4404 {
4405 unsigned int max_fixed, lib_size;
4406
4407 max_fixed = htab->local_store - mos_param.max_overlay_size;
4408 if (max_fixed > htab->params->auto_overlay_fixed)
4409 max_fixed = htab->params->auto_overlay_fixed;
4410 lib_size = max_fixed - fixed_size;
4411 lib_size = auto_ovl_lib_functions (info, lib_size);
4412 if (lib_size == (unsigned int) -1)
4413 goto err_exit;
4414 fixed_size = max_fixed - lib_size;
4415 }
4416
4417 /* Build an array of sections, suitably sorted to place into
4418 overlays. */
4419 ovly_sections = bfd_malloc (2 * count * sizeof (*ovly_sections));
4420 if (ovly_sections == NULL)
4421 goto err_exit;
4422 ovly_p = ovly_sections;
4423 if (!for_each_node (collect_overlays, info, &ovly_p, TRUE))
4424 goto err_exit;
4425 count = (size_t) (ovly_p - ovly_sections) / 2;
4426 ovly_map = bfd_malloc (count * sizeof (*ovly_map));
4427 if (ovly_map == NULL)
4428 goto err_exit;
4429
4430 memset (&dummy_caller, 0, sizeof (dummy_caller));
4431 overlay_size = (htab->local_store - fixed_size) / htab->params->num_lines;
4432 if (htab->params->line_size != 0)
4433 overlay_size = htab->params->line_size;
4434 base = 0;
4435 ovlynum = 0;
4436 while (base < count)
4437 {
4438 unsigned int size = 0, rosize = 0, roalign = 0;
4439
4440 for (i = base; i < count; i++)
4441 {
4442 asection *sec, *rosec;
4443 unsigned int tmp, rotmp;
4444 unsigned int num_stubs;
4445 struct call_info *call, *pasty;
4446 struct _spu_elf_section_data *sec_data;
4447 struct spu_elf_stack_info *sinfo;
4448 unsigned int k;
4449
4450 /* See whether we can add this section to the current
4451 overlay without overflowing our overlay buffer. */
4452 sec = ovly_sections[2 * i];
4453 tmp = align_power (size, sec->alignment_power) + sec->size;
4454 rotmp = rosize;
4455 rosec = ovly_sections[2 * i + 1];
4456 if (rosec != NULL)
4457 {
4458 rotmp = align_power (rotmp, rosec->alignment_power) + rosec->size;
4459 if (roalign < rosec->alignment_power)
4460 roalign = rosec->alignment_power;
4461 }
4462 if (align_power (tmp, roalign) + rotmp > overlay_size)
4463 break;
4464 if (sec->segment_mark)
4465 {
4466 /* Pasted sections must stay together, so add their
4467 sizes too. */
4468 pasty = find_pasted_call (sec);
4469 while (pasty != NULL)
4470 {
4471 struct function_info *call_fun = pasty->fun;
4472 tmp = (align_power (tmp, call_fun->sec->alignment_power)
4473 + call_fun->sec->size);
4474 if (call_fun->rodata)
4475 {
4476 rotmp = (align_power (rotmp,
4477 call_fun->rodata->alignment_power)
4478 + call_fun->rodata->size);
4479 if (roalign < rosec->alignment_power)
4480 roalign = rosec->alignment_power;
4481 }
4482 for (pasty = call_fun->call_list; pasty; pasty = pasty->next)
4483 if (pasty->is_pasted)
4484 break;
4485 }
4486 }
4487 if (align_power (tmp, roalign) + rotmp > overlay_size)
4488 break;
4489
4490 /* If we add this section, we might need new overlay call
4491 stubs. Add any overlay section calls to dummy_call. */
4492 pasty = NULL;
4493 sec_data = spu_elf_section_data (sec);
4494 sinfo = sec_data->u.i.stack_info;
4495 for (k = 0; k < (unsigned) sinfo->num_fun; ++k)
4496 for (call = sinfo->fun[k].call_list; call; call = call->next)
4497 if (call->is_pasted)
4498 {
4499 BFD_ASSERT (pasty == NULL);
4500 pasty = call;
4501 }
4502 else if (call->fun->sec->linker_mark)
4503 {
4504 if (!copy_callee (&dummy_caller, call))
4505 goto err_exit;
4506 }
4507 while (pasty != NULL)
4508 {
4509 struct function_info *call_fun = pasty->fun;
4510 pasty = NULL;
4511 for (call = call_fun->call_list; call; call = call->next)
4512 if (call->is_pasted)
4513 {
4514 BFD_ASSERT (pasty == NULL);
4515 pasty = call;
4516 }
4517 else if (!copy_callee (&dummy_caller, call))
4518 goto err_exit;
4519 }
4520
4521 /* Calculate call stub size. */
4522 num_stubs = 0;
4523 for (call = dummy_caller.call_list; call; call = call->next)
4524 {
4525 unsigned int stub_delta = 1;
4526
4527 if (htab->params->ovly_flavour == ovly_soft_icache)
4528 stub_delta = call->count;
4529 num_stubs += stub_delta;
4530
4531 /* If the call is within this overlay, we won't need a
4532 stub. */
4533 for (k = base; k < i + 1; k++)
4534 if (call->fun->sec == ovly_sections[2 * k])
4535 {
4536 num_stubs -= stub_delta;
4537 break;
4538 }
4539 }
4540 if (htab->params->ovly_flavour == ovly_soft_icache
4541 && num_stubs > htab->params->max_branch)
4542 break;
4543 if (align_power (tmp, roalign) + rotmp
4544 + num_stubs * ovl_stub_size (htab->params) > overlay_size)
4545 break;
4546 size = tmp;
4547 rosize = rotmp;
4548 }
4549
4550 if (i == base)
4551 {
4552 /* xgettext:c-format */
4553 info->callbacks->einfo (_("%pB:%pA%s exceeds overlay size\n"),
4554 ovly_sections[2 * i]->owner,
4555 ovly_sections[2 * i],
4556 ovly_sections[2 * i + 1] ? " + rodata" : "");
4557 bfd_set_error (bfd_error_bad_value);
4558 goto err_exit;
4559 }
4560
4561 while (dummy_caller.call_list != NULL)
4562 {
4563 struct call_info *call = dummy_caller.call_list;
4564 dummy_caller.call_list = call->next;
4565 free (call);
4566 }
4567
4568 ++ovlynum;
4569 while (base < i)
4570 ovly_map[base++] = ovlynum;
4571 }
4572
4573 script = htab->params->spu_elf_open_overlay_script ();
4574
4575 if (htab->params->ovly_flavour == ovly_soft_icache)
4576 {
4577 if (fprintf (script, "SECTIONS\n{\n") <= 0)
4578 goto file_err;
4579
4580 if (fprintf (script,
4581 " . = ALIGN (%u);\n"
4582 " .ovl.init : { *(.ovl.init) }\n"
4583 " . = ABSOLUTE (ADDR (.ovl.init));\n",
4584 htab->params->line_size) <= 0)
4585 goto file_err;
4586
4587 base = 0;
4588 ovlynum = 1;
4589 while (base < count)
4590 {
4591 unsigned int indx = ovlynum - 1;
4592 unsigned int vma, lma;
4593
4594 vma = (indx & (htab->params->num_lines - 1)) << htab->line_size_log2;
4595 lma = vma + (((indx >> htab->num_lines_log2) + 1) << 18);
4596
4597 if (fprintf (script, " .ovly%u ABSOLUTE (ADDR (.ovl.init)) + %u "
4598 ": AT (LOADADDR (.ovl.init) + %u) {\n",
4599 ovlynum, vma, lma) <= 0)
4600 goto file_err;
4601
4602 base = print_one_overlay_section (script, base, count, ovlynum,
4603 ovly_map, ovly_sections, info);
4604 if (base == (unsigned) -1)
4605 goto file_err;
4606
4607 if (fprintf (script, " }\n") <= 0)
4608 goto file_err;
4609
4610 ovlynum++;
4611 }
4612
4613 if (fprintf (script, " . = ABSOLUTE (ADDR (.ovl.init)) + %u;\n",
4614 1 << (htab->num_lines_log2 + htab->line_size_log2)) <= 0)
4615 goto file_err;
4616
4617 if (fprintf (script, "}\nINSERT AFTER .toe;\n") <= 0)
4618 goto file_err;
4619 }
4620 else
4621 {
4622 if (fprintf (script, "SECTIONS\n{\n") <= 0)
4623 goto file_err;
4624
4625 if (fprintf (script,
4626 " . = ALIGN (16);\n"
4627 " .ovl.init : { *(.ovl.init) }\n"
4628 " . = ABSOLUTE (ADDR (.ovl.init));\n") <= 0)
4629 goto file_err;
4630
4631 for (region = 1; region <= htab->params->num_lines; region++)
4632 {
4633 ovlynum = region;
4634 base = 0;
4635 while (base < count && ovly_map[base] < ovlynum)
4636 base++;
4637
4638 if (base == count)
4639 break;
4640
4641 if (region == 1)
4642 {
4643 /* We need to set lma since we are overlaying .ovl.init. */
4644 if (fprintf (script,
4645 " OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))\n {\n") <= 0)
4646 goto file_err;
4647 }
4648 else
4649 {
4650 if (fprintf (script, " OVERLAY :\n {\n") <= 0)
4651 goto file_err;
4652 }
4653
4654 while (base < count)
4655 {
4656 if (fprintf (script, " .ovly%u {\n", ovlynum) <= 0)
4657 goto file_err;
4658
4659 base = print_one_overlay_section (script, base, count, ovlynum,
4660 ovly_map, ovly_sections, info);
4661 if (base == (unsigned) -1)
4662 goto file_err;
4663
4664 if (fprintf (script, " }\n") <= 0)
4665 goto file_err;
4666
4667 ovlynum += htab->params->num_lines;
4668 while (base < count && ovly_map[base] < ovlynum)
4669 base++;
4670 }
4671
4672 if (fprintf (script, " }\n") <= 0)
4673 goto file_err;
4674 }
4675
4676 if (fprintf (script, "}\nINSERT BEFORE .text;\n") <= 0)
4677 goto file_err;
4678 }
4679
4680 free (ovly_map);
4681 free (ovly_sections);
4682
4683 if (fclose (script) != 0)
4684 goto file_err;
4685
4686 if (htab->params->auto_overlay & AUTO_RELINK)
4687 (*htab->params->spu_elf_relink) ();
4688
4689 xexit (0);
4690
4691 file_err:
4692 bfd_set_error (bfd_error_system_call);
4693 err_exit:
4694 info->callbacks->einfo (_("%F%P: auto overlay error: %E\n"));
4695 xexit (1);
4696 }
4697
4698 /* Provide an estimate of total stack required. */
4699
4700 static bfd_boolean
4701 spu_elf_stack_analysis (struct bfd_link_info *info)
4702 {
4703 struct spu_link_hash_table *htab;
4704 struct _sum_stack_param sum_stack_param;
4705
4706 if (!discover_functions (info))
4707 return FALSE;
4708
4709 if (!build_call_tree (info))
4710 return FALSE;
4711
4712 htab = spu_hash_table (info);
4713 if (htab->params->stack_analysis)
4714 {
4715 info->callbacks->info (_("Stack size for call graph root nodes.\n"));
4716 info->callbacks->minfo (_("\nStack size for functions. "
4717 "Annotations: '*' max stack, 't' tail call\n"));
4718 }
4719
4720 sum_stack_param.emit_stack_syms = htab->params->emit_stack_syms;
4721 sum_stack_param.overall_stack = 0;
4722 if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
4723 return FALSE;
4724
4725 if (htab->params->stack_analysis)
4726 info->callbacks->info (_("Maximum stack required is 0x%v\n"),
4727 (bfd_vma) sum_stack_param.overall_stack);
4728 return TRUE;
4729 }
4730
4731 /* Perform a final link. */
4732
4733 static bfd_boolean
4734 spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
4735 {
4736 struct spu_link_hash_table *htab = spu_hash_table (info);
4737
4738 if (htab->params->auto_overlay)
4739 spu_elf_auto_overlay (info);
4740
4741 if ((htab->params->stack_analysis
4742 || (htab->params->ovly_flavour == ovly_soft_icache
4743 && htab->params->lrlive_analysis))
4744 && !spu_elf_stack_analysis (info))
4745 info->callbacks->einfo (_("%X%P: stack/lrlive analysis error: %E\n"));
4746
4747 if (!spu_elf_build_stubs (info))
4748 info->callbacks->einfo (_("%F%P: can not build overlay stubs: %E\n"));
4749
4750 return bfd_elf_final_link (output_bfd, info);
4751 }
4752
4753 /* Called when not normally emitting relocs, ie. !bfd_link_relocatable (info)
4754 and !info->emitrelocations. Returns a count of special relocs
4755 that need to be emitted. */
4756
4757 static unsigned int
4758 spu_elf_count_relocs (struct bfd_link_info *info, asection *sec)
4759 {
4760 Elf_Internal_Rela *relocs;
4761 unsigned int count = 0;
4762
4763 relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
4764 info->keep_memory);
4765 if (relocs != NULL)
4766 {
4767 Elf_Internal_Rela *rel;
4768 Elf_Internal_Rela *relend = relocs + sec->reloc_count;
4769
4770 for (rel = relocs; rel < relend; rel++)
4771 {
4772 int r_type = ELF32_R_TYPE (rel->r_info);
4773 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
4774 ++count;
4775 }
4776
4777 if (elf_section_data (sec)->relocs != relocs)
4778 free (relocs);
4779 }
4780
4781 return count;
4782 }
4783
4784 /* Functions for adding fixup records to .fixup */
4785
4786 #define FIXUP_RECORD_SIZE 4
4787
4788 #define FIXUP_PUT(output_bfd,htab,index,addr) \
4789 bfd_put_32 (output_bfd, addr, \
4790 htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
4791 #define FIXUP_GET(output_bfd,htab,index) \
4792 bfd_get_32 (output_bfd, \
4793 htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
4794
4795 /* Store OFFSET in .fixup. This assumes it will be called with an
4796 increasing OFFSET. When this OFFSET fits with the last base offset,
4797 it just sets a bit, otherwise it adds a new fixup record. */
4798 static void
4799 spu_elf_emit_fixup (bfd * output_bfd, struct bfd_link_info *info,
4800 bfd_vma offset)
4801 {
4802 struct spu_link_hash_table *htab = spu_hash_table (info);
4803 asection *sfixup = htab->sfixup;
4804 bfd_vma qaddr = offset & ~(bfd_vma) 15;
4805 bfd_vma bit = ((bfd_vma) 8) >> ((offset & 15) >> 2);
4806 if (sfixup->reloc_count == 0)
4807 {
4808 FIXUP_PUT (output_bfd, htab, 0, qaddr | bit);
4809 sfixup->reloc_count++;
4810 }
4811 else
4812 {
4813 bfd_vma base = FIXUP_GET (output_bfd, htab, sfixup->reloc_count - 1);
4814 if (qaddr != (base & ~(bfd_vma) 15))
4815 {
4816 if ((sfixup->reloc_count + 1) * FIXUP_RECORD_SIZE > sfixup->size)
4817 _bfd_error_handler (_("fatal error while creating .fixup"));
4818 FIXUP_PUT (output_bfd, htab, sfixup->reloc_count, qaddr | bit);
4819 sfixup->reloc_count++;
4820 }
4821 else
4822 FIXUP_PUT (output_bfd, htab, sfixup->reloc_count - 1, base | bit);
4823 }
4824 }
4825
/* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD.

   Returns TRUE on success, FALSE on error, and 2 when the reloc
   array was squeezed down to just the R_SPU_PPU32/64 entries that
   must be emitted into the output (see end of function).  */

static int
spu_elf_relocate_section (bfd *output_bfd,
			  struct bfd_link_info *info,
			  bfd *input_bfd,
			  asection *input_section,
			  bfd_byte *contents,
			  Elf_Internal_Rela *relocs,
			  Elf_Internal_Sym *local_syms,
			  asection **local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel, *relend;
  struct spu_link_hash_table *htab;
  asection *ea;
  int ret = TRUE;
  bfd_boolean emit_these_relocs = FALSE;
  bfd_boolean is_ea_sym;
  bfd_boolean stubs;
  unsigned int iovl = 0;

  htab = spu_hash_table (info);
  stubs = (htab->stub_sec != NULL
	   && maybe_needs_stubs (input_section));
  /* Overlay index of the section being relocated (0 if not in an
     overlay).  */
  iovl = overlay_index (input_section);
  ea = bfd_get_section_by_name (output_bfd, "._ea");
  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
  sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int r_type;
      reloc_howto_type *howto;
      unsigned int r_symndx;
      Elf_Internal_Sym *sym;
      asection *sec;
      struct elf_link_hash_entry *h;
      const char *sym_name;
      bfd_vma relocation;
      bfd_vma addend;
      bfd_reloc_status_type r;
      bfd_boolean unresolved_reloc;
      enum _stub_type stub_type;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      howto = elf_howto_table + r_type;
      unresolved_reloc = FALSE;
      h = NULL;
      sym = NULL;
      sec = NULL;
      if (r_symndx < symtab_hdr->sh_info)
	{
	  /* Local symbol: resolve directly against its section.  */
	  sym = local_syms + r_symndx;
	  sec = local_sections[r_symndx];
	  sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
	  relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
	}
      else
	{
	  /* Global symbol: look it up via the hash table.  */
	  if (sym_hashes == NULL)
	    return FALSE;

	  h = sym_hashes[r_symndx - symtab_hdr->sh_info];

	  if (info->wrap_hash != NULL
	      && (input_section->flags & SEC_DEBUGGING) != 0)
	    h = ((struct elf_link_hash_entry *)
		 unwrap_hash_lookup (info, input_bfd, &h->root));

	  /* Follow indirect and warning links to the real symbol.  */
	  while (h->root.type == bfd_link_hash_indirect
		 || h->root.type == bfd_link_hash_warning)
	    h = (struct elf_link_hash_entry *) h->root.u.i.link;

	  relocation = 0;
	  if (h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	    {
	      sec = h->root.u.def.section;
	      if (sec == NULL
		  || sec->output_section == NULL)
		/* Set a flag that will be cleared later if we find a
		   relocation value for this symbol.  output_section
		   is typically NULL for symbols satisfied by a shared
		   library.  */
		unresolved_reloc = TRUE;
	      else
		relocation = (h->root.u.def.value
			      + sec->output_section->vma
			      + sec->output_offset);
	    }
	  else if (h->root.type == bfd_link_hash_undefweak)
	    ;
	  else if (info->unresolved_syms_in_objects == RM_IGNORE
		   && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
	    ;
	  else if (!bfd_link_relocatable (info)
		   && !(r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64))
	    {
	      /* Undefined symbol: report it, hard error depending on
		 visibility and the unresolved-symbols policy.  */
	      bfd_boolean err;
	      err = (info->unresolved_syms_in_objects == RM_GENERATE_ERROR
		     || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT);
	      (*info->callbacks->undefined_symbol) (info,
						    h->root.root.string,
						    input_bfd,
						    input_section,
						    rel->r_offset, err);
	    }
	  sym_name = h->root.root.string;
	}

      if (sec != NULL && discarded_section (sec))
	RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
					 rel, 1, relend, howto, 0, contents);

      if (bfd_link_relocatable (info))
	continue;

      /* Change "a rt,ra,rb" to "ai rt,ra,0".  */
      if (r_type == R_SPU_ADD_PIC
	  && h != NULL
	  && !(h->def_regular || ELF_COMMON_DEF_P (h)))
	{
	  bfd_byte *loc = contents + rel->r_offset;
	  loc[0] = 0x1c;
	  loc[1] = 0x00;
	  loc[2] &= 0x3f;
	}

      /* A reloc against a symbol that lands in ._ea (PPU-side memory)
	 gets special treatment below.  */
      is_ea_sym = (ea != NULL
		   && sec != NULL
		   && sec->output_section == ea);

      /* If this symbol is in an overlay area, we may need to relocate
	 to the overlay stub.  */
      addend = rel->r_addend;
      if (stubs
	  && !is_ea_sym
	  && (stub_type = needs_ovl_stub (h, sym, sec, input_section, rel,
					  contents, info)) != no_stub)
	{
	  unsigned int ovl = 0;
	  struct got_entry *g, **head;

	  if (stub_type != nonovl_stub)
	    ovl = iovl;

	  if (h != NULL)
	    head = &h->got.glist;
	  else
	    head = elf_local_got_ents (input_bfd) + r_symndx;

	  /* Find the stub entry created earlier that matches this
	     call site; it must exist, hence the abort below.  */
	  for (g = *head; g != NULL; g = g->next)
	    if (htab->params->ovly_flavour == ovly_soft_icache
		? (g->ovl == ovl
		   && g->br_addr == (rel->r_offset
				     + input_section->output_offset
				     + input_section->output_section->vma))
		: g->addend == addend && (g->ovl == ovl || g->ovl == 0))
	      break;
	  if (g == NULL)
	    abort ();

	  /* Redirect the reloc to the stub.  */
	  relocation = g->stub_addr;
	  addend = 0;
	}
      else
	{
	  /* For soft icache, encode the overlay index into addresses.  */
	  if (htab->params->ovly_flavour == ovly_soft_icache
	      && (r_type == R_SPU_ADDR16_HI
		  || r_type == R_SPU_ADDR32 || r_type == R_SPU_REL32)
	      && !is_ea_sym)
	    {
	      unsigned int ovl = overlay_index (sec);
	      if (ovl != 0)
		{
		  /* Cache-set number goes in bits above 18.  */
		  unsigned int set_id = ((ovl - 1) >> htab->num_lines_log2) + 1;
		  relocation += set_id << 18;
		}
	    }
	}

      /* Record runtime-relocatable words in .fixup.  */
      if (htab->params->emit_fixups && !bfd_link_relocatable (info)
	  && (input_section->flags & SEC_ALLOC) != 0
	  && r_type == R_SPU_ADDR32)
	{
	  bfd_vma offset;
	  offset = rel->r_offset + input_section->output_section->vma
	    + input_section->output_offset;
	  spu_elf_emit_fixup (output_bfd, info, offset);
	}

      if (unresolved_reloc)
	;
      else if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	{
	  if (is_ea_sym)
	    {
	      /* ._ea is a special section that isn't allocated in SPU
		 memory, but rather occupies space in PPU memory as
		 part of an embedded ELF image.  If this reloc is
		 against a symbol defined in ._ea, then transform the
		 reloc into an equivalent one without a symbol
		 relative to the start of the ELF image.  */
	      rel->r_addend += (relocation
				- ea->vma
				+ elf_section_data (ea)->this_hdr.sh_offset);
	      rel->r_info = ELF32_R_INFO (0, r_type);
	    }
	  /* PPU relocs are applied by the PPU-side loader; defer them
	     to the output rather than applying them here.  */
	  emit_these_relocs = TRUE;
	  continue;
	}
      else if (is_ea_sym)
	unresolved_reloc = TRUE;

      if (unresolved_reloc
	  && _bfd_elf_section_offset (output_bfd, info, input_section,
				      rel->r_offset) != (bfd_vma) -1)
	{
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%s+%#" PRIx64 "): "
	       "unresolvable %s relocation against symbol `%s'"),
	     input_bfd,
	     bfd_section_name (input_section),
	     (uint64_t) rel->r_offset,
	     howto->name,
	     sym_name);
	  ret = FALSE;
	}

      r = _bfd_final_link_relocate (howto,
				    input_bfd,
				    input_section,
				    contents,
				    rel->r_offset, relocation, addend);

      if (r != bfd_reloc_ok)
	{
	  const char *msg = (const char *) 0;

	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      (*info->callbacks->reloc_overflow)
		(info, (h ? &h->root : NULL), sym_name, howto->name,
		 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
	      break;

	    case bfd_reloc_undefined:
	      (*info->callbacks->undefined_symbol)
		(info, sym_name, input_bfd, input_section, rel->r_offset, TRUE);
	      break;

	    case bfd_reloc_outofrange:
	      msg = _("internal error: out of range error");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      msg = _("internal error: unsupported relocation error");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      msg = _("internal error: dangerous error");
	      goto common_error;

	    default:
	      msg = _("internal error: unknown error");
	      /* fall through */

	    common_error:
	      ret = FALSE;
	      (*info->callbacks->warning) (info, msg, sym_name, input_bfd,
					   input_section, rel->r_offset);
	      break;
	    }
	}
    }

  /* When relocs are not being kept, compact the reloc array in place
     so only the deferred R_SPU_PPU32/64 entries are written out.  */
  if (ret
      && emit_these_relocs
      && !info->emitrelocations)
    {
      Elf_Internal_Rela *wrel;
      Elf_Internal_Shdr *rel_hdr;

      wrel = rel = relocs;
      relend = relocs + input_section->reloc_count;
      for (; rel < relend; rel++)
	{
	  int r_type;

	  r_type = ELF32_R_TYPE (rel->r_info);
	  if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	    *wrel++ = *rel;
	}
      input_section->reloc_count = wrel - relocs;
      /* Backflips for _bfd_elf_link_output_relocs.  */
      rel_hdr = _bfd_elf_single_rel_hdr (input_section);
      rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
      /* NOTE(review): ret == 2 appears to signal the trimmed-reloc
	 case to the generic ELF linker — confirm against
	 bfd_elf_final_link.  */
      ret = 2;
    }

  return ret;
}
5136
/* elf_backend_finish_dynamic_sections hook.  The SPU backend has no
   dynamic sections to finish, so this is a no-op that reports
   success.  */

static bfd_boolean
spu_elf_finish_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
				 struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  return TRUE;
}
5143
5144 /* Adjust _SPUEAR_ syms to point at their overlay stubs. */
5145
5146 static int
5147 spu_elf_output_symbol_hook (struct bfd_link_info *info,
5148 const char *sym_name ATTRIBUTE_UNUSED,
5149 Elf_Internal_Sym *sym,
5150 asection *sym_sec ATTRIBUTE_UNUSED,
5151 struct elf_link_hash_entry *h)
5152 {
5153 struct spu_link_hash_table *htab = spu_hash_table (info);
5154
5155 if (!bfd_link_relocatable (info)
5156 && htab->stub_sec != NULL
5157 && h != NULL
5158 && (h->root.type == bfd_link_hash_defined
5159 || h->root.type == bfd_link_hash_defweak)
5160 && h->def_regular
5161 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
5162 {
5163 struct got_entry *g;
5164
5165 for (g = h->got.glist; g != NULL; g = g->next)
5166 if (htab->params->ovly_flavour == ovly_soft_icache
5167 ? g->br_addr == g->stub_addr
5168 : g->addend == 0 && g->ovl == 0)
5169 {
5170 sym->st_shndx = (_bfd_elf_section_from_bfd_section
5171 (htab->stub_sec[0]->output_section->owner,
5172 htab->stub_sec[0]->output_section));
5173 sym->st_value = g->stub_addr;
5174 break;
5175 }
5176 }
5177
5178 return 1;
5179 }
5180
/* Non-zero when the output is being linked as a plugin; read by
   spu_elf_init_file_header to set ET_DYN.  */
static int spu_plugin = 0;

/* Record whether the output is a plugin.  */
void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}
5188
5189 /* Set ELF header e_type for plugins. */
5190
5191 static bfd_boolean
5192 spu_elf_init_file_header (bfd *abfd, struct bfd_link_info *info)
5193 {
5194 if (!_bfd_elf_init_file_header (abfd, info))
5195 return FALSE;
5196
5197 if (spu_plugin)
5198 {
5199 Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);
5200
5201 i_ehdrp->e_type = ET_DYN;
5202 }
5203 return TRUE;
5204 }
5205
5206 /* We may add an extra PT_LOAD segment for .toe. We also need extra
5207 segments for overlays. */
5208
5209 static int
5210 spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
5211 {
5212 int extra = 0;
5213 asection *sec;
5214
5215 if (info != NULL)
5216 {
5217 struct spu_link_hash_table *htab = spu_hash_table (info);
5218 extra = htab->num_overlays;
5219 }
5220
5221 if (extra)
5222 ++extra;
5223
5224 sec = bfd_get_section_by_name (abfd, ".toe");
5225 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
5226 ++extra;
5227
5228 return extra;
5229 }
5230
/* Remove .toe section from other PT_LOAD segments and put it in
   a segment of its own.  Put overlays in separate segments too.  */

static bfd_boolean
spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
{
  asection *toe, *s;
  struct elf_segment_map *m, *m_overlay;
  struct elf_segment_map **p, **p_overlay, **first_load;
  unsigned int i;

  if (info == NULL)
    return TRUE;

  /* First pass: split any multi-section PT_LOAD segment at .toe or at
     an overlay section, so each such section ends up alone in its own
     PT_LOAD.  */
  toe = bfd_get_section_by_name (abfd, ".toe");
  for (m = elf_seg_map (abfd); m != NULL; m = m->next)
    if (m->p_type == PT_LOAD && m->count > 1)
      for (i = 0; i < m->count; i++)
	if ((s = m->sections[i]) == toe
	    || spu_elf_section_data (s)->u.o.ovl_index != 0)
	  {
	    struct elf_segment_map *m2;
	    bfd_vma amt;

	    /* Sections after S move to a new segment M2 inserted
	       after M.  */
	    if (i + 1 < m->count)
	      {
		amt = sizeof (struct elf_segment_map);
		amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->count = m->count - (i + 1);
		memcpy (m2->sections, m->sections + i + 1,
			m2->count * sizeof (m->sections[0]));
		m2->p_type = PT_LOAD;
		m2->next = m->next;
		m->next = m2;
	      }
	    m->count = 1;
	    /* Sections before S stay in M; S itself moves to a new
	       single-section segment inserted after them.  */
	    if (i != 0)
	      {
		m->count = i;
		amt = sizeof (struct elf_segment_map);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->p_type = PT_LOAD;
		m2->count = 1;
		m2->sections[0] = s;
		m2->next = m->next;
		m->next = m2;
	      }
	    break;
	  }


  /* Some SPU ELF loaders ignore the PF_OVERLAY flag and just load all
     PT_LOAD segments.  This can cause the .ovl.init section to be
     overwritten with the contents of some overlay segment.  To work
     around this issue, we ensure that all PF_OVERLAY segments are
     sorted first amongst the program headers; this ensures that even
     with a broken loader, the .ovl.init section (which is not marked
     as PF_OVERLAY) will be placed into SPU local store on startup.  */

  /* Move all overlay segments onto a separate list.  */
  p = &elf_seg_map (abfd);
  p_overlay = &m_overlay;
  m_overlay = NULL;
  first_load = NULL;
  while (*p != NULL)
    {
      if ((*p)->p_type == PT_LOAD)
	{
	  if (!first_load)
	    first_load = p;
	  if ((*p)->count == 1
	      && spu_elf_section_data ((*p)->sections[0])->u.o.ovl_index != 0)
	    {
	      /* Unlink this segment from the main list and append it
		 to the overlay list.  no_sort_lma keeps it at the
		 position we choose here.  */
	      m = *p;
	      m->no_sort_lma = 1;
	      *p = m->next;
	      *p_overlay = m;
	      p_overlay = &m->next;
	      continue;
	    }
	}
      p = &((*p)->next);
    }

  /* Re-insert overlay segments at the head of the segment map.  */
  if (m_overlay != NULL)
    {
      p = first_load;
      if (*p != NULL && (*p)->p_type == PT_LOAD && (*p)->includes_filehdr)
	/* It doesn't really make sense for someone to include the ELF
	   file header into an spu image, but if they do the code that
	   assigns p_offset needs to see the segment containing the
	   header first.  */
	p = &(*p)->next;
      *p_overlay = *p;
      *p = m_overlay;
    }

  return TRUE;
}
5336
5337 /* Tweak the section type of .note.spu_name. */
5338
5339 static bfd_boolean
5340 spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
5341 Elf_Internal_Shdr *hdr,
5342 asection *sec)
5343 {
5344 if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
5345 hdr->sh_type = SHT_NOTE;
5346 return TRUE;
5347 }
5348
/* Tweak phdrs before writing them out.  Marks overlay segments with
   PF_OVERLAY, records overlay file offsets in _ovly_table (or
   .ovl.init for soft-icache), and pads PT_LOAD p_filesz/p_memsz to
   16-byte multiples when that cannot create overlapping segments.  */

static int
spu_elf_modify_headers (bfd *abfd, struct bfd_link_info *info)
{
  if (info != NULL)
    {
      const struct elf_backend_data *bed;
      struct elf_obj_tdata *tdata;
      Elf_Internal_Phdr *phdr, *last;
      struct spu_link_hash_table *htab;
      unsigned int count;
      unsigned int i;

      bed = get_elf_backend_data (abfd);
      tdata = elf_tdata (abfd);
      phdr = tdata->phdr;
      count = elf_program_header_size (abfd) / bed->s->sizeof_phdr;
      htab = spu_hash_table (info);
      if (htab->num_overlays != 0)
	{
	  struct elf_segment_map *m;
	  unsigned int o;

	  /* Walk segment map and phdrs in lockstep; I indexes the
	     phdr matching segment map entry M.  */
	  for (i = 0, m = elf_seg_map (abfd); m; ++i, m = m->next)
	    if (m->count != 0
		&& ((o = spu_elf_section_data (m->sections[0])->u.o.ovl_index)
		    != 0))
	      {
		/* Mark this as an overlay header.  */
		phdr[i].p_flags |= PF_OVERLAY;

		if (htab->ovtab != NULL && htab->ovtab->size != 0
		    && htab->params->ovly_flavour != ovly_soft_icache)
		  {
		    bfd_byte *p = htab->ovtab->contents;
		    unsigned int off = o * 16 + 8;

		    /* Write file_off into _ovly_table.  */
		    bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
		  }
	      }
	  /* Soft-icache has its file offset put in .ovl.init.  */
	  if (htab->init != NULL && htab->init->size != 0)
	    {
	      bfd_vma val
		= elf_section_data (htab->ovl_sec[0])->this_hdr.sh_offset;

	      bfd_put_32 (htab->init->owner, val, htab->init->contents + 4);
	    }
	}

      /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
	 of 16.  This should always be possible when using the standard
	 linker scripts, but don't create overlapping segments if
	 someone is playing games with linker scripts.  */
      last = NULL;
      for (i = count; i-- != 0; )
	if (phdr[i].p_type == PT_LOAD)
	  {
	    unsigned adjust;

	    /* Bytes needed to round p_filesz up to a multiple of 16;
	       bail out if padding would run into the next segment.  */
	    adjust = -phdr[i].p_filesz & 15;
	    if (adjust != 0
		&& last != NULL
		&& (phdr[i].p_offset + phdr[i].p_filesz
		    > last->p_offset - adjust))
	      break;

	    /* Likewise for p_memsz against the next segment's vaddr.  */
	    adjust = -phdr[i].p_memsz & 15;
	    if (adjust != 0
		&& last != NULL
		&& phdr[i].p_filesz != 0
		&& phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
		&& phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
	      break;

	    if (phdr[i].p_filesz != 0)
	      last = &phdr[i];
	  }

      /* Apply the rounding only if the scan above completed without
	 finding a conflict (i.e. I wrapped to (unsigned) -1).  */
      if (i == (unsigned int) -1)
	for (i = count; i-- != 0; )
	  if (phdr[i].p_type == PT_LOAD)
	    {
	      unsigned adjust;

	      adjust = -phdr[i].p_filesz & 15;
	      phdr[i].p_filesz += adjust;

	      adjust = -phdr[i].p_memsz & 15;
	      phdr[i].p_memsz += adjust;
	    }
    }

  return _bfd_elf_modify_headers (abfd, info);
}
5446
/* Size the .fixup section: one FIXUP_RECORD_SIZE record per quadword
   containing at least one R_SPU_ADDR32 reloc in an allocated section,
   plus a terminating sentinel record.  Allocates zeroed contents.  */

bfd_boolean
spu_elf_size_sections (bfd *obfd ATTRIBUTE_UNUSED, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  if (htab->params->emit_fixups)
    {
      asection *sfixup = htab->sfixup;
      int fixup_count = 0;
      bfd *ibfd;
      size_t size;

      for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
	{
	  asection *isec;

	  if (bfd_get_flavour (ibfd) != bfd_target_elf_flavour)
	    continue;

	  /* Walk over each section attached to the input bfd.  */
	  for (isec = ibfd->sections; isec != NULL; isec = isec->next)
	    {
	      Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
	      bfd_vma base_end;

	      /* If there aren't any relocs, then there's nothing more
		 to do.  */
	      if ((isec->flags & SEC_ALLOC) == 0
		  || (isec->flags & SEC_RELOC) == 0
		  || isec->reloc_count == 0)
		continue;

	      /* Get the relocs.  */
	      internal_relocs =
		_bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
					   info->keep_memory);
	      if (internal_relocs == NULL)
		return FALSE;

	      /* 1 quadword can contain up to 4 R_SPU_ADDR32
		 relocations.  They are stored in a single word by
		 saving the upper 28 bits of the address and setting the
		 lower 4 bits to a bit mask of the words that have the
		 relocation.  BASE_END keeps track of the next quadword. */
	      irela = internal_relocs;
	      irelaend = irela + isec->reloc_count;
	      base_end = 0;
	      /* NOTE(review): the counting assumes relocs are ordered
		 by increasing r_offset — confirm; a violation would
		 only over-count and waste space, never under-count.  */
	      for (; irela < irelaend; irela++)
		if (ELF32_R_TYPE (irela->r_info) == R_SPU_ADDR32
		    && irela->r_offset >= base_end)
		  {
		    base_end = (irela->r_offset & ~(bfd_vma) 15) + 16;
		    fixup_count++;
		  }
	    }
	}

      /* We always have a NULL fixup as a sentinel */
      size = (fixup_count + 1) * FIXUP_RECORD_SIZE;
      if (!bfd_set_section_size (sfixup, size))
	return FALSE;
      sfixup->contents = (bfd_byte *) bfd_zalloc (info->input_bfds, size);
      if (sfixup->contents == NULL)
	return FALSE;
    }
  return TRUE;
}
5513
5514 #define TARGET_BIG_SYM spu_elf32_vec
5515 #define TARGET_BIG_NAME "elf32-spu"
5516 #define ELF_ARCH bfd_arch_spu
5517 #define ELF_TARGET_ID SPU_ELF_DATA
5518 #define ELF_MACHINE_CODE EM_SPU
5519 /* This matches the alignment need for DMA. */
5520 #define ELF_MAXPAGESIZE 0x80
5521 #define elf_backend_rela_normal 1
5522 #define elf_backend_can_gc_sections 1
5523
5524 #define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
5525 #define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
5526 #define elf_info_to_howto spu_elf_info_to_howto
5527 #define elf_backend_count_relocs spu_elf_count_relocs
5528 #define elf_backend_relocate_section spu_elf_relocate_section
5529 #define elf_backend_finish_dynamic_sections spu_elf_finish_dynamic_sections
5530 #define elf_backend_symbol_processing spu_elf_backend_symbol_processing
5531 #define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
5532 #define elf_backend_object_p spu_elf_object_p
5533 #define bfd_elf32_new_section_hook spu_elf_new_section_hook
5534 #define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create
5535
5536 #define elf_backend_additional_program_headers spu_elf_additional_program_headers
5537 #define elf_backend_modify_segment_map spu_elf_modify_segment_map
5538 #define elf_backend_modify_headers spu_elf_modify_headers
5539 #define elf_backend_init_file_header spu_elf_init_file_header
5540 #define elf_backend_fake_sections spu_elf_fake_sections
5541 #define elf_backend_special_sections spu_elf_special_sections
5542 #define bfd_elf32_bfd_final_link spu_elf_final_link
5543
5544 #include "elf32-target.h"