Fix memory access violations triggered by running objdump on fuzzed binaries.
[deliverable/binutils-gdb.git] / bfd / elf32-spu.c
1 /* SPU specific support for 32-bit ELF
2
3 Copyright (C) 2006-2015 Free Software Foundation, Inc.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
20
21 #include "sysdep.h"
22 #include "libiberty.h"
23 #include "bfd.h"
24 #include "bfdlink.h"
25 #include "libbfd.h"
26 #include "elf-bfd.h"
27 #include "elf/spu.h"
28 #include "elf32-spu.h"
29
30 /* We use RELA style relocs. Don't define USE_REL. */
31
32 static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
33 void *, asection *,
34 bfd *, char **);
35
36 /* Values of type 'enum elf_spu_reloc_type' are used to index this
37 array, so it must be declared in the order of that type. */
38
static reloc_howto_type elf_howto_table[] = {
  /* HOWTO field legend: reloc type, rightshift, size, bitsize,
     pc_relative, bitpos, complain_on_overflow, special_function,
     name, partial_inplace, src_mask, dst_mask, pcrel_offset.
     Entries must stay in 'enum elf_spu_reloc_type' order since the
     type value is used to index this table directly.  */
  HOWTO (R_SPU_NONE, 0, 0, 0, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_NONE",
	 FALSE, 0, 0x00000000, FALSE),
  HOWTO (R_SPU_ADDR10, 4, 2, 10, FALSE, 14, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR10",
	 FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16, 2, 2, 16, FALSE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16_HI",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_LO, 0, 2, 16, FALSE, 7, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR16_LO",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR18, 0, 2, 18, FALSE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR18",
	 FALSE, 0, 0x01ffff80, FALSE),
  HOWTO (R_SPU_ADDR32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR32",
	 FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_REL16, 2, 2, 16, TRUE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_REL16",
	 FALSE, 0, 0x007fff80, TRUE),
  HOWTO (R_SPU_ADDR7, 0, 2, 7, FALSE, 14, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR7",
	 FALSE, 0, 0x001fc000, FALSE),
  /* The two 9-bit pc-relative relocs need a special function to place
     the split immediate field; see spu_elf_rel9 below.  */
  HOWTO (R_SPU_REL9, 2, 2, 9, TRUE, 0, complain_overflow_signed,
	 spu_elf_rel9, "SPU_REL9",
	 FALSE, 0, 0x0180007f, TRUE),
  HOWTO (R_SPU_REL9I, 2, 2, 9, TRUE, 0, complain_overflow_signed,
	 spu_elf_rel9, "SPU_REL9I",
	 FALSE, 0, 0x0000c07f, TRUE),
  HOWTO (R_SPU_ADDR10I, 0, 2, 10, FALSE, 14, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR10I",
	 FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16I, 0, 2, 16, FALSE, 7, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR16I",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_REL32, 0, 2, 32, TRUE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_REL32",
	 FALSE, 0, 0xffffffff, TRUE),
  HOWTO (R_SPU_ADDR16X, 0, 2, 16, FALSE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16X",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_PPU32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_PPU32",
	 FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_PPU64, 0, 4, 64, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_PPU64",
	 FALSE, 0, -1, FALSE),
  HOWTO (R_SPU_ADD_PIC, 0, 0, 0, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADD_PIC",
	 FALSE, 0, 0x00000000, FALSE),
};
95
/* Sections given special treatment by the generic ELF code.
   Fields: name, prefix length, suffix length, sh_type, sh_flags.  */
static struct bfd_elf_special_section const spu_elf_special_sections[] = {
  { "._ea", 4, 0, SHT_PROGBITS, SHF_WRITE },
  { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
  { NULL, 0, 0, 0, 0 }
};
101
102 static enum elf_spu_reloc_type
103 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
104 {
105 switch (code)
106 {
107 default:
108 return R_SPU_NONE;
109 case BFD_RELOC_SPU_IMM10W:
110 return R_SPU_ADDR10;
111 case BFD_RELOC_SPU_IMM16W:
112 return R_SPU_ADDR16;
113 case BFD_RELOC_SPU_LO16:
114 return R_SPU_ADDR16_LO;
115 case BFD_RELOC_SPU_HI16:
116 return R_SPU_ADDR16_HI;
117 case BFD_RELOC_SPU_IMM18:
118 return R_SPU_ADDR18;
119 case BFD_RELOC_SPU_PCREL16:
120 return R_SPU_REL16;
121 case BFD_RELOC_SPU_IMM7:
122 return R_SPU_ADDR7;
123 case BFD_RELOC_SPU_IMM8:
124 return R_SPU_NONE;
125 case BFD_RELOC_SPU_PCREL9a:
126 return R_SPU_REL9;
127 case BFD_RELOC_SPU_PCREL9b:
128 return R_SPU_REL9I;
129 case BFD_RELOC_SPU_IMM10:
130 return R_SPU_ADDR10I;
131 case BFD_RELOC_SPU_IMM16:
132 return R_SPU_ADDR16I;
133 case BFD_RELOC_32:
134 return R_SPU_ADDR32;
135 case BFD_RELOC_32_PCREL:
136 return R_SPU_REL32;
137 case BFD_RELOC_SPU_PPU32:
138 return R_SPU_PPU32;
139 case BFD_RELOC_SPU_PPU64:
140 return R_SPU_PPU64;
141 case BFD_RELOC_SPU_ADD_PIC:
142 return R_SPU_ADD_PIC;
143 }
144 }
145
146 static void
147 spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
148 arelent *cache_ptr,
149 Elf_Internal_Rela *dst)
150 {
151 enum elf_spu_reloc_type r_type;
152
153 r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
154 /* PR 17512: file: 90c2a92e. */
155 if (r_type >= R_SPU_max)
156 {
157 (*_bfd_error_handler) (_("%A: unrecognised SPU reloc number: %d"),
158 abfd, r_type);
159 bfd_set_error (bfd_error_bad_value);
160 r_type = R_SPU_NONE;
161 }
162 cache_ptr->howto = &elf_howto_table[(int) r_type];
163 }
164
165 static reloc_howto_type *
166 spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
167 bfd_reloc_code_real_type code)
168 {
169 enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);
170
171 if (r_type == R_SPU_NONE)
172 return NULL;
173
174 return elf_howto_table + r_type;
175 }
176
177 static reloc_howto_type *
178 spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
179 const char *r_name)
180 {
181 unsigned int i;
182
183 for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
184 if (elf_howto_table[i].name != NULL
185 && strcasecmp (elf_howto_table[i].name, r_name) == 0)
186 return &elf_howto_table[i];
187
188 return NULL;
189 }
190
/* Apply R_SPU_REL9 and R_SPU_REL9I relocs.  These are 9-bit
   pc-relative word offsets whose immediate field is split in the
   instruction, so bfd_elf_generic_reloc cannot place it; the howto's
   dst_mask selects which split layout applies.  */

static bfd_reloc_status_type
spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
	      void *data, asection *input_section,
	      bfd *output_bfd, char **error_message)
{
  bfd_size_type octets;
  bfd_vma val;
  long insn;

  /* If this is a relocatable link (output_bfd test tells us), just
     call the generic function.  Any adjustment will be done at final
     link time.  */
  if (output_bfd != NULL)
    return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
				  input_section, output_bfd, error_message);

  /* NOTE(review): this allows address == limit, yet 4 bytes are read
     at that offset below; looks like a possible off-by-one against a
     truncated section -- confirm against bfd_get_section_limit
     semantics before changing.  */
  if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
    return bfd_reloc_outofrange;
  octets = reloc_entry->address * bfd_octets_per_byte (abfd);

  /* Get symbol value.  */
  val = 0;
  if (!bfd_is_com_section (symbol->section))
    val = symbol->value;
  if (symbol->section->output_section)
    val += symbol->section->output_section->vma;

  val += reloc_entry->addend;

  /* Make it pc-relative.  */
  val -= input_section->output_section->vma + input_section->output_offset;

  /* Convert byte offset to words and range-check the signed 9-bit
     field: reject anything outside [-256, 255].  */
  val >>= 2;
  if (val + 256 >= 512)
    return bfd_reloc_overflow;

  insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);

  /* Move two high bits of value to REL9I and REL9 position.
     The mask will take care of selecting the right field.  */
  val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
  insn &= ~reloc_entry->howto->dst_mask;
  insn |= val & reloc_entry->howto->dst_mask;
  bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
  return bfd_reloc_ok;
}
239
240 static bfd_boolean
241 spu_elf_new_section_hook (bfd *abfd, asection *sec)
242 {
243 if (!sec->used_by_bfd)
244 {
245 struct _spu_elf_section_data *sdata;
246
247 sdata = bfd_zalloc (abfd, sizeof (*sdata));
248 if (sdata == NULL)
249 return FALSE;
250 sec->used_by_bfd = sdata;
251 }
252
253 return _bfd_elf_new_section_hook (abfd, sec);
254 }
255
256 /* Set up overlay info for executables. */
257
258 static bfd_boolean
259 spu_elf_object_p (bfd *abfd)
260 {
261 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
262 {
263 unsigned int i, num_ovl, num_buf;
264 Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
265 Elf_Internal_Ehdr *ehdr = elf_elfheader (abfd);
266 Elf_Internal_Phdr *last_phdr = NULL;
267
268 for (num_buf = 0, num_ovl = 0, i = 0; i < ehdr->e_phnum; i++, phdr++)
269 if (phdr->p_type == PT_LOAD && (phdr->p_flags & PF_OVERLAY) != 0)
270 {
271 unsigned int j;
272
273 ++num_ovl;
274 if (last_phdr == NULL
275 || ((last_phdr->p_vaddr ^ phdr->p_vaddr) & 0x3ffff) != 0)
276 ++num_buf;
277 last_phdr = phdr;
278 for (j = 1; j < elf_numsections (abfd); j++)
279 {
280 Elf_Internal_Shdr *shdr = elf_elfsections (abfd)[j];
281
282 if (ELF_SECTION_SIZE (shdr, phdr) != 0
283 && ELF_SECTION_IN_SEGMENT (shdr, phdr))
284 {
285 asection *sec = shdr->bfd_section;
286 spu_elf_section_data (sec)->u.o.ovl_index = num_ovl;
287 spu_elf_section_data (sec)->u.o.ovl_buf = num_buf;
288 }
289 }
290 }
291 }
292 return TRUE;
293 }
294
295 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
296 strip --strip-unneeded will not remove them. */
297
298 static void
299 spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
300 {
301 if (sym->name != NULL
302 && sym->section != bfd_abs_section_ptr
303 && strncmp (sym->name, "_EAR_", 5) == 0)
304 sym->flags |= BSF_KEEP;
305 }
306
/* SPU ELF linker hash table.  Extends the generic ELF hash table with
   overlay and stub bookkeeping.  */

struct spu_link_hash_table
{
  struct elf_link_hash_table elf;

  /* Parameters handed to us by the linker via spu_elf_setup.  */
  struct spu_elf_params *params;

  /* Shortcuts to overlay sections.  */
  asection *ovtab;
  asection *init;
  asection *toe;
  asection **ovl_sec;

  /* Count of stubs in each overlay section.  */
  unsigned int *stub_count;

  /* The stub section for each overlay section.  */
  asection **stub_sec;

  /* Overlay manager entry points: load and return (or, for soft
     icache, the branch and call handlers).  */
  struct elf_link_hash_entry *ovly_entry[2];

  /* Number of overlay buffers.  */
  unsigned int num_buf;

  /* Total number of overlays.  */
  unsigned int num_overlays;

  /* For soft icache.  */
  unsigned int line_size_log2;
  unsigned int num_lines_log2;
  unsigned int fromelem_size_log2;

  /* How much memory we have.  */
  unsigned int local_store;

  /* Count of overlay stubs needed in non-overlay area.  */
  unsigned int non_ovly_stub;

  /* Pointer to the fixup section */
  asection *sfixup;

  /* Set on error.  */
  unsigned int stub_err : 1;
};
352
/* Hijack the generic got fields for overlay stub accounting.  One
   entry per (symbol, overlay, addend) needing a stub.  */

struct got_entry
{
  /* Next entry on the per-symbol chain.  */
  struct got_entry *next;
  /* Overlay index the referencing code lives in; 0 for the
     non-overlay area.  */
  unsigned int ovl;
  union {
    bfd_vma addend;
    bfd_vma br_addr;
  };
  /* Address of the stub itself; (bfd_vma) -1 until assigned.  */
  bfd_vma stub_addr;
};

/* Fetch the SPU hash table from a bfd_link_info, or NULL when the
   hash table attached to the link is not an SPU one.  */
#define spu_hash_table(p) \
  (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
   == SPU_ELF_DATA ? ((struct spu_link_hash_table *) ((p)->hash)) : NULL)
369
/* An edge in the call graph: one call site (or tail/pasted branch)
   from some function to FUN.  */
struct call_info
{
  /* The callee.  */
  struct function_info *fun;
  /* Next call made by the same caller.  */
  struct call_info *next;
  /* Number of times this call site was seen.  */
  unsigned int count;
  unsigned int max_depth;
  unsigned int is_tail : 1;
  unsigned int is_pasted : 1;
  unsigned int broken_cycle : 1;
  unsigned int priority : 13;
};
381
/* Per-function (or per function-fragment) information gathered while
   analysing code sections.  */
struct function_info
{
  /* List of functions called.  Also branches to hot/cold part of
     function.  */
  struct call_info *call_list;
  /* For hot/cold part of function, point to owner.  */
  struct function_info *start;
  /* Symbol at start of function.  */
  union {
    Elf_Internal_Sym *sym;
    struct elf_link_hash_entry *h;
  } u;
  /* Function section.  */
  asection *sec;
  asection *rodata;
  /* Where last called from, and number of sections called from.  */
  asection *last_caller;
  unsigned int call_count;
  /* Address range of (this part of) function.  */
  bfd_vma lo, hi;
  /* Offset where we found a store of lr, or -1 if none found.  */
  bfd_vma lr_store;
  /* Offset where we found the stack adjustment insn.  */
  bfd_vma sp_adjust;
  /* Stack usage.  */
  int stack;
  /* Distance from root of call tree.  Tail and hot/cold branches
     count as one deeper.  We aren't counting stack frames here.  */
  unsigned int depth;
  /* Set if global symbol.  */
  unsigned int global : 1;
  /* Set if known to be start of function (as distinct from a hunk
     in hot/cold section).  */
  unsigned int is_func : 1;
  /* Set if not a root node.  */
  unsigned int non_root : 1;
  /* Flags used during call tree traversal.  It's cheaper to replicate
     the visit flags than have one which needs clearing after a traversal.  */
  unsigned int visit1 : 1;
  unsigned int visit2 : 1;
  unsigned int marking : 1;
  unsigned int visit3 : 1;
  unsigned int visit4 : 1;
  unsigned int visit5 : 1;
  unsigned int visit6 : 1;
  unsigned int visit7 : 1;
};
429
/* Per-section collection of function_info records.  */
struct spu_elf_stack_info
{
  /* Number of entries used in FUN.  */
  int num_fun;
  /* Allocated capacity of FUN.  */
  int max_fun;
  /* Variable size array describing functions, one per contiguous
     address range belonging to a function.  */
  struct function_info fun[1];
};
438
439 static struct function_info *find_function (asection *, bfd_vma,
440 struct bfd_link_info *);
441
442 /* Create a spu ELF linker hash table. */
443
444 static struct bfd_link_hash_table *
445 spu_elf_link_hash_table_create (bfd *abfd)
446 {
447 struct spu_link_hash_table *htab;
448
449 htab = bfd_zmalloc (sizeof (*htab));
450 if (htab == NULL)
451 return NULL;
452
453 if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
454 _bfd_elf_link_hash_newfunc,
455 sizeof (struct elf_link_hash_entry),
456 SPU_ELF_DATA))
457 {
458 free (htab);
459 return NULL;
460 }
461
462 htab->elf.init_got_refcount.refcount = 0;
463 htab->elf.init_got_refcount.glist = NULL;
464 htab->elf.init_got_offset.offset = 0;
465 htab->elf.init_got_offset.glist = NULL;
466 return &htab->elf.root;
467 }
468
469 void
470 spu_elf_setup (struct bfd_link_info *info, struct spu_elf_params *params)
471 {
472 bfd_vma max_branch_log2;
473
474 struct spu_link_hash_table *htab = spu_hash_table (info);
475 htab->params = params;
476 htab->line_size_log2 = bfd_log2 (htab->params->line_size);
477 htab->num_lines_log2 = bfd_log2 (htab->params->num_lines);
478
479 /* For the software i-cache, we provide a "from" list whose size
480 is a power-of-two number of quadwords, big enough to hold one
481 byte per outgoing branch. Compute this number here. */
482 max_branch_log2 = bfd_log2 (htab->params->max_branch);
483 htab->fromelem_size_log2 = max_branch_log2 > 4 ? max_branch_log2 - 4 : 0;
484 }
485
/* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
   to (hash, NULL) for global symbols, and (NULL, sym) for locals.  Set
   *SYMSECP to the symbol's section.  *LOCSYMSP caches local syms.
   Returns FALSE only if the local symbol table cannot be read.  */

static bfd_boolean
get_sym_h (struct elf_link_hash_entry **hp,
	   Elf_Internal_Sym **symp,
	   asection **symsecp,
	   Elf_Internal_Sym **locsymsp,
	   unsigned long r_symndx,
	   bfd *ibfd)
{
  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;

  /* Indices at or past sh_info are global symbols.  */
  if (r_symndx >= symtab_hdr->sh_info)
    {
      struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
      struct elf_link_hash_entry *h;

      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
      /* Follow indirect and warning links to the real symbol.  */
      while (h->root.type == bfd_link_hash_indirect
	     || h->root.type == bfd_link_hash_warning)
	h = (struct elf_link_hash_entry *) h->root.u.i.link;

      if (hp != NULL)
	*hp = h;

      if (symp != NULL)
	*symp = NULL;

      if (symsecp != NULL)
	{
	  asection *symsec = NULL;
	  /* Only defined symbols have a section; others yield NULL.  */
	  if (h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	    symsec = h->root.u.def.section;
	  *symsecp = symsec;
	}
    }
  else
    {
      Elf_Internal_Sym *sym;
      Elf_Internal_Sym *locsyms = *locsymsp;

      if (locsyms == NULL)
	{
	  /* Prefer symbols already cached on the header; otherwise
	     read them and remember the result in *LOCSYMSP so later
	     calls don't re-read.  */
	  locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
	  if (locsyms == NULL)
	    locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr,
					    symtab_hdr->sh_info,
					    0, NULL, NULL, NULL);
	  if (locsyms == NULL)
	    return FALSE;
	  *locsymsp = locsyms;
	}
      sym = locsyms + r_symndx;

      if (hp != NULL)
	*hp = NULL;

      if (symp != NULL)
	*symp = sym;

      if (symsecp != NULL)
	*symsecp = bfd_section_from_elf_index (ibfd, sym->st_shndx);
    }

  return TRUE;
}
555
/* Create the note section if not already present.  This is done early so
   that the linker maps the sections to the right place in the output.
   Also creates the .fixup section when fixups were requested.  */

bfd_boolean
spu_elf_create_sections (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;

  /* See whether any input already supplies the SPUNAME note.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
      break;

  if (ibfd == NULL)
    {
      /* Make SPU_PTNOTE_SPUNAME section.  */
      asection *s;
      size_t name_len;
      size_t size;
      bfd_byte *data;
      flagword flags;

      ibfd = info->input_bfds;
      flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
      if (s == NULL
	  || !bfd_set_section_alignment (ibfd, s, 4))
	return FALSE;

      /* ELF note layout: 12-byte header, then the name and the
	 descriptor (output filename), each padded to 4 bytes.  */
      name_len = strlen (bfd_get_filename (info->output_bfd)) + 1;
      size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
      size += (name_len + 3) & -4;

      if (!bfd_set_section_size (ibfd, s, size))
	return FALSE;

      data = bfd_zalloc (ibfd, size);
      if (data == NULL)
	return FALSE;

      /* namesz, descsz, type = 1, then name and desc contents.  */
      bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
      bfd_put_32 (ibfd, name_len, data + 4);
      bfd_put_32 (ibfd, 1, data + 8);
      memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
      memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
	      bfd_get_filename (info->output_bfd), name_len);
      s->contents = data;
    }

  if (htab->params->emit_fixups)
    {
      asection *s;
      flagword flags;

      if (htab->elf.dynobj == NULL)
	htab->elf.dynobj = ibfd;
      ibfd = htab->elf.dynobj;
      flags = (SEC_LOAD | SEC_ALLOC | SEC_READONLY | SEC_HAS_CONTENTS
	       | SEC_IN_MEMORY | SEC_LINKER_CREATED);
      s = bfd_make_section_anyway_with_flags (ibfd, ".fixup", flags);
      if (s == NULL || !bfd_set_section_alignment (ibfd, s, 2))
	return FALSE;
      htab->sfixup = s;
    }

  return TRUE;
}
623
624 /* qsort predicate to sort sections by vma. */
625
626 static int
627 sort_sections (const void *a, const void *b)
628 {
629 const asection *const *s1 = a;
630 const asection *const *s2 = b;
631 bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;
632
633 if (delta != 0)
634 return delta < 0 ? -1 : 1;
635
636 return (*s1)->index - (*s2)->index;
637 }
638
639 /* Identify overlays in the output bfd, and number them.
640 Returns 0 on error, 1 if no overlays, 2 if overlays. */
641
642 int
643 spu_elf_find_overlays (struct bfd_link_info *info)
644 {
645 struct spu_link_hash_table *htab = spu_hash_table (info);
646 asection **alloc_sec;
647 unsigned int i, n, ovl_index, num_buf;
648 asection *s;
649 bfd_vma ovl_end;
650 static const char *const entry_names[2][2] = {
651 { "__ovly_load", "__icache_br_handler" },
652 { "__ovly_return", "__icache_call_handler" }
653 };
654
655 if (info->output_bfd->section_count < 2)
656 return 1;
657
658 alloc_sec
659 = bfd_malloc (info->output_bfd->section_count * sizeof (*alloc_sec));
660 if (alloc_sec == NULL)
661 return 0;
662
663 /* Pick out all the alloced sections. */
664 for (n = 0, s = info->output_bfd->sections; s != NULL; s = s->next)
665 if ((s->flags & SEC_ALLOC) != 0
666 && (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
667 && s->size != 0)
668 alloc_sec[n++] = s;
669
670 if (n == 0)
671 {
672 free (alloc_sec);
673 return 1;
674 }
675
676 /* Sort them by vma. */
677 qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);
678
679 ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
680 if (htab->params->ovly_flavour == ovly_soft_icache)
681 {
682 unsigned int prev_buf = 0, set_id = 0;
683
684 /* Look for an overlapping vma to find the first overlay section. */
685 bfd_vma vma_start = 0;
686
687 for (i = 1; i < n; i++)
688 {
689 s = alloc_sec[i];
690 if (s->vma < ovl_end)
691 {
692 asection *s0 = alloc_sec[i - 1];
693 vma_start = s0->vma;
694 ovl_end = (s0->vma
695 + ((bfd_vma) 1
696 << (htab->num_lines_log2 + htab->line_size_log2)));
697 --i;
698 break;
699 }
700 else
701 ovl_end = s->vma + s->size;
702 }
703
704 /* Now find any sections within the cache area. */
705 for (ovl_index = 0, num_buf = 0; i < n; i++)
706 {
707 s = alloc_sec[i];
708 if (s->vma >= ovl_end)
709 break;
710
711 /* A section in an overlay area called .ovl.init is not
712 an overlay, in the sense that it might be loaded in
713 by the overlay manager, but rather the initial
714 section contents for the overlay buffer. */
715 if (strncmp (s->name, ".ovl.init", 9) != 0)
716 {
717 num_buf = ((s->vma - vma_start) >> htab->line_size_log2) + 1;
718 set_id = (num_buf == prev_buf)? set_id + 1 : 0;
719 prev_buf = num_buf;
720
721 if ((s->vma - vma_start) & (htab->params->line_size - 1))
722 {
723 info->callbacks->einfo (_("%X%P: overlay section %A "
724 "does not start on a cache line.\n"),
725 s);
726 bfd_set_error (bfd_error_bad_value);
727 return 0;
728 }
729 else if (s->size > htab->params->line_size)
730 {
731 info->callbacks->einfo (_("%X%P: overlay section %A "
732 "is larger than a cache line.\n"),
733 s);
734 bfd_set_error (bfd_error_bad_value);
735 return 0;
736 }
737
738 alloc_sec[ovl_index++] = s;
739 spu_elf_section_data (s)->u.o.ovl_index
740 = (set_id << htab->num_lines_log2) + num_buf;
741 spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
742 }
743 }
744
745 /* Ensure there are no more overlay sections. */
746 for ( ; i < n; i++)
747 {
748 s = alloc_sec[i];
749 if (s->vma < ovl_end)
750 {
751 info->callbacks->einfo (_("%X%P: overlay section %A "
752 "is not in cache area.\n"),
753 alloc_sec[i-1]);
754 bfd_set_error (bfd_error_bad_value);
755 return 0;
756 }
757 else
758 ovl_end = s->vma + s->size;
759 }
760 }
761 else
762 {
763 /* Look for overlapping vmas. Any with overlap must be overlays.
764 Count them. Also count the number of overlay regions. */
765 for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
766 {
767 s = alloc_sec[i];
768 if (s->vma < ovl_end)
769 {
770 asection *s0 = alloc_sec[i - 1];
771
772 if (spu_elf_section_data (s0)->u.o.ovl_index == 0)
773 {
774 ++num_buf;
775 if (strncmp (s0->name, ".ovl.init", 9) != 0)
776 {
777 alloc_sec[ovl_index] = s0;
778 spu_elf_section_data (s0)->u.o.ovl_index = ++ovl_index;
779 spu_elf_section_data (s0)->u.o.ovl_buf = num_buf;
780 }
781 else
782 ovl_end = s->vma + s->size;
783 }
784 if (strncmp (s->name, ".ovl.init", 9) != 0)
785 {
786 alloc_sec[ovl_index] = s;
787 spu_elf_section_data (s)->u.o.ovl_index = ++ovl_index;
788 spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
789 if (s0->vma != s->vma)
790 {
791 info->callbacks->einfo (_("%X%P: overlay sections %A "
792 "and %A do not start at the "
793 "same address.\n"),
794 s0, s);
795 bfd_set_error (bfd_error_bad_value);
796 return 0;
797 }
798 if (ovl_end < s->vma + s->size)
799 ovl_end = s->vma + s->size;
800 }
801 }
802 else
803 ovl_end = s->vma + s->size;
804 }
805 }
806
807 htab->num_overlays = ovl_index;
808 htab->num_buf = num_buf;
809 htab->ovl_sec = alloc_sec;
810
811 if (ovl_index == 0)
812 return 1;
813
814 for (i = 0; i < 2; i++)
815 {
816 const char *name;
817 struct elf_link_hash_entry *h;
818
819 name = entry_names[i][htab->params->ovly_flavour];
820 h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
821 if (h == NULL)
822 return 0;
823
824 if (h->root.type == bfd_link_hash_new)
825 {
826 h->root.type = bfd_link_hash_undefined;
827 h->ref_regular = 1;
828 h->ref_regular_nonweak = 1;
829 h->non_elf = 0;
830 }
831 htab->ovly_entry[i] = h;
832 }
833
834 return 2;
835 }
836
/* Non-zero to use bra in overlay stubs rather than br.  */
#define BRA_STUBS 0

/* SPU instruction encodings (opcode in the high bits, operand fields
   zero) used when emitting overlay stub code.  */
#define BRA 0x30000000
#define BRASL 0x31000000
#define BR 0x32000000
#define BRSL 0x33000000
#define NOP 0x40200000
#define LNOP 0x00200000
#define ILA 0x42000000
847
848 /* Return true for all relative and absolute branch instructions.
849 bra 00110000 0..
850 brasl 00110001 0..
851 br 00110010 0..
852 brsl 00110011 0..
853 brz 00100000 0..
854 brnz 00100001 0..
855 brhz 00100010 0..
856 brhnz 00100011 0.. */
857
858 static bfd_boolean
859 is_branch (const unsigned char *insn)
860 {
861 return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
862 }
863
864 /* Return true for all indirect branch instructions.
865 bi 00110101 000
866 bisl 00110101 001
867 iret 00110101 010
868 bisled 00110101 011
869 biz 00100101 000
870 binz 00100101 001
871 bihz 00100101 010
872 bihnz 00100101 011 */
873
874 static bfd_boolean
875 is_indirect_branch (const unsigned char *insn)
876 {
877 return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
878 }
879
880 /* Return true for branch hint instructions.
881 hbra 0001000..
882 hbrr 0001001.. */
883
884 static bfd_boolean
885 is_hint (const unsigned char *insn)
886 {
887 return (insn[0] & 0xfc) == 0x10;
888 }
889
890 /* True if INPUT_SECTION might need overlay stubs. */
891
892 static bfd_boolean
893 maybe_needs_stubs (asection *input_section)
894 {
895 /* No stubs for debug sections and suchlike. */
896 if ((input_section->flags & SEC_ALLOC) == 0)
897 return FALSE;
898
899 /* No stubs for link-once sections that will be discarded. */
900 if (input_section->output_section == bfd_abs_section_ptr)
901 return FALSE;
902
903 /* Don't create stubs for .eh_frame references. */
904 if (strcmp (input_section->name, ".eh_frame") == 0)
905 return FALSE;
906
907 return TRUE;
908 }
909
/* Classification of the overlay stub a reloc requires.  The br*
   values encode the three "lrlive" bits taken from the branch
   instruction, starting at br000_ovl_stub.  */
enum _stub_type
{
  no_stub,		/* No stub required.  */
  call_ovl_stub,	/* Stub for a call into an overlay.  */
  br000_ovl_stub,
  br001_ovl_stub,
  br010_ovl_stub,
  br011_ovl_stub,
  br100_ovl_stub,
  br101_ovl_stub,
  br110_ovl_stub,
  br111_ovl_stub,
  nonovl_stub,		/* Stub placed in the non-overlay area.  */
  stub_error		/* Could not determine; treat as failure.  */
};
925
/* Return non-zero if this reloc symbol should go via an overlay stub.
   Return 2 if the stub must be in non-overlay area.  */

static enum _stub_type
needs_ovl_stub (struct elf_link_hash_entry *h,
		Elf_Internal_Sym *sym,
		asection *sym_sec,
		asection *input_section,
		Elf_Internal_Rela *irela,
		bfd_byte *contents,
		struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  enum elf_spu_reloc_type r_type;
  unsigned int sym_type;
  bfd_boolean branch, hint, call;
  enum _stub_type ret = no_stub;
  bfd_byte insn[4];

  /* Symbols without a real output section can never need stubs.  */
  if (sym_sec == NULL
      || sym_sec->output_section == bfd_abs_section_ptr
      || spu_elf_section_data (sym_sec->output_section) == NULL)
    return ret;

  if (h != NULL)
    {
      /* Ensure no stubs for user supplied overlay manager syms.  */
      if (h == htab->ovly_entry[0] || h == htab->ovly_entry[1])
	return ret;

      /* setjmp always goes via an overlay stub, because then the return
	 and hence the longjmp goes via __ovly_return.  That magically
	 makes setjmp/longjmp between overlays work.  */
      if (strncmp (h->root.root.string, "setjmp", 6) == 0
	  && (h->root.root.string[6] == '\0' || h->root.root.string[6] == '@'))
	ret = call_ovl_stub;
    }

  if (h != NULL)
    sym_type = h->type;
  else
    sym_type = ELF_ST_TYPE (sym->st_info);

  r_type = ELF32_R_TYPE (irela->r_info);
  branch = FALSE;
  hint = FALSE;
  call = FALSE;
  if (r_type == R_SPU_REL16 || r_type == R_SPU_ADDR16)
    {
      /* Fetch the 4-byte instruction at the reloc so we can tell
	 branches and hints from data references.  */
      if (contents == NULL)
	{
	  contents = insn;
	  if (!bfd_get_section_contents (input_section->owner,
					 input_section,
					 contents,
					 irela->r_offset, 4))
	    return stub_error;
	}
      else
	contents += irela->r_offset;

      branch = is_branch (contents);
      hint = is_hint (contents);
      if (branch || hint)
	{
	  /* brsl/brasl (opcodes 0x31/0x33) are calls.  */
	  call = (contents[0] & 0xfd) == 0x31;
	  if (call
	      && sym_type != STT_FUNC
	      && contents != insn)
	    {
	      /* It's common for people to write assembly and forget
		 to give function symbols the right type.  Handle
		 calls to such symbols, but warn so that (hopefully)
		 people will fix their code.  We need the symbol
		 type to be correct to distinguish function pointer
		 initialisation from other pointer initialisations.  */
	      const char *sym_name;

	      if (h != NULL)
		sym_name = h->root.root.string;
	      else
		{
		  Elf_Internal_Shdr *symtab_hdr;
		  symtab_hdr = &elf_tdata (input_section->owner)->symtab_hdr;
		  sym_name = bfd_elf_sym_name (input_section->owner,
					       symtab_hdr,
					       sym,
					       sym_sec);
		}
	      /* NOTE(review): the bfd owner precedes sym_name here,
		 matching the old _bfd_error_handler convention of
		 consuming %A/%B arguments first -- verify against
		 the libbfd error handler before reordering.  */
	      (*_bfd_error_handler) (_("warning: call to non-function"
				       " symbol %s defined in %B"),
				     sym_sec->owner, sym_name);

	    }
	}
    }

  /* Soft icache only stubs branches; otherwise non-function data
     references need no stub.  */
  if ((!branch && htab->params->ovly_flavour == ovly_soft_icache)
      || (sym_type != STT_FUNC
	  && !(branch || hint)
	  && (sym_sec->flags & SEC_CODE) == 0))
    return no_stub;

  /* Usually, symbols in non-overlay sections don't need stubs.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index == 0
      && !htab->params->non_overlay_stubs)
    return ret;

  /* A reference from some other section to a symbol in an overlay
     section needs a stub.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index
       != spu_elf_section_data (input_section->output_section)->u.o.ovl_index)
    {
      unsigned int lrlive = 0;
      /* The branch instruction carries the lrlive bits in byte 1.  */
      if (branch)
	lrlive = (contents[1] & 0x70) >> 4;

      if (!lrlive && (call || sym_type == STT_FUNC))
	ret = call_ovl_stub;
      else
	ret = br000_ovl_stub + lrlive;
    }

  /* If this insn isn't a branch then we are possibly taking the
     address of a function and passing it out somehow.  Soft-icache code
     always generates inline code to do indirect branches.  */
  if (!(branch || hint)
      && sym_type == STT_FUNC
      && htab->params->ovly_flavour != ovly_soft_icache)
    ret = nonovl_stub;

  return ret;
}
1059
/* Record that a stub of STUB_TYPE is needed for the symbol identified
   by H (global) or IRELA's symbol index (local), referenced from ISEC
   in IBFD.  Bumps htab->stub_count for the relevant overlay and links
   a got_entry onto the symbol's chain.  Returns FALSE on out of
   memory.  */

static bfd_boolean
count_stub (struct spu_link_hash_table *htab,
	    bfd *ibfd,
	    asection *isec,
	    enum _stub_type stub_type,
	    struct elf_link_hash_entry *h,
	    const Elf_Internal_Rela *irela)
{
  unsigned int ovl = 0;
  struct got_entry *g, **head;
  bfd_vma addend;

  /* If this instruction is a branch or call, we need a stub
     for it.  One stub per function per overlay.
     If it isn't a branch, then we are taking the address of
     this function so need a stub in the non-overlay area
     for it.  One stub per function.  */
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

  if (h != NULL)
    head = &h->got.glist;
  else
    {
      /* Lazily allocate the per-local-symbol got_entry chains.  */
      if (elf_local_got_ents (ibfd) == NULL)
	{
	  bfd_size_type amt = (elf_tdata (ibfd)->symtab_hdr.sh_info
			       * sizeof (*elf_local_got_ents (ibfd)));
	  elf_local_got_ents (ibfd) = bfd_zmalloc (amt);
	  if (elf_local_got_ents (ibfd) == NULL)
	    return FALSE;
	}
      head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
    }

  /* Soft icache keeps only a count; no per-entry records.  */
  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      htab->stub_count[ovl] += 1;
      return TRUE;
    }

  addend = 0;
  if (irela != NULL)
    addend = irela->r_addend;

  if (ovl == 0)
    {
      struct got_entry *gnext;

      for (g = *head; g != NULL; g = g->next)
	if (g->addend == addend && g->ovl == 0)
	  break;

      if (g == NULL)
	{
	  /* Need a new non-overlay area stub.  Zap other stubs.  */
	  /* NOTE(review): freed entries are not unlinked from *head
	     here; presumably nothing walks the stale links afterwards,
	     but this deserves confirmation.  */
	  for (g = *head; g != NULL; g = gnext)
	    {
	      gnext = g->next;
	      if (g->addend == addend)
		{
		  htab->stub_count[g->ovl] -= 1;
		  free (g);
		}
	    }
	}
    }
  else
    {
      /* An existing stub in this overlay, or in the non-overlay area,
	 already covers this reference.  */
      for (g = *head; g != NULL; g = g->next)
	if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
	  break;
    }

  if (g == NULL)
    {
      g = bfd_malloc (sizeof *g);
      if (g == NULL)
	return FALSE;
      g->ovl = ovl;
      g->addend = addend;
      g->stub_addr = (bfd_vma) -1;
      g->next = *head;
      *head = g;

      htab->stub_count[ovl] += 1;
    }

  return TRUE;
}
1150
1151 /* Support two sizes of overlay stubs, a slower more compact stub of two
1152 instructions, and a faster stub of four instructions.
1153 Soft-icache stubs are four or eight words. */
1154
1155 static unsigned int
1156 ovl_stub_size (struct spu_elf_params *params)
1157 {
1158 return 16 << params->ovly_flavour >> params->compact_stub;
1159 }
1160
1161 static unsigned int
1162 ovl_stub_size_log2 (struct spu_elf_params *params)
1163 {
1164 return 4 + params->ovly_flavour - params->compact_stub;
1165 }
1166
1167 /* Two instruction overlay stubs look like:
1168
1169 brsl $75,__ovly_load
1170 .word target_ovl_and_address
1171
1172 ovl_and_address is a word with the overlay number in the top 14 bits
1173 and local store address in the bottom 18 bits.
1174
1175 Four instruction overlay stubs look like:
1176
1177 ila $78,ovl_number
1178 lnop
1179 ila $79,target_address
1180 br __ovly_load
1181
1182 Software icache stubs are:
1183
1184 .word target_index
1185 .word target_ia;
1186 .word lrlive_branchlocalstoreaddr;
1187 brasl $75,__icache_br_handler
1188 .quad xor_pattern
1189 */
1190
/* Emit one overlay stub.  ISEC/IRELA identify the branch, call or
   address-taken reference needing the stub (IRELA is NULL for
   _SPUEAR_ stubs), H is the called symbol when global, DEST/DEST_SEC
   the destination.  The stub form emitted depends on the overlay
   flavour and params->compact_stub; see the layout comment above.
   Sets htab->stub_err and returns FALSE on misaligned addresses or
   memory failure.  */

static bfd_boolean
build_stub (struct bfd_link_info *info,
	    bfd *ibfd,
	    asection *isec,
	    enum _stub_type stub_type,
	    struct elf_link_hash_entry *h,
	    const Elf_Internal_Rela *irela,
	    bfd_vma dest,
	    asection *dest_sec)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  unsigned int ovl, dest_ovl, set_id;
  struct got_entry *g, **head;
  asection *sec;
  bfd_vma addend, from, to, br_dest, patt;
  unsigned int lrlive;

  ovl = 0;
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

  if (h != NULL)
    head = &h->got.glist;
  else
    head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);

  addend = 0;
  if (irela != NULL)
    addend = irela->r_addend;

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      /* Soft-icache stubs are per branch site: always create a new
	 list entry recording the branch address.  */
      g = bfd_malloc (sizeof *g);
      if (g == NULL)
	return FALSE;
      g->ovl = ovl;
      g->br_addr = 0;
      if (irela != NULL)
	g->br_addr = (irela->r_offset
		      + isec->output_offset
		      + isec->output_section->vma);
      g->next = *head;
      *head = g;
    }
  else
    {
      /* Find the entry count_stub made for this reference; a missing
	 entry means the sizing and building passes disagree.  */
      for (g = *head; g != NULL; g = g->next)
	if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
	  break;
      if (g == NULL)
	abort ();

      /* A shared non-overlay stub serves overlay callers too;
	 it is built when processed with ovl == 0.  */
      if (g->ovl == 0 && ovl != 0)
	return TRUE;

      /* Already built.  */
      if (g->stub_addr != (bfd_vma) -1)
	return TRUE;
    }

  sec = htab->stub_sec[ovl];
  dest += dest_sec->output_offset + dest_sec->output_section->vma;
  from = sec->size + sec->output_offset + sec->output_section->vma;
  g->stub_addr = from;
  /* "to" is the overlay manager entry (__ovly_load or equivalent).  */
  to = (htab->ovly_entry[0]->root.u.def.value
	+ htab->ovly_entry[0]->root.u.def.section->output_offset
	+ htab->ovly_entry[0]->root.u.def.section->output_section->vma);

  /* All three addresses must be word aligned for the branch forms
     emitted below.  */
  if (((dest | to | from) & 3) != 0)
    {
      htab->stub_err = 1;
      return FALSE;
    }
  dest_ovl = spu_elf_section_data (dest_sec->output_section)->u.o.ovl_index;

  if (htab->params->ovly_flavour == ovly_normal
      && !htab->params->compact_stub)
    {
      /* Four instruction stub: ila/lnop/ila/br (see layout above).  */
      bfd_put_32 (sec->owner, ILA + ((dest_ovl << 7) & 0x01ffff80) + 78,
		  sec->contents + sec->size);
      bfd_put_32 (sec->owner, LNOP,
		  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, ILA + ((dest << 7) & 0x01ffff80) + 79,
		  sec->contents + sec->size + 8);
      if (!BRA_STUBS)
	bfd_put_32 (sec->owner, BR + (((to - (from + 12)) << 5) & 0x007fff80),
		    sec->contents + sec->size + 12);
      else
	bfd_put_32 (sec->owner, BRA + ((to << 5) & 0x007fff80),
		    sec->contents + sec->size + 12);
    }
  else if (htab->params->ovly_flavour == ovly_normal
	   && htab->params->compact_stub)
    {
      /* Two instruction stub: brsl plus a data word holding the
	 overlay number and target address.  */
      if (!BRA_STUBS)
	bfd_put_32 (sec->owner, BRSL + (((to - from) << 5) & 0x007fff80) + 75,
		    sec->contents + sec->size);
      else
	bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
		    sec->contents + sec->size);
      bfd_put_32 (sec->owner, (dest & 0x3ffff) | (dest_ovl << 18),
		  sec->contents + sec->size + 4);
    }
  else if (htab->params->ovly_flavour == ovly_soft_icache
	   && htab->params->compact_stub)
    {
      /* Determine which registers are live across the branch so the
	 icache manager knows what to preserve.  */
      lrlive = 0;
      if (stub_type == nonovl_stub)
	;
      else if (stub_type == call_ovl_stub)
	/* A brsl makes lr live and *(*sp+16) is live.
	   Tail calls have the same liveness.  */
	lrlive = 5;
      else if (!htab->params->lrlive_analysis)
	/* Assume stack frame and lr save.  */
	lrlive = 1;
      else if (irela != NULL)
	{
	  /* Analyse branch instructions.  */
	  struct function_info *caller;
	  bfd_vma off;

	  caller = find_function (isec, irela->r_offset, info);
	  if (caller->start == NULL)
	    off = irela->r_offset;
	  else
	    {
	      struct function_info *found = NULL;

	      /* Find the earliest piece of this function that
		 has frame adjusting instructions.  We might
		 see dynamic frame adjustment (eg. for alloca)
		 in some later piece, but functions using
		 alloca always set up a frame earlier.  Frame
		 setup instructions are always in one piece.  */
	      if (caller->lr_store != (bfd_vma) -1
		  || caller->sp_adjust != (bfd_vma) -1)
		found = caller;
	      while (caller->start != NULL)
		{
		  caller = caller->start;
		  if (caller->lr_store != (bfd_vma) -1
		      || caller->sp_adjust != (bfd_vma) -1)
		    found = caller;
		}
	      if (found != NULL)
		caller = found;
	      off = (bfd_vma) -1;
	    }

	  if (off > caller->sp_adjust)
	    {
	      if (off > caller->lr_store)
		/* Only *(*sp+16) is live.  */
		lrlive = 1;
	      else
		/* If no lr save, then we must be in a
		   leaf function with a frame.
		   lr is still live.  */
		lrlive = 4;
	    }
	  else if (off > caller->lr_store)
	    {
	      /* Between lr save and stack adjust.  */
	      lrlive = 3;
	      /* This should never happen since prologues won't
		 be split here.  */
	      BFD_ASSERT (0);
	    }
	  else
	    /* On entry to function.  */
	    lrlive = 5;

	  if (stub_type != br000_ovl_stub
	      && lrlive != stub_type - br000_ovl_stub)
	    info->callbacks->einfo (_("%A:0x%v lrlive .brinfo (%u) differs "
				      "from analysis (%u)\n"),
				    isec, irela->r_offset, lrlive,
				    stub_type - br000_ovl_stub);
	}

      /* If given lrlive info via .brinfo, use it.  */
      if (stub_type > br000_ovl_stub)
	lrlive = stub_type - br000_ovl_stub;

      /* Non-overlay code branches via the second entry point.  */
      if (ovl == 0)
	to = (htab->ovly_entry[1]->root.u.def.value
	      + htab->ovly_entry[1]->root.u.def.section->output_offset
	      + htab->ovly_entry[1]->root.u.def.section->output_section->vma);

      /* The branch that uses this stub goes to stub_addr + 4.  We'll
	 set up an xor pattern that can be used by the icache manager
	 to modify this branch to go directly to its destination.  */
      g->stub_addr += 4;
      br_dest = g->stub_addr;
      if (irela == NULL)
	{
	  /* Except in the case of _SPUEAR_ stubs, the branch in
	     question is the one in the stub itself.  */
	  BFD_ASSERT (stub_type == nonovl_stub);
	  g->br_addr = g->stub_addr;
	  br_dest = to;
	}

      set_id = ((dest_ovl - 1) >> htab->num_lines_log2) + 1;
      bfd_put_32 (sec->owner, (set_id << 18) | (dest & 0x3ffff),
		  sec->contents + sec->size);
      bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
		  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, (lrlive << 29) | (g->br_addr & 0x3ffff),
		  sec->contents + sec->size + 8);
      patt = dest ^ br_dest;
      if (irela != NULL && ELF32_R_TYPE (irela->r_info) == R_SPU_REL16)
	patt = (dest - g->br_addr) ^ (br_dest - g->br_addr);
      bfd_put_32 (sec->owner, (patt << 5) & 0x007fff80,
		  sec->contents + sec->size + 12);

      if (ovl == 0)
	/* Extra space for linked list entries.  */
	sec->size += 16;
    }
  else
    abort ();

  sec->size += ovl_stub_size (htab->params);

  if (htab->params->emit_stub_syms)
    {
      /* Define a "NNNNNNNN.ovl_call.name[+addend]" symbol on the
	 stub, mainly as a debugging aid.  */
      size_t len;
      char *name;
      int add;

      len = 8 + sizeof (".ovl_call.") - 1;
      if (h != NULL)
	len += strlen (h->root.root.string);
      else
	len += 8 + 1 + 8;
      add = 0;
      if (irela != NULL)
	add = (int) irela->r_addend & 0xffffffff;
      if (add != 0)
	len += 1 + 8;
      name = bfd_malloc (len + 1);
      if (name == NULL)
	return FALSE;

      sprintf (name, "%08x.ovl_call.", g->ovl);
      if (h != NULL)
	strcpy (name + 8 + sizeof (".ovl_call.") - 1, h->root.root.string);
      else
	sprintf (name + 8 + sizeof (".ovl_call.") - 1, "%x:%x",
		 dest_sec->id & 0xffffffff,
		 (int) ELF32_R_SYM (irela->r_info) & 0xffffffff);
      if (add != 0)
	sprintf (name + len - 9, "+%x", add);

      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
      free (name);
      if (h == NULL)
	return FALSE;
      if (h->root.type == bfd_link_hash_new)
	{
	  h->root.type = bfd_link_hash_defined;
	  h->root.u.def.section = sec;
	  h->size = ovl_stub_size (htab->params);
	  h->root.u.def.value = sec->size - h->size;
	  h->type = STT_FUNC;
	  h->ref_regular = 1;
	  h->def_regular = 1;
	  h->ref_regular_nonweak = 1;
	  h->forced_local = 1;
	  h->non_elf = 0;
	}
    }

  return TRUE;
}
1467
1468 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
1469 symbols. */
1470
1471 static bfd_boolean
1472 allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1473 {
1474 /* Symbols starting with _SPUEAR_ need a stub because they may be
1475 invoked by the PPU. */
1476 struct bfd_link_info *info = inf;
1477 struct spu_link_hash_table *htab = spu_hash_table (info);
1478 asection *sym_sec;
1479
1480 if ((h->root.type == bfd_link_hash_defined
1481 || h->root.type == bfd_link_hash_defweak)
1482 && h->def_regular
1483 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
1484 && (sym_sec = h->root.u.def.section) != NULL
1485 && sym_sec->output_section != bfd_abs_section_ptr
1486 && spu_elf_section_data (sym_sec->output_section) != NULL
1487 && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
1488 || htab->params->non_overlay_stubs))
1489 {
1490 return count_stub (htab, NULL, NULL, nonovl_stub, h, NULL);
1491 }
1492
1493 return TRUE;
1494 }
1495
1496 static bfd_boolean
1497 build_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1498 {
1499 /* Symbols starting with _SPUEAR_ need a stub because they may be
1500 invoked by the PPU. */
1501 struct bfd_link_info *info = inf;
1502 struct spu_link_hash_table *htab = spu_hash_table (info);
1503 asection *sym_sec;
1504
1505 if ((h->root.type == bfd_link_hash_defined
1506 || h->root.type == bfd_link_hash_defweak)
1507 && h->def_regular
1508 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
1509 && (sym_sec = h->root.u.def.section) != NULL
1510 && sym_sec->output_section != bfd_abs_section_ptr
1511 && spu_elf_section_data (sym_sec->output_section) != NULL
1512 && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
1513 || htab->params->non_overlay_stubs))
1514 {
1515 return build_stub (info, NULL, NULL, nonovl_stub, h, NULL,
1516 h->root.u.def.value, sym_sec);
1517 }
1518
1519 return TRUE;
1520 }
1521
1522 /* Size or build stubs. */
1523
/* Walk every reloc of every SPU input section and either count
   (BUILD is FALSE) or build (BUILD is TRUE) the overlay stubs they
   need.  Sharing this walk between the two passes guarantees the
   stubs built match the space sized.  */

static bfd_boolean
process_stubs (struct bfd_link_info *info, bfd_boolean build)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      extern const bfd_target spu_elf32_vec;
      Elf_Internal_Shdr *symtab_hdr;
      asection *isec;
      Elf_Internal_Sym *local_syms = NULL;

      if (ibfd->xvec != &spu_elf32_vec)
	continue;

      /* We'll need the symbol table in a second.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      if (symtab_hdr->sh_info == 0)
	continue;

      /* Walk over each section attached to the input bfd.  */
      for (isec = ibfd->sections; isec != NULL; isec = isec->next)
	{
	  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;

	  /* If there aren't any relocs, then there's nothing more to do.  */
	  if ((isec->flags & SEC_RELOC) == 0
	      || isec->reloc_count == 0)
	    continue;

	  if (!maybe_needs_stubs (isec))
	    continue;

	  /* Get the relocs.  */
	  internal_relocs = _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
						       info->keep_memory);
	  if (internal_relocs == NULL)
	    goto error_ret_free_local;

	  /* Now examine each relocation.  */
	  irela = internal_relocs;
	  irelaend = irela + isec->reloc_count;
	  for (; irela < irelaend; irela++)
	    {
	      enum elf_spu_reloc_type r_type;
	      unsigned int r_indx;
	      asection *sym_sec;
	      Elf_Internal_Sym *sym;
	      struct elf_link_hash_entry *h;
	      enum _stub_type stub_type;

	      r_type = ELF32_R_TYPE (irela->r_info);
	      r_indx = ELF32_R_SYM (irela->r_info);

	      /* Reject out of range reloc types; corrupt objects may
		 contain anything here, and the type indexes tables.  */
	      if (r_type >= R_SPU_max)
		{
		  bfd_set_error (bfd_error_bad_value);
		error_ret_free_internal:
		  if (elf_section_data (isec)->relocs != internal_relocs)
		    free (internal_relocs);
		error_ret_free_local:
		  if (local_syms != NULL
		      && (symtab_hdr->contents
			  != (unsigned char *) local_syms))
		    free (local_syms);
		  return FALSE;
		}

	      /* Determine the reloc target section.  */
	      if (!get_sym_h (&h, &sym, &sym_sec, &local_syms, r_indx, ibfd))
		goto error_ret_free_internal;

	      stub_type = needs_ovl_stub (h, sym, sym_sec, isec, irela,
					  NULL, info);
	      if (stub_type == no_stub)
		continue;
	      else if (stub_type == stub_error)
		goto error_ret_free_internal;

	      /* Lazily allocate the per-overlay stub counters.  */
	      if (htab->stub_count == NULL)
		{
		  bfd_size_type amt;
		  amt = (htab->num_overlays + 1) * sizeof (*htab->stub_count);
		  htab->stub_count = bfd_zmalloc (amt);
		  if (htab->stub_count == NULL)
		    goto error_ret_free_internal;
		}

	      if (!build)
		{
		  if (!count_stub (htab, ibfd, isec, stub_type, h, irela))
		    goto error_ret_free_internal;
		}
	      else
		{
		  bfd_vma dest;

		  if (h != NULL)
		    dest = h->root.u.def.value;
		  else
		    dest = sym->st_value;
		  dest += irela->r_addend;
		  if (!build_stub (info, ibfd, isec, stub_type, h, irela,
				   dest, sym_sec))
		    goto error_ret_free_internal;
		}
	    }

	  /* We're done with the internal relocs, free them.  */
	  if (elf_section_data (isec)->relocs != internal_relocs)
	    free (internal_relocs);
	}

      if (local_syms != NULL
	  && symtab_hdr->contents != (unsigned char *) local_syms)
	{
	  if (!info->keep_memory)
	    free (local_syms);
	  else
	    symtab_hdr->contents = (unsigned char *) local_syms;
	}
    }

  return TRUE;
}
1650
1651 /* Allocate space for overlay call and return stubs.
1652 Return 0 on error, 1 if no overlays, 2 otherwise. */
1653
int
spu_elf_size_stubs (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab;
  bfd *ibfd;
  bfd_size_type amt;
  flagword flags;
  unsigned int i;
  asection *stub;

  /* First pass over the relocs counts the stubs needed per overlay.  */
  if (!process_stubs (info, FALSE))
    return 0;

  htab = spu_hash_table (info);
  elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, info);
  if (htab->stub_err)
    return 0;

  ibfd = info->input_bfds;
  if (htab->stub_count != NULL)
    {
      /* One stub section for the non-overlay area plus one per
	 overlay, all attached to the first input bfd.  */
      amt = (htab->num_overlays + 1) * sizeof (*htab->stub_sec);
      htab->stub_sec = bfd_zmalloc (amt);
      if (htab->stub_sec == NULL)
	return 0;

      flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
	       | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
      stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
      htab->stub_sec[0] = stub;
      if (stub == NULL
	  || !bfd_set_section_alignment (ibfd, stub,
					 ovl_stub_size_log2 (htab->params)))
	return 0;
      stub->size = htab->stub_count[0] * ovl_stub_size (htab->params);
      if (htab->params->ovly_flavour == ovly_soft_icache)
	/* Extra space for linked list entries.  */
	stub->size += htab->stub_count[0] * 16;

      for (i = 0; i < htab->num_overlays; ++i)
	{
	  asection *osec = htab->ovl_sec[i];
	  unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
	  stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
	  htab->stub_sec[ovl] = stub;
	  if (stub == NULL
	      || !bfd_set_section_alignment (ibfd, stub,
					     ovl_stub_size_log2 (htab->params)))
	    return 0;
	  stub->size = htab->stub_count[ovl] * ovl_stub_size (htab->params);
	}
    }

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      /* Space for icache manager tables.
	 a) Tag array, one quadword per cache line.
	 b) Rewrite "to" list, one quadword per cache line.
	 c) Rewrite "from" list, one byte per outgoing branch (rounded up to
	    a power-of-two number of full quadwords) per cache line.  */

      flags = SEC_ALLOC;
      htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
      if (htab->ovtab == NULL
	  || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
	return 0;

      htab->ovtab->size = (16 + 16 + (16 << htab->fromelem_size_log2))
			  << htab->num_lines_log2;

      flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      htab->init = bfd_make_section_anyway_with_flags (ibfd, ".ovini", flags);
      if (htab->init == NULL
	  || !bfd_set_section_alignment (ibfd, htab->init, 4))
	return 0;

      htab->init->size = 16;
    }
  else if (htab->stub_count == NULL)
    return 1;
  else
    {
      /* htab->ovtab consists of two arrays.
	 .	struct {
	 .	  u32 vma;
	 .	  u32 size;
	 .	  u32 file_off;
	 .	  u32 buf;
	 .	} _ovly_table[];
	 .
	 .	struct {
	 .	  u32 mapped;
	 .	} _ovly_buf_table[];
	 .  */

      flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
      if (htab->ovtab == NULL
	  || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
	return 0;

      htab->ovtab->size = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
    }

  /* Table of effective addresses for the PPU, filled in from _EAR_
     symbols.  */
  htab->toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
  if (htab->toe == NULL
      || !bfd_set_section_alignment (ibfd, htab->toe, 4))
    return 0;
  htab->toe->size = 16;

  return 2;
}
1766
1767 /* Called from ld to place overlay manager data sections. This is done
1768 after the overlay manager itself is loaded, mainly so that the
1769 linker's htab->init section is placed after any other .ovl.init
1770 sections. */
1771
1772 void
1773 spu_elf_place_overlay_data (struct bfd_link_info *info)
1774 {
1775 struct spu_link_hash_table *htab = spu_hash_table (info);
1776 unsigned int i;
1777
1778 if (htab->stub_sec != NULL)
1779 {
1780 (*htab->params->place_spu_section) (htab->stub_sec[0], NULL, ".text");
1781
1782 for (i = 0; i < htab->num_overlays; ++i)
1783 {
1784 asection *osec = htab->ovl_sec[i];
1785 unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
1786 (*htab->params->place_spu_section) (htab->stub_sec[ovl], osec, NULL);
1787 }
1788 }
1789
1790 if (htab->params->ovly_flavour == ovly_soft_icache)
1791 (*htab->params->place_spu_section) (htab->init, NULL, ".ovl.init");
1792
1793 if (htab->ovtab != NULL)
1794 {
1795 const char *ovout = ".data";
1796 if (htab->params->ovly_flavour == ovly_soft_icache)
1797 ovout = ".bss";
1798 (*htab->params->place_spu_section) (htab->ovtab, NULL, ovout);
1799 }
1800
1801 if (htab->toe != NULL)
1802 (*htab->params->place_spu_section) (htab->toe, NULL, ".toe");
1803 }
1804
1805 /* Functions to handle embedded spu_ovl.o object. */
1806
/* bfd_openr_iovec "open" callback for the built-in overlay manager:
   the stream cookie (an _ovl_stream describing the in-memory image)
   is returned unchanged as the file handle.  */
static void *
ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
{
  return stream;
}
1812
1813 static file_ptr
1814 ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
1815 void *stream,
1816 void *buf,
1817 file_ptr nbytes,
1818 file_ptr offset)
1819 {
1820 struct _ovl_stream *os;
1821 size_t count;
1822 size_t max;
1823
1824 os = (struct _ovl_stream *) stream;
1825 max = (const char *) os->end - (const char *) os->start;
1826
1827 if ((ufile_ptr) offset >= max)
1828 return 0;
1829
1830 count = nbytes;
1831 if (count > max - offset)
1832 count = max - offset;
1833
1834 memcpy (buf, (const char *) os->start + offset, count);
1835 return count;
1836 }
1837
1838 static int
1839 ovl_mgr_stat (struct bfd *abfd ATTRIBUTE_UNUSED,
1840 void *stream,
1841 struct stat *sb)
1842 {
1843 struct _ovl_stream *os = (struct _ovl_stream *) stream;
1844
1845 memset (sb, 0, sizeof (*sb));
1846 sb->st_size = (const char *) os->end - (const char *) os->start;
1847 return 0;
1848 }
1849
1850 bfd_boolean
1851 spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
1852 {
1853 *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
1854 "elf32-spu",
1855 ovl_mgr_open,
1856 (void *) stream,
1857 ovl_mgr_pread,
1858 NULL,
1859 ovl_mgr_stat);
1860 return *ovl_bfd != NULL;
1861 }
1862
1863 static unsigned int
1864 overlay_index (asection *sec)
1865 {
1866 if (sec == NULL
1867 || sec->output_section == bfd_abs_section_ptr)
1868 return 0;
1869 return spu_elf_section_data (sec->output_section)->u.o.ovl_index;
1870 }
1871
1872 /* Define an STT_OBJECT symbol. */
1873
static struct elf_link_hash_entry *
define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
{
  struct elf_link_hash_entry *h;

  h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
  if (h == NULL)
    return NULL;

  /* A new or undefined symbol becomes a regular definition in the
     .ovtab section; callers set its value, section and size.  */
  if (h->root.type != bfd_link_hash_defined
      || !h->def_regular)
    {
      h->root.type = bfd_link_hash_defined;
      h->root.u.def.section = htab->ovtab;
      h->type = STT_OBJECT;
      h->ref_regular = 1;
      h->def_regular = 1;
      h->ref_regular_nonweak = 1;
      h->non_elf = 0;
    }
  else if (h->root.u.def.section->owner != NULL)
    {
      /* Already defined by an input object: that clashes with the
	 linker-generated overlay table symbol.  */
      (*_bfd_error_handler) (_("%B is not allowed to define %s"),
			     h->root.u.def.section->owner,
			     h->root.root.string);
      bfd_set_error (bfd_error_bad_value);
      return NULL;
    }
  else
    {
      /* No owner means the definition came from a linker script.  */
      (*_bfd_error_handler) (_("you are not allowed to define %s in a script"),
			     h->root.root.string);
      bfd_set_error (bfd_error_bad_value);
      return NULL;
    }

  return h;
}
1912
1913 /* Fill in all stubs and the overlay tables. */
1914
static bfd_boolean
spu_elf_build_stubs (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  struct elf_link_hash_entry *h;
  bfd_byte *p;
  asection *s;
  bfd *obfd;
  unsigned int i;

  if (htab->num_overlays != 0)
    {
      /* The overlay manager entry points must live outside any
	 overlay themselves.  */
      for (i = 0; i < 2; i++)
	{
	  h = htab->ovly_entry[i];
	  if (h != NULL
	      && (h->root.type == bfd_link_hash_defined
		  || h->root.type == bfd_link_hash_defweak)
	      && h->def_regular)
	    {
	      s = h->root.u.def.section->output_section;
	      if (spu_elf_section_data (s)->u.o.ovl_index)
		{
		  (*_bfd_error_handler) (_("%s in overlay section"),
					 h->root.root.string);
		  bfd_set_error (bfd_error_bad_value);
		  return FALSE;
		}
	    }
	}
    }

  if (htab->stub_sec != NULL)
    {
      for (i = 0; i <= htab->num_overlays; i++)
	if (htab->stub_sec[i]->size != 0)
	  {
	    htab->stub_sec[i]->contents = bfd_zalloc (htab->stub_sec[i]->owner,
						      htab->stub_sec[i]->size);
	    if (htab->stub_sec[i]->contents == NULL)
	      return FALSE;
	    /* Save the sized value in rawsize and rebuild size
	       incrementally; the two are cross-checked below.  */
	    htab->stub_sec[i]->rawsize = htab->stub_sec[i]->size;
	    htab->stub_sec[i]->size = 0;
	  }

      /* Fill in all the stubs.  */
      process_stubs (info, TRUE);
      if (!htab->stub_err)
	elf_link_hash_traverse (&htab->elf, build_spuear_stubs, info);

      if (htab->stub_err)
	{
	  (*_bfd_error_handler) (_("overlay stub relocation overflow"));
	  bfd_set_error (bfd_error_bad_value);
	  return FALSE;
	}

      for (i = 0; i <= htab->num_overlays; i++)
	{
	  /* The building pass must produce exactly the bytes the
	     sizing pass counted.  */
	  if (htab->stub_sec[i]->size != htab->stub_sec[i]->rawsize)
	    {
	      (*_bfd_error_handler) (_("stubs don't match calculated size"));
	      bfd_set_error (bfd_error_bad_value);
	      return FALSE;
	    }
	  htab->stub_sec[i]->rawsize = 0;
	}
    }

  if (htab->ovtab == NULL || htab->ovtab->size == 0)
    return TRUE;

  htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
  if (htab->ovtab->contents == NULL)
    return FALSE;

  p = htab->ovtab->contents;
  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      /* Define the absolute symbols describing the icache geometry
	 that the soft-icache manager code references.  */
      bfd_vma off;

      h = define_ovtab_symbol (htab, "__icache_tag_array");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 0;
      h->size = 16 << htab->num_lines_log2;
      off = h->size;

      h = define_ovtab_symbol (htab, "__icache_tag_array_size");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 16 << htab->num_lines_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_rewrite_to");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = off;
      h->size = 16 << htab->num_lines_log2;
      off += h->size;

      h = define_ovtab_symbol (htab, "__icache_rewrite_to_size");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 16 << htab->num_lines_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_rewrite_from");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = off;
      h->size = 16 << (htab->fromelem_size_log2 + htab->num_lines_log2);
      off += h->size;

      h = define_ovtab_symbol (htab, "__icache_rewrite_from_size");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 16 << (htab->fromelem_size_log2
				   + htab->num_lines_log2);
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_log2_fromelemsize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->fromelem_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_base");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->ovl_sec[0]->vma;
      h->root.u.def.section = bfd_abs_section_ptr;
      h->size = htab->num_buf << htab->line_size_log2;

      h = define_ovtab_symbol (htab, "__icache_linesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 1 << htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_log2_linesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_neg_log2_linesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = -htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_cachesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 1 << (htab->num_lines_log2 + htab->line_size_log2);
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_log2_cachesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->num_lines_log2 + htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_neg_log2_cachesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = -(htab->num_lines_log2 + htab->line_size_log2);
      h->root.u.def.section = bfd_abs_section_ptr;

      if (htab->init != NULL && htab->init->size != 0)
	{
	  htab->init->contents = bfd_zalloc (htab->init->owner,
					     htab->init->size);
	  if (htab->init->contents == NULL)
	    return FALSE;

	  h = define_ovtab_symbol (htab, "__icache_fileoff");
	  if (h == NULL)
	    return FALSE;
	  h->root.u.def.value = 0;
	  h->root.u.def.section = htab->init;
	  h->size = 8;
	}
    }
  else
    {
      /* Write out _ovly_table.  */
      /* set low bit of .size to mark non-overlay area as present.  */
      p[7] = 1;
      obfd = htab->ovtab->output_section->owner;
      for (s = obfd->sections; s != NULL; s = s->next)
	{
	  unsigned int ovl_index = spu_elf_section_data (s)->u.o.ovl_index;

	  if (ovl_index != 0)
	    {
	      /* One 16-byte _ovly_table row per overlay.  */
	      unsigned long off = ovl_index * 16;
	      unsigned int ovl_buf = spu_elf_section_data (s)->u.o.ovl_buf;

	      bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
	      bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16,
			  p + off + 4);
	      /* file_off written later in spu_elf_modify_program_headers.  */
	      bfd_put_32 (htab->ovtab->owner, ovl_buf, p + off + 12);
	    }
	}

      h = define_ovtab_symbol (htab, "_ovly_table");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 16;
      h->size = htab->num_overlays * 16;

      h = define_ovtab_symbol (htab, "_ovly_table_end");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->num_overlays * 16 + 16;
      h->size = 0;

      h = define_ovtab_symbol (htab, "_ovly_buf_table");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->num_overlays * 16 + 16;
      h->size = htab->num_buf * 4;

      h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
      h->size = 0;
    }

  h = define_ovtab_symbol (htab, "_EAR_");
  if (h == NULL)
    return FALSE;
  h->root.u.def.section = htab->toe;
  h->root.u.def.value = 0;
  h->size = 16;

  return TRUE;
}
2157
2158 /* Check that all loadable section VMAs lie in the range
2159 LO .. HI inclusive, and stash some parameters for --auto-overlay. */
2160
2161 asection *
2162 spu_elf_check_vma (struct bfd_link_info *info)
2163 {
2164 struct elf_segment_map *m;
2165 unsigned int i;
2166 struct spu_link_hash_table *htab = spu_hash_table (info);
2167 bfd *abfd = info->output_bfd;
2168 bfd_vma hi = htab->params->local_store_hi;
2169 bfd_vma lo = htab->params->local_store_lo;
2170
2171 htab->local_store = hi + 1 - lo;
2172
2173 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
2174 if (m->p_type == PT_LOAD)
2175 for (i = 0; i < m->count; i++)
2176 if (m->sections[i]->size != 0
2177 && (m->sections[i]->vma < lo
2178 || m->sections[i]->vma > hi
2179 || m->sections[i]->vma + m->sections[i]->size - 1 > hi))
2180 return m->sections[i];
2181
2182 return NULL;
2183 }
2184
2185 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
2186 Search for stack adjusting insns, and return the sp delta.
2187 If a store of lr is found save the instruction offset to *LR_STORE.
2188 If a stack adjusting instruction is found, save that offset to
2189 *SP_ADJUST. */
2190
static int
find_function_stack_adjust (asection *sec,
			    bfd_vma offset,
			    bfd_vma *lr_store,
			    bfd_vma *sp_adjust)
{
  /* Model of the 128 SPU registers; reg[1] is the stack pointer,
     reg[0] holds the link register after a store.  All start at 0.  */
  int reg[128];

  memset (reg, 0, sizeof (reg));
  /* Walk forward one 4-byte insn at a time from OFFSET.  */
  for ( ; offset + 4 <= sec->size; offset += 4)
    {
      unsigned char buf[4];
      int rt, ra;
      int imm;

      /* Assume no relocs on stack adjusting insns.  */
      if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
	break;

      /* Decode the RT and RA register fields from the low bits.  */
      rt = buf[3] & 0x7f;
      ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);

      if (buf[0] == 0x24 /* stqd */)
	{
	  /* A store of lr relative to sp records the lr save slot.  */
	  if (rt == 0 /* lr */ && ra == 1 /* sp */)
	    *lr_store = offset;
	  continue;
	}

      /* Partly decoded immediate field.  */
      imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);

      if (buf[0] == 0x1c /* ai */)
	{
	  /* Sign-extend the 10-bit immediate.  */
	  imm >>= 7;
	  imm = (imm ^ 0x200) - 0x200;
	  reg[rt] = reg[ra] + imm;

	  if (rt == 1 /* sp */)
	    {
	      /* A positive sp adjustment is an epilogue; we are past
		 the prologue, so stop.  */
	      if (reg[rt] > 0)
		break;
	      *sp_adjust = offset;
	      return reg[rt];
	    }
	}
      else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
	{
	  int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);

	  reg[rt] = reg[ra] + reg[rb];
	  if (rt == 1)
	    {
	      if (reg[rt] > 0)
		break;
	      *sp_adjust = offset;
	      return reg[rt];
	    }
	}
      else if (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */)
	{
	  int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);

	  /* sf computes rb - ra.  */
	  reg[rt] = reg[rb] - reg[ra];
	  if (rt == 1)
	    {
	      if (reg[rt] > 0)
		break;
	      *sp_adjust = offset;
	      return reg[rt];
	    }
	}
      else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
	{
	  if (buf[0] >= 0x42 /* ila */)
	    imm |= (buf[0] & 1) << 17;
	  else
	    {
	      imm &= 0xffff;

	      if (buf[0] == 0x40 /* il */)
		{
		  if ((buf[1] & 0x80) == 0)
		    continue;
		  /* Sign-extend the 16-bit immediate.  */
		  imm = (imm ^ 0x8000) - 0x8000;
		}
	      else if ((buf[1] & 0x80) == 0 /* ilhu */)
		imm <<= 16;
	    }
	  reg[rt] = imm;
	  continue;
	}
      else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
	{
	  reg[rt] |= imm & 0xffff;
	  continue;
	}
      else if (buf[0] == 0x04 /* ori */)
	{
	  imm >>= 7;
	  imm = (imm ^ 0x200) - 0x200;
	  reg[rt] = reg[ra] | imm;
	  continue;
	}
      else if (buf[0] == 0x32 && (buf[1] & 0x80) != 0 /* fsmbi */)
	{
	  /* Expand each of the low four mask bits to a full byte.  */
	  reg[rt] = ( ((imm & 0x8000) ? 0xff000000 : 0)
		      | ((imm & 0x4000) ? 0x00ff0000 : 0)
		      | ((imm & 0x2000) ? 0x0000ff00 : 0)
		      | ((imm & 0x1000) ? 0x000000ff : 0));
	  continue;
	}
      else if (buf[0] == 0x16 /* andbi */)
	{
	  /* Replicate the byte immediate across the word.  */
	  imm >>= 7;
	  imm &= 0xff;
	  imm |= imm << 8;
	  imm |= imm << 16;
	  reg[rt] = reg[ra] & imm;
	  continue;
	}
      else if (buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
	{
	  /* Used in pic reg load.  Say rt is trashed.  Won't be used
	     in stack adjust, but we need to continue past this branch.  */
	  reg[rt] = 0;
	  continue;
	}
      else if (is_branch (buf) || is_indirect_branch (buf))
	/* If we hit a branch then we must be out of the prologue.  */
	break;
    }

  return 0;
}
2326
2327 /* qsort predicate to sort symbols by section and value. */
2328
2329 static Elf_Internal_Sym *sort_syms_syms;
2330 static asection **sort_syms_psecs;
2331
2332 static int
2333 sort_syms (const void *a, const void *b)
2334 {
2335 Elf_Internal_Sym *const *s1 = a;
2336 Elf_Internal_Sym *const *s2 = b;
2337 asection *sec1,*sec2;
2338 bfd_signed_vma delta;
2339
2340 sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
2341 sec2 = sort_syms_psecs[*s2 - sort_syms_syms];
2342
2343 if (sec1 != sec2)
2344 return sec1->index - sec2->index;
2345
2346 delta = (*s1)->st_value - (*s2)->st_value;
2347 if (delta != 0)
2348 return delta < 0 ? -1 : 1;
2349
2350 delta = (*s2)->st_size - (*s1)->st_size;
2351 if (delta != 0)
2352 return delta < 0 ? -1 : 1;
2353
2354 return *s1 < *s2 ? -1 : 1;
2355 }
2356
2357 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
2358 entries for section SEC. */
2359
2360 static struct spu_elf_stack_info *
2361 alloc_stack_info (asection *sec, int max_fun)
2362 {
2363 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2364 bfd_size_type amt;
2365
2366 amt = sizeof (struct spu_elf_stack_info);
2367 amt += (max_fun - 1) * sizeof (struct function_info);
2368 sec_data->u.i.stack_info = bfd_zmalloc (amt);
2369 if (sec_data->u.i.stack_info != NULL)
2370 sec_data->u.i.stack_info->max_fun = max_fun;
2371 return sec_data->u.i.stack_info;
2372 }
2373
2374 /* Add a new struct function_info describing a (part of a) function
2375 starting at SYM_H. Keep the array sorted by address. */
2376
static struct function_info *
maybe_insert_function (asection *sec,
		       void *sym_h,
		       bfd_boolean global,
		       bfd_boolean is_func)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
  int i;
  bfd_vma off, size;

  /* Lazily create the per-section function array.  */
  if (sinfo == NULL)
    {
      sinfo = alloc_stack_info (sec, 20);
      if (sinfo == NULL)
	return NULL;
    }

  /* SYM_H is an Elf_Internal_Sym * for local syms, a
     struct elf_link_hash_entry * for globals.  */
  if (!global)
    {
      Elf_Internal_Sym *sym = sym_h;
      off = sym->st_value;
      size = sym->st_size;
    }
  else
    {
      struct elf_link_hash_entry *h = sym_h;
      off = h->root.u.def.value;
      size = h->size;
    }

  /* Find the last entry starting at or before OFF; the array is kept
     sorted by lo.  */
  for (i = sinfo->num_fun; --i >= 0; )
    if (sinfo->fun[i].lo <= off)
      break;

  if (i >= 0)
    {
      /* Don't add another entry for an alias, but do update some
	 info.  */
      if (sinfo->fun[i].lo == off)
	{
	  /* Prefer globals over local syms.  */
	  if (global && !sinfo->fun[i].global)
	    {
	      sinfo->fun[i].global = TRUE;
	      sinfo->fun[i].u.h = sym_h;
	    }
	  if (is_func)
	    sinfo->fun[i].is_func = TRUE;
	  return &sinfo->fun[i];
	}
      /* Ignore a zero-size symbol inside an existing function.  */
      else if (sinfo->fun[i].hi > off && size == 0)
	return &sinfo->fun[i];
    }

  /* Grow the array when full.  If bfd_realloc fails the old array is
     still referenced by sec_data->u.i.stack_info, so nothing dangles;
     the link simply fails.  */
  if (sinfo->num_fun >= sinfo->max_fun)
    {
      bfd_size_type amt = sizeof (struct spu_elf_stack_info);
      bfd_size_type old = amt;

      old += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
      amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo = bfd_realloc (sinfo, amt);
      if (sinfo == NULL)
	return NULL;
      /* Zero the newly added entries.  */
      memset ((char *) sinfo + old, 0, amt - old);
      sec_data->u.i.stack_info = sinfo;
    }

  /* Shift following entries up so the array stays sorted.  */
  if (++i < sinfo->num_fun)
    memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
	     (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
  sinfo->fun[i].is_func = is_func;
  sinfo->fun[i].global = global;
  sinfo->fun[i].sec = sec;
  if (global)
    sinfo->fun[i].u.h = sym_h;
  else
    sinfo->fun[i].u.sym = sym_h;
  sinfo->fun[i].lo = off;
  sinfo->fun[i].hi = off + size;
  /* -1 marks "not found" until the prologue scan fills these in.  */
  sinfo->fun[i].lr_store = -1;
  sinfo->fun[i].sp_adjust = -1;
  sinfo->fun[i].stack = -find_function_stack_adjust (sec, off,
						     &sinfo->fun[i].lr_store,
						     &sinfo->fun[i].sp_adjust);
  sinfo->num_fun += 1;
  return &sinfo->fun[i];
}
2468
2469 /* Return the name of FUN. */
2470
2471 static const char *
2472 func_name (struct function_info *fun)
2473 {
2474 asection *sec;
2475 bfd *ibfd;
2476 Elf_Internal_Shdr *symtab_hdr;
2477
2478 while (fun->start != NULL)
2479 fun = fun->start;
2480
2481 if (fun->global)
2482 return fun->u.h->root.root.string;
2483
2484 sec = fun->sec;
2485 if (fun->u.sym->st_name == 0)
2486 {
2487 size_t len = strlen (sec->name);
2488 char *name = bfd_malloc (len + 10);
2489 if (name == NULL)
2490 return "(null)";
2491 sprintf (name, "%s+%lx", sec->name,
2492 (unsigned long) fun->u.sym->st_value & 0xffffffff);
2493 return name;
2494 }
2495 ibfd = sec->owner;
2496 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2497 return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
2498 }
2499
2500 /* Read the instruction at OFF in SEC. Return true iff the instruction
2501 is a nop, lnop, or stop 0 (all zero insn). */
2502
2503 static bfd_boolean
2504 is_nop (asection *sec, bfd_vma off)
2505 {
2506 unsigned char insn[4];
2507
2508 if (off + 4 > sec->size
2509 || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
2510 return FALSE;
2511 if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
2512 return TRUE;
2513 if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
2514 return TRUE;
2515 return FALSE;
2516 }
2517
2518 /* Extend the range of FUN to cover nop padding up to LIMIT.
2519 Return TRUE iff some instruction other than a NOP was found. */
2520
2521 static bfd_boolean
2522 insns_at_end (struct function_info *fun, bfd_vma limit)
2523 {
2524 bfd_vma off = (fun->hi + 3) & -4;
2525
2526 while (off < limit && is_nop (fun->sec, off))
2527 off += 4;
2528 if (off < limit)
2529 {
2530 fun->hi = off;
2531 return TRUE;
2532 }
2533 fun->hi = limit;
2534 return FALSE;
2535 }
2536
2537 /* Check and fix overlapping function ranges. Return TRUE iff there
2538 are gaps in the current info we have about functions in SEC. */
2539
2540 static bfd_boolean
2541 check_function_ranges (asection *sec, struct bfd_link_info *info)
2542 {
2543 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2544 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2545 int i;
2546 bfd_boolean gaps = FALSE;
2547
2548 if (sinfo == NULL)
2549 return FALSE;
2550
2551 for (i = 1; i < sinfo->num_fun; i++)
2552 if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
2553 {
2554 /* Fix overlapping symbols. */
2555 const char *f1 = func_name (&sinfo->fun[i - 1]);
2556 const char *f2 = func_name (&sinfo->fun[i]);
2557
2558 info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
2559 sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
2560 }
2561 else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
2562 gaps = TRUE;
2563
2564 if (sinfo->num_fun == 0)
2565 gaps = TRUE;
2566 else
2567 {
2568 if (sinfo->fun[0].lo != 0)
2569 gaps = TRUE;
2570 if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
2571 {
2572 const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);
2573
2574 info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
2575 sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
2576 }
2577 else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
2578 gaps = TRUE;
2579 }
2580 return gaps;
2581 }
2582
2583 /* Search current function info for a function that contains address
2584 OFFSET in section SEC. */
2585
2586 static struct function_info *
2587 find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
2588 {
2589 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2590 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2591 int lo, hi, mid;
2592
2593 lo = 0;
2594 hi = sinfo->num_fun;
2595 while (lo < hi)
2596 {
2597 mid = (lo + hi) / 2;
2598 if (offset < sinfo->fun[mid].lo)
2599 hi = mid;
2600 else if (offset >= sinfo->fun[mid].hi)
2601 lo = mid + 1;
2602 else
2603 return &sinfo->fun[mid];
2604 }
2605 info->callbacks->einfo (_("%A:0x%v not found in function table\n"),
2606 sec, offset);
2607 bfd_set_error (bfd_error_bad_value);
2608 return NULL;
2609 }
2610
2611 /* Add CALLEE to CALLER call list if not already present. Return TRUE
2612 if CALLEE was new. If this function return FALSE, CALLEE should
2613 be freed. */
2614
2615 static bfd_boolean
2616 insert_callee (struct function_info *caller, struct call_info *callee)
2617 {
2618 struct call_info **pp, *p;
2619
2620 for (pp = &caller->call_list; (p = *pp) != NULL; pp = &p->next)
2621 if (p->fun == callee->fun)
2622 {
2623 /* Tail calls use less stack than normal calls. Retain entry
2624 for normal call over one for tail call. */
2625 p->is_tail &= callee->is_tail;
2626 if (!p->is_tail)
2627 {
2628 p->fun->start = NULL;
2629 p->fun->is_func = TRUE;
2630 }
2631 p->count += callee->count;
2632 /* Reorder list so most recent call is first. */
2633 *pp = p->next;
2634 p->next = caller->call_list;
2635 caller->call_list = p;
2636 return FALSE;
2637 }
2638 callee->next = caller->call_list;
2639 caller->call_list = callee;
2640 return TRUE;
2641 }
2642
2643 /* Copy CALL and insert the copy into CALLER. */
2644
2645 static bfd_boolean
2646 copy_callee (struct function_info *caller, const struct call_info *call)
2647 {
2648 struct call_info *callee;
2649 callee = bfd_malloc (sizeof (*callee));
2650 if (callee == NULL)
2651 return FALSE;
2652 *callee = *call;
2653 if (!insert_callee (caller, callee))
2654 free (callee);
2655 return TRUE;
2656 }
2657
2658 /* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2659 overlay stub sections. */
2660
2661 static bfd_boolean
2662 interesting_section (asection *s)
2663 {
2664 return (s->output_section != bfd_abs_section_ptr
2665 && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_IN_MEMORY))
2666 == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2667 && s->size != 0);
2668 }
2669
2670 /* Rummage through the relocs for SEC, looking for function calls.
2671 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
2672 mark destination symbols on calls as being functions. Also
2673 look at branches, which may be tail calls or go to hot/cold
2674 section part of same function. */
2675
2676 static bfd_boolean
2677 mark_functions_via_relocs (asection *sec,
2678 struct bfd_link_info *info,
2679 int call_tree)
2680 {
2681 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
2682 Elf_Internal_Shdr *symtab_hdr;
2683 void *psyms;
2684 unsigned int priority = 0;
2685 static bfd_boolean warned;
2686
2687 if (!interesting_section (sec)
2688 || sec->reloc_count == 0)
2689 return TRUE;
2690
2691 internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
2692 info->keep_memory);
2693 if (internal_relocs == NULL)
2694 return FALSE;
2695
2696 symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
2697 psyms = &symtab_hdr->contents;
2698 irela = internal_relocs;
2699 irelaend = irela + sec->reloc_count;
2700 for (; irela < irelaend; irela++)
2701 {
2702 enum elf_spu_reloc_type r_type;
2703 unsigned int r_indx;
2704 asection *sym_sec;
2705 Elf_Internal_Sym *sym;
2706 struct elf_link_hash_entry *h;
2707 bfd_vma val;
2708 bfd_boolean nonbranch, is_call;
2709 struct function_info *caller;
2710 struct call_info *callee;
2711
2712 r_type = ELF32_R_TYPE (irela->r_info);
2713 nonbranch = r_type != R_SPU_REL16 && r_type != R_SPU_ADDR16;
2714
2715 r_indx = ELF32_R_SYM (irela->r_info);
2716 if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
2717 return FALSE;
2718
2719 if (sym_sec == NULL
2720 || sym_sec->output_section == bfd_abs_section_ptr)
2721 continue;
2722
2723 is_call = FALSE;
2724 if (!nonbranch)
2725 {
2726 unsigned char insn[4];
2727
2728 if (!bfd_get_section_contents (sec->owner, sec, insn,
2729 irela->r_offset, 4))
2730 return FALSE;
2731 if (is_branch (insn))
2732 {
2733 is_call = (insn[0] & 0xfd) == 0x31;
2734 priority = insn[1] & 0x0f;
2735 priority <<= 8;
2736 priority |= insn[2];
2737 priority <<= 8;
2738 priority |= insn[3];
2739 priority >>= 7;
2740 if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2741 != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2742 {
2743 if (!warned)
2744 info->callbacks->einfo
2745 (_("%B(%A+0x%v): call to non-code section"
2746 " %B(%A), analysis incomplete\n"),
2747 sec->owner, sec, irela->r_offset,
2748 sym_sec->owner, sym_sec);
2749 warned = TRUE;
2750 continue;
2751 }
2752 }
2753 else
2754 {
2755 nonbranch = TRUE;
2756 if (is_hint (insn))
2757 continue;
2758 }
2759 }
2760
2761 if (nonbranch)
2762 {
2763 /* For --auto-overlay, count possible stubs we need for
2764 function pointer references. */
2765 unsigned int sym_type;
2766 if (h)
2767 sym_type = h->type;
2768 else
2769 sym_type = ELF_ST_TYPE (sym->st_info);
2770 if (sym_type == STT_FUNC)
2771 {
2772 if (call_tree && spu_hash_table (info)->params->auto_overlay)
2773 spu_hash_table (info)->non_ovly_stub += 1;
2774 /* If the symbol type is STT_FUNC then this must be a
2775 function pointer initialisation. */
2776 continue;
2777 }
2778 /* Ignore data references. */
2779 if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2780 != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2781 continue;
2782 /* Otherwise we probably have a jump table reloc for
2783 a switch statement or some other reference to a
2784 code label. */
2785 }
2786
2787 if (h)
2788 val = h->root.u.def.value;
2789 else
2790 val = sym->st_value;
2791 val += irela->r_addend;
2792
2793 if (!call_tree)
2794 {
2795 struct function_info *fun;
2796
2797 if (irela->r_addend != 0)
2798 {
2799 Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
2800 if (fake == NULL)
2801 return FALSE;
2802 fake->st_value = val;
2803 fake->st_shndx
2804 = _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
2805 sym = fake;
2806 }
2807 if (sym)
2808 fun = maybe_insert_function (sym_sec, sym, FALSE, is_call);
2809 else
2810 fun = maybe_insert_function (sym_sec, h, TRUE, is_call);
2811 if (fun == NULL)
2812 return FALSE;
2813 if (irela->r_addend != 0
2814 && fun->u.sym != sym)
2815 free (sym);
2816 continue;
2817 }
2818
2819 caller = find_function (sec, irela->r_offset, info);
2820 if (caller == NULL)
2821 return FALSE;
2822 callee = bfd_malloc (sizeof *callee);
2823 if (callee == NULL)
2824 return FALSE;
2825
2826 callee->fun = find_function (sym_sec, val, info);
2827 if (callee->fun == NULL)
2828 return FALSE;
2829 callee->is_tail = !is_call;
2830 callee->is_pasted = FALSE;
2831 callee->broken_cycle = FALSE;
2832 callee->priority = priority;
2833 callee->count = nonbranch? 0 : 1;
2834 if (callee->fun->last_caller != sec)
2835 {
2836 callee->fun->last_caller = sec;
2837 callee->fun->call_count += 1;
2838 }
2839 if (!insert_callee (caller, callee))
2840 free (callee);
2841 else if (!is_call
2842 && !callee->fun->is_func
2843 && callee->fun->stack == 0)
2844 {
2845 /* This is either a tail call or a branch from one part of
2846 the function to another, ie. hot/cold section. If the
2847 destination has been called by some other function then
2848 it is a separate function. We also assume that functions
2849 are not split across input files. */
2850 if (sec->owner != sym_sec->owner)
2851 {
2852 callee->fun->start = NULL;
2853 callee->fun->is_func = TRUE;
2854 }
2855 else if (callee->fun->start == NULL)
2856 {
2857 struct function_info *caller_start = caller;
2858 while (caller_start->start)
2859 caller_start = caller_start->start;
2860
2861 if (caller_start != callee->fun)
2862 callee->fun->start = caller_start;
2863 }
2864 else
2865 {
2866 struct function_info *callee_start;
2867 struct function_info *caller_start;
2868 callee_start = callee->fun;
2869 while (callee_start->start)
2870 callee_start = callee_start->start;
2871 caller_start = caller;
2872 while (caller_start->start)
2873 caller_start = caller_start->start;
2874 if (caller_start != callee_start)
2875 {
2876 callee->fun->start = NULL;
2877 callee->fun->is_func = TRUE;
2878 }
2879 }
2880 }
2881 }
2882
2883 return TRUE;
2884 }
2885
2886 /* Handle something like .init or .fini, which has a piece of a function.
2887 These sections are pasted together to form a single function. */
2888
2889 static bfd_boolean
2890 pasted_function (asection *sec)
2891 {
2892 struct bfd_link_order *l;
2893 struct _spu_elf_section_data *sec_data;
2894 struct spu_elf_stack_info *sinfo;
2895 Elf_Internal_Sym *fake;
2896 struct function_info *fun, *fun_start;
2897
2898 fake = bfd_zmalloc (sizeof (*fake));
2899 if (fake == NULL)
2900 return FALSE;
2901 fake->st_value = 0;
2902 fake->st_size = sec->size;
2903 fake->st_shndx
2904 = _bfd_elf_section_from_bfd_section (sec->owner, sec);
2905 fun = maybe_insert_function (sec, fake, FALSE, FALSE);
2906 if (!fun)
2907 return FALSE;
2908
2909 /* Find a function immediately preceding this section. */
2910 fun_start = NULL;
2911 for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
2912 {
2913 if (l->u.indirect.section == sec)
2914 {
2915 if (fun_start != NULL)
2916 {
2917 struct call_info *callee = bfd_malloc (sizeof *callee);
2918 if (callee == NULL)
2919 return FALSE;
2920
2921 fun->start = fun_start;
2922 callee->fun = fun;
2923 callee->is_tail = TRUE;
2924 callee->is_pasted = TRUE;
2925 callee->broken_cycle = FALSE;
2926 callee->priority = 0;
2927 callee->count = 1;
2928 if (!insert_callee (fun_start, callee))
2929 free (callee);
2930 return TRUE;
2931 }
2932 break;
2933 }
2934 if (l->type == bfd_indirect_link_order
2935 && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
2936 && (sinfo = sec_data->u.i.stack_info) != NULL
2937 && sinfo->num_fun != 0)
2938 fun_start = &sinfo->fun[sinfo->num_fun - 1];
2939 }
2940
2941 /* Don't return an error if we did not find a function preceding this
2942 section. The section may have incorrect flags. */
2943 return TRUE;
2944 }
2945
2946 /* Map address ranges in code sections to functions. */
2947
2948 static bfd_boolean
2949 discover_functions (struct bfd_link_info *info)
2950 {
2951 bfd *ibfd;
2952 int bfd_idx;
2953 Elf_Internal_Sym ***psym_arr;
2954 asection ***sec_arr;
2955 bfd_boolean gaps = FALSE;
2956
2957 bfd_idx = 0;
2958 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
2959 bfd_idx++;
2960
2961 psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
2962 if (psym_arr == NULL)
2963 return FALSE;
2964 sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
2965 if (sec_arr == NULL)
2966 return FALSE;
2967
2968 for (ibfd = info->input_bfds, bfd_idx = 0;
2969 ibfd != NULL;
2970 ibfd = ibfd->link.next, bfd_idx++)
2971 {
2972 extern const bfd_target spu_elf32_vec;
2973 Elf_Internal_Shdr *symtab_hdr;
2974 asection *sec;
2975 size_t symcount;
2976 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2977 asection **psecs, **p;
2978
2979 if (ibfd->xvec != &spu_elf32_vec)
2980 continue;
2981
2982 /* Read all the symbols. */
2983 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2984 symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
2985 if (symcount == 0)
2986 {
2987 if (!gaps)
2988 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2989 if (interesting_section (sec))
2990 {
2991 gaps = TRUE;
2992 break;
2993 }
2994 continue;
2995 }
2996
2997 if (symtab_hdr->contents != NULL)
2998 {
2999 /* Don't use cached symbols since the generic ELF linker
3000 code only reads local symbols, and we need globals too. */
3001 free (symtab_hdr->contents);
3002 symtab_hdr->contents = NULL;
3003 }
3004 syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
3005 NULL, NULL, NULL);
3006 symtab_hdr->contents = (void *) syms;
3007 if (syms == NULL)
3008 return FALSE;
3009
3010 /* Select defined function symbols that are going to be output. */
3011 psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
3012 if (psyms == NULL)
3013 return FALSE;
3014 psym_arr[bfd_idx] = psyms;
3015 psecs = bfd_malloc (symcount * sizeof (*psecs));
3016 if (psecs == NULL)
3017 return FALSE;
3018 sec_arr[bfd_idx] = psecs;
3019 for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
3020 if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
3021 || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
3022 {
3023 asection *s;
3024
3025 *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
3026 if (s != NULL && interesting_section (s))
3027 *psy++ = sy;
3028 }
3029 symcount = psy - psyms;
3030 *psy = NULL;
3031
3032 /* Sort them by section and offset within section. */
3033 sort_syms_syms = syms;
3034 sort_syms_psecs = psecs;
3035 qsort (psyms, symcount, sizeof (*psyms), sort_syms);
3036
3037 /* Now inspect the function symbols. */
3038 for (psy = psyms; psy < psyms + symcount; )
3039 {
3040 asection *s = psecs[*psy - syms];
3041 Elf_Internal_Sym **psy2;
3042
3043 for (psy2 = psy; ++psy2 < psyms + symcount; )
3044 if (psecs[*psy2 - syms] != s)
3045 break;
3046
3047 if (!alloc_stack_info (s, psy2 - psy))
3048 return FALSE;
3049 psy = psy2;
3050 }
3051
3052 /* First install info about properly typed and sized functions.
3053 In an ideal world this will cover all code sections, except
3054 when partitioning functions into hot and cold sections,
3055 and the horrible pasted together .init and .fini functions. */
3056 for (psy = psyms; psy < psyms + symcount; ++psy)
3057 {
3058 sy = *psy;
3059 if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
3060 {
3061 asection *s = psecs[sy - syms];
3062 if (!maybe_insert_function (s, sy, FALSE, TRUE))
3063 return FALSE;
3064 }
3065 }
3066
3067 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
3068 if (interesting_section (sec))
3069 gaps |= check_function_ranges (sec, info);
3070 }
3071
3072 if (gaps)
3073 {
3074 /* See if we can discover more function symbols by looking at
3075 relocations. */
3076 for (ibfd = info->input_bfds, bfd_idx = 0;
3077 ibfd != NULL;
3078 ibfd = ibfd->link.next, bfd_idx++)
3079 {
3080 asection *sec;
3081
3082 if (psym_arr[bfd_idx] == NULL)
3083 continue;
3084
3085 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3086 if (!mark_functions_via_relocs (sec, info, FALSE))
3087 return FALSE;
3088 }
3089
3090 for (ibfd = info->input_bfds, bfd_idx = 0;
3091 ibfd != NULL;
3092 ibfd = ibfd->link.next, bfd_idx++)
3093 {
3094 Elf_Internal_Shdr *symtab_hdr;
3095 asection *sec;
3096 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
3097 asection **psecs;
3098
3099 if ((psyms = psym_arr[bfd_idx]) == NULL)
3100 continue;
3101
3102 psecs = sec_arr[bfd_idx];
3103
3104 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
3105 syms = (Elf_Internal_Sym *) symtab_hdr->contents;
3106
3107 gaps = FALSE;
3108 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
3109 if (interesting_section (sec))
3110 gaps |= check_function_ranges (sec, info);
3111 if (!gaps)
3112 continue;
3113
3114 /* Finally, install all globals. */
3115 for (psy = psyms; (sy = *psy) != NULL; ++psy)
3116 {
3117 asection *s;
3118
3119 s = psecs[sy - syms];
3120
3121 /* Global syms might be improperly typed functions. */
3122 if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
3123 && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
3124 {
3125 if (!maybe_insert_function (s, sy, FALSE, FALSE))
3126 return FALSE;
3127 }
3128 }
3129 }
3130
3131 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3132 {
3133 extern const bfd_target spu_elf32_vec;
3134 asection *sec;
3135
3136 if (ibfd->xvec != &spu_elf32_vec)
3137 continue;
3138
3139 /* Some of the symbols we've installed as marking the
3140 beginning of functions may have a size of zero. Extend
3141 the range of such functions to the beginning of the
3142 next symbol of interest. */
3143 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3144 if (interesting_section (sec))
3145 {
3146 struct _spu_elf_section_data *sec_data;
3147 struct spu_elf_stack_info *sinfo;
3148
3149 sec_data = spu_elf_section_data (sec);
3150 sinfo = sec_data->u.i.stack_info;
3151 if (sinfo != NULL && sinfo->num_fun != 0)
3152 {
3153 int fun_idx;
3154 bfd_vma hi = sec->size;
3155
3156 for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
3157 {
3158 sinfo->fun[fun_idx].hi = hi;
3159 hi = sinfo->fun[fun_idx].lo;
3160 }
3161
3162 sinfo->fun[0].lo = 0;
3163 }
3164 /* No symbols in this section. Must be .init or .fini
3165 or something similar. */
3166 else if (!pasted_function (sec))
3167 return FALSE;
3168 }
3169 }
3170 }
3171
3172 for (ibfd = info->input_bfds, bfd_idx = 0;
3173 ibfd != NULL;
3174 ibfd = ibfd->link.next, bfd_idx++)
3175 {
3176 if (psym_arr[bfd_idx] == NULL)
3177 continue;
3178
3179 free (psym_arr[bfd_idx]);
3180 free (sec_arr[bfd_idx]);
3181 }
3182
3183 free (psym_arr);
3184 free (sec_arr);
3185
3186 return TRUE;
3187 }
3188
3189 /* Iterate over all function_info we have collected, calling DOIT on
3190 each node if ROOT_ONLY is false. Only call DOIT on root nodes
3191 if ROOT_ONLY. */
3192
3193 static bfd_boolean
3194 for_each_node (bfd_boolean (*doit) (struct function_info *,
3195 struct bfd_link_info *,
3196 void *),
3197 struct bfd_link_info *info,
3198 void *param,
3199 int root_only)
3200 {
3201 bfd *ibfd;
3202
3203 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3204 {
3205 extern const bfd_target spu_elf32_vec;
3206 asection *sec;
3207
3208 if (ibfd->xvec != &spu_elf32_vec)
3209 continue;
3210
3211 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3212 {
3213 struct _spu_elf_section_data *sec_data;
3214 struct spu_elf_stack_info *sinfo;
3215
3216 if ((sec_data = spu_elf_section_data (sec)) != NULL
3217 && (sinfo = sec_data->u.i.stack_info) != NULL)
3218 {
3219 int i;
3220 for (i = 0; i < sinfo->num_fun; ++i)
3221 if (!root_only || !sinfo->fun[i].non_root)
3222 if (!doit (&sinfo->fun[i], info, param))
3223 return FALSE;
3224 }
3225 }
3226 }
3227 return TRUE;
3228 }
3229
3230 /* Transfer call info attached to struct function_info entries for
3231 all of a given function's sections to the first entry. */
3232
3233 static bfd_boolean
3234 transfer_calls (struct function_info *fun,
3235 struct bfd_link_info *info ATTRIBUTE_UNUSED,
3236 void *param ATTRIBUTE_UNUSED)
3237 {
3238 struct function_info *start = fun->start;
3239
3240 if (start != NULL)
3241 {
3242 struct call_info *call, *call_next;
3243
3244 while (start->start != NULL)
3245 start = start->start;
3246 for (call = fun->call_list; call != NULL; call = call_next)
3247 {
3248 call_next = call->next;
3249 if (!insert_callee (start, call))
3250 free (call);
3251 }
3252 fun->call_list = NULL;
3253 }
3254 return TRUE;
3255 }
3256
3257 /* Mark nodes in the call graph that are called by some other node. */
3258
3259 static bfd_boolean
3260 mark_non_root (struct function_info *fun,
3261 struct bfd_link_info *info ATTRIBUTE_UNUSED,
3262 void *param ATTRIBUTE_UNUSED)
3263 {
3264 struct call_info *call;
3265
3266 if (fun->visit1)
3267 return TRUE;
3268 fun->visit1 = TRUE;
3269 for (call = fun->call_list; call; call = call->next)
3270 {
3271 call->fun->non_root = TRUE;
3272 mark_non_root (call->fun, 0, 0);
3273 }
3274 return TRUE;
3275 }
3276
3277 /* Remove cycles from the call graph. Set depth of nodes. */
3278
3279 static bfd_boolean
3280 remove_cycles (struct function_info *fun,
3281 struct bfd_link_info *info,
3282 void *param)
3283 {
3284 struct call_info **callp, *call;
3285 unsigned int depth = *(unsigned int *) param;
3286 unsigned int max_depth = depth;
3287
3288 fun->depth = depth;
3289 fun->visit2 = TRUE;
3290 fun->marking = TRUE;
3291
3292 callp = &fun->call_list;
3293 while ((call = *callp) != NULL)
3294 {
3295 call->max_depth = depth + !call->is_pasted;
3296 if (!call->fun->visit2)
3297 {
3298 if (!remove_cycles (call->fun, info, &call->max_depth))
3299 return FALSE;
3300 if (max_depth < call->max_depth)
3301 max_depth = call->max_depth;
3302 }
3303 else if (call->fun->marking)
3304 {
3305 struct spu_link_hash_table *htab = spu_hash_table (info);
3306
3307 if (!htab->params->auto_overlay
3308 && htab->params->stack_analysis)
3309 {
3310 const char *f1 = func_name (fun);
3311 const char *f2 = func_name (call->fun);
3312
3313 info->callbacks->info (_("Stack analysis will ignore the call "
3314 "from %s to %s\n"),
3315 f1, f2);
3316 }
3317
3318 call->broken_cycle = TRUE;
3319 }
3320 callp = &call->next;
3321 }
3322 fun->marking = FALSE;
3323 *(unsigned int *) param = max_depth;
3324 return TRUE;
3325 }
3326
3327 /* Check that we actually visited all nodes in remove_cycles. If we
3328 didn't, then there is some cycle in the call graph not attached to
3329 any root node. Arbitrarily choose a node in the cycle as a new
3330 root and break the cycle. */
3331
3332 static bfd_boolean
3333 mark_detached_root (struct function_info *fun,
3334 struct bfd_link_info *info,
3335 void *param)
3336 {
3337 if (fun->visit2)
3338 return TRUE;
3339 fun->non_root = FALSE;
3340 *(unsigned int *) param = 0;
3341 return remove_cycles (fun, info, param);
3342 }
3343
3344 /* Populate call_list for each function. */
3345
3346 static bfd_boolean
3347 build_call_tree (struct bfd_link_info *info)
3348 {
3349 bfd *ibfd;
3350 unsigned int depth;
3351
3352 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3353 {
3354 extern const bfd_target spu_elf32_vec;
3355 asection *sec;
3356
3357 if (ibfd->xvec != &spu_elf32_vec)
3358 continue;
3359
3360 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3361 if (!mark_functions_via_relocs (sec, info, TRUE))
3362 return FALSE;
3363 }
3364
3365 /* Transfer call info from hot/cold section part of function
3366 to main entry. */
3367 if (!spu_hash_table (info)->params->auto_overlay
3368 && !for_each_node (transfer_calls, info, 0, FALSE))
3369 return FALSE;
3370
3371 /* Find the call graph root(s). */
3372 if (!for_each_node (mark_non_root, info, 0, FALSE))
3373 return FALSE;
3374
3375 /* Remove cycles from the call graph. We start from the root node(s)
3376 so that we break cycles in a reasonable place. */
3377 depth = 0;
3378 if (!for_each_node (remove_cycles, info, &depth, TRUE))
3379 return FALSE;
3380
3381 return for_each_node (mark_detached_root, info, &depth, FALSE);
3382 }
3383
3384 /* qsort predicate to sort calls by priority, max_depth then count. */
3385
3386 static int
3387 sort_calls (const void *a, const void *b)
3388 {
3389 struct call_info *const *c1 = a;
3390 struct call_info *const *c2 = b;
3391 int delta;
3392
3393 delta = (*c2)->priority - (*c1)->priority;
3394 if (delta != 0)
3395 return delta;
3396
3397 delta = (*c2)->max_depth - (*c1)->max_depth;
3398 if (delta != 0)
3399 return delta;
3400
3401 delta = (*c2)->count - (*c1)->count;
3402 if (delta != 0)
3403 return delta;
3404
3405 return (char *) c1 - (char *) c2;
3406 }
3407
/* Parameter block for mark_overlay_section.  */
struct _mos_param {
  /* Largest overlay candidate (text plus paired rodata) seen so far.  */
  unsigned int max_overlay_size;
};
3411
3412 /* Set linker_mark and gc_mark on any sections that we will put in
3413 overlays. These flags are used by the generic ELF linker, but we
3414 won't be continuing on to bfd_elf_final_link so it is OK to use
3415 them. linker_mark is clear before we get here. Set segment_mark
3416 on sections that are part of a pasted function (excluding the last
3417 section).
3418
   Set up function rodata section if --overlay-rodata.  We don't
   currently include merged string constant rodata sections since
   these may be shared among several functions.  (NOTE: the original
   comment trailed off after "since"; reason inferred from the code.)
3422 Sort the call graph so that the deepest nodes will be visited
3423 first. */
3424
static bfd_boolean
mark_overlay_section (struct function_info *fun,
		      struct bfd_link_info *info,
		      void *param)
{
  struct call_info *call;
  unsigned int count;
  struct _mos_param *mos_param = param;
  struct spu_link_hash_table *htab = spu_hash_table (info);

  /* visit4 guards against re-processing nodes reachable via more than
     one path through the call graph.  */
  if (fun->visit4)
    return TRUE;

  fun->visit4 = TRUE;
  if (!fun->sec->linker_mark
      && (htab->params->ovly_flavour != ovly_soft_icache
	  || htab->params->non_ia_text
	  || strncmp (fun->sec->name, ".text.ia.", 9) == 0
	  || strcmp (fun->sec->name, ".init") == 0
	  || strcmp (fun->sec->name, ".fini") == 0))
    {
      unsigned int size;

      fun->sec->linker_mark = 1;
      fun->sec->gc_mark = 1;
      fun->sec->segment_mark = 0;
      /* Ensure SEC_CODE is set on this text section (it ought to
	 be!), and SEC_CODE is clear on rodata sections.  We use
	 this flag to differentiate the two overlay section types.  */
      fun->sec->flags |= SEC_CODE;

      size = fun->sec->size;
      if (htab->params->auto_overlay & OVERLAY_RODATA)
	{
	  char *name = NULL;

	  /* Find the rodata section corresponding to this function's
	     text section.  */
	  if (strcmp (fun->sec->name, ".text") == 0)
	    {
	      /* ".text" pairs with ".rodata".  */
	      name = bfd_malloc (sizeof (".rodata"));
	      if (name == NULL)
		return FALSE;
	      memcpy (name, ".rodata", sizeof (".rodata"));
	    }
	  else if (strncmp (fun->sec->name, ".text.", 6) == 0)
	    {
	      /* ".text.FOO" pairs with ".rodata.FOO".  Buffer is
		 len + 3: ".rodata" is two chars longer than ".text",
		 plus one for the NUL.  */
	      size_t len = strlen (fun->sec->name);
	      name = bfd_malloc (len + 3);
	      if (name == NULL)
		return FALSE;
	      memcpy (name, ".rodata", sizeof (".rodata"));
	      memcpy (name + 7, fun->sec->name + 5, len - 4);
	    }
	  else if (strncmp (fun->sec->name, ".gnu.linkonce.t.", 16) == 0)
	    {
	      /* ".gnu.linkonce.t.FOO" pairs with ".gnu.linkonce.r.FOO";
		 flip the 't' at index 14 to 'r'.  */
	      size_t len = strlen (fun->sec->name) + 1;
	      name = bfd_malloc (len);
	      if (name == NULL)
		return FALSE;
	      memcpy (name, fun->sec->name, len);
	      name[14] = 'r';
	    }

	  if (name != NULL)
	    {
	      asection *rodata = NULL;
	      asection *group_sec = elf_section_data (fun->sec)->next_in_group;
	      /* For grouped sections search the group; otherwise look
		 the name up in the owning bfd.  */
	      if (group_sec == NULL)
		rodata = bfd_get_section_by_name (fun->sec->owner, name);
	      else
		while (group_sec != NULL && group_sec != fun->sec)
		  {
		    if (strcmp (group_sec->name, name) == 0)
		      {
			rodata = group_sec;
			break;
		      }
		    group_sec = elf_section_data (group_sec)->next_in_group;
		  }
	      fun->rodata = rodata;
	      if (fun->rodata)
		{
		  size += fun->rodata->size;
		  if (htab->params->line_size != 0
		      && size > htab->params->line_size)
		    {
		      /* Text plus rodata won't fit a cache line; drop
			 the rodata pairing.  */
		      size -= fun->rodata->size;
		      fun->rodata = NULL;
		    }
		  else
		    {
		      fun->rodata->linker_mark = 1;
		      fun->rodata->gc_mark = 1;
		      fun->rodata->flags &= ~SEC_CODE;
		    }
		}
	      free (name);
	    }
	}
      if (mos_param->max_overlay_size < size)
	mos_param->max_overlay_size = size;
    }

  /* Sort call_list so the deepest nodes are visited first.  */
  for (count = 0, call = fun->call_list; call != NULL; call = call->next)
    count += 1;

  if (count > 1)
    {
      struct call_info **calls = bfd_malloc (count * sizeof (*calls));
      if (calls == NULL)
	return FALSE;

      for (count = 0, call = fun->call_list; call != NULL; call = call->next)
	calls[count++] = call;

      qsort (calls, count, sizeof (*calls), sort_calls);

      /* Rebuild the singly-linked list in sorted order.  */
      fun->call_list = NULL;
      while (count != 0)
	{
	  --count;
	  calls[count]->next = fun->call_list;
	  fun->call_list = calls[count];
	}
      free (calls);
    }

  for (call = fun->call_list; call != NULL; call = call->next)
    {
      if (call->is_pasted)
	{
	  /* There can only be one is_pasted call per function_info.  */
	  BFD_ASSERT (!fun->sec->segment_mark);
	  fun->sec->segment_mark = 1;
	}
      if (!call->broken_cycle
	  && !mark_overlay_section (call->fun, info, param))
	return FALSE;
    }

  /* Don't put entry code into an overlay.  The overlay manager needs
     a stack!  Also, don't mark .ovl.init as an overlay.  */
  if (fun->lo + fun->sec->output_offset + fun->sec->output_section->vma
      == info->output_bfd->start_address
      || strncmp (fun->sec->output_section->name, ".ovl.init", 9) == 0)
    {
      fun->sec->linker_mark = 0;
      if (fun->rodata != NULL)
	fun->rodata->linker_mark = 0;
    }
  return TRUE;
}
3578
/* If non-zero then unmark functions called from those within sections
   that we need to unmark.  Unfortunately this isn't reliable since the
   call graph cannot know the destination of function pointer calls.  */
#define RECURSE_UNMARK 0

/* Parameter block for unmark_overlay_section.  */
struct _uos_param {
  /* Input section whose functions must stay out of overlays
     (e.g. a user-supplied overlay manager), or NULL.  */
  asection *exclude_input_section;
  /* Output section to exclude (e.g. ".interrupt"), or NULL.  */
  asection *exclude_output_section;
  /* Nesting count of excluded sections on the current call path;
     only used when RECURSE_UNMARK is non-zero.  */
  unsigned long clearing;
};
3589
3590 /* Undo some of mark_overlay_section's work. */
3591
3592 static bfd_boolean
3593 unmark_overlay_section (struct function_info *fun,
3594 struct bfd_link_info *info,
3595 void *param)
3596 {
3597 struct call_info *call;
3598 struct _uos_param *uos_param = param;
3599 unsigned int excluded = 0;
3600
3601 if (fun->visit5)
3602 return TRUE;
3603
3604 fun->visit5 = TRUE;
3605
3606 excluded = 0;
3607 if (fun->sec == uos_param->exclude_input_section
3608 || fun->sec->output_section == uos_param->exclude_output_section)
3609 excluded = 1;
3610
3611 if (RECURSE_UNMARK)
3612 uos_param->clearing += excluded;
3613
3614 if (RECURSE_UNMARK ? uos_param->clearing : excluded)
3615 {
3616 fun->sec->linker_mark = 0;
3617 if (fun->rodata)
3618 fun->rodata->linker_mark = 0;
3619 }
3620
3621 for (call = fun->call_list; call != NULL; call = call->next)
3622 if (!call->broken_cycle
3623 && !unmark_overlay_section (call->fun, info, param))
3624 return FALSE;
3625
3626 if (RECURSE_UNMARK)
3627 uos_param->clearing -= excluded;
3628 return TRUE;
3629 }
3630
/* Parameter block for collect_lib_sections.  */
struct _cl_param {
  /* Remaining space available for non-overlay "library" functions.  */
  unsigned int lib_size;
  /* Output cursor: array filled with (text, rodata) section pairs.  */
  asection **lib_sections;
};
3635
3636 /* Add sections we have marked as belonging to overlays to an array
3637 for consideration as non-overlay sections. The array consist of
3638 pairs of sections, (text,rodata), for functions in the call graph. */
3639
3640 static bfd_boolean
3641 collect_lib_sections (struct function_info *fun,
3642 struct bfd_link_info *info,
3643 void *param)
3644 {
3645 struct _cl_param *lib_param = param;
3646 struct call_info *call;
3647 unsigned int size;
3648
3649 if (fun->visit6)
3650 return TRUE;
3651
3652 fun->visit6 = TRUE;
3653 if (!fun->sec->linker_mark || !fun->sec->gc_mark || fun->sec->segment_mark)
3654 return TRUE;
3655
3656 size = fun->sec->size;
3657 if (fun->rodata)
3658 size += fun->rodata->size;
3659
3660 if (size <= lib_param->lib_size)
3661 {
3662 *lib_param->lib_sections++ = fun->sec;
3663 fun->sec->gc_mark = 0;
3664 if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
3665 {
3666 *lib_param->lib_sections++ = fun->rodata;
3667 fun->rodata->gc_mark = 0;
3668 }
3669 else
3670 *lib_param->lib_sections++ = NULL;
3671 }
3672
3673 for (call = fun->call_list; call != NULL; call = call->next)
3674 if (!call->broken_cycle)
3675 collect_lib_sections (call->fun, info, param);
3676
3677 return TRUE;
3678 }
3679
3680 /* qsort predicate to sort sections by call count. */
3681
3682 static int
3683 sort_lib (const void *a, const void *b)
3684 {
3685 asection *const *s1 = a;
3686 asection *const *s2 = b;
3687 struct _spu_elf_section_data *sec_data;
3688 struct spu_elf_stack_info *sinfo;
3689 int delta;
3690
3691 delta = 0;
3692 if ((sec_data = spu_elf_section_data (*s1)) != NULL
3693 && (sinfo = sec_data->u.i.stack_info) != NULL)
3694 {
3695 int i;
3696 for (i = 0; i < sinfo->num_fun; ++i)
3697 delta -= sinfo->fun[i].call_count;
3698 }
3699
3700 if ((sec_data = spu_elf_section_data (*s2)) != NULL
3701 && (sinfo = sec_data->u.i.stack_info) != NULL)
3702 {
3703 int i;
3704 for (i = 0; i < sinfo->num_fun; ++i)
3705 delta += sinfo->fun[i].call_count;
3706 }
3707
3708 if (delta != 0)
3709 return delta;
3710
3711 return s1 - s2;
3712 }
3713
3714 /* Remove some sections from those marked to be in overlays. Choose
3715 those that are called from many places, likely library functions. */
3716
3717 static unsigned int
3718 auto_ovl_lib_functions (struct bfd_link_info *info, unsigned int lib_size)
3719 {
3720 bfd *ibfd;
3721 asection **lib_sections;
3722 unsigned int i, lib_count;
3723 struct _cl_param collect_lib_param;
3724 struct function_info dummy_caller;
3725 struct spu_link_hash_table *htab;
3726
3727 memset (&dummy_caller, 0, sizeof (dummy_caller));
3728 lib_count = 0;
3729 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3730 {
3731 extern const bfd_target spu_elf32_vec;
3732 asection *sec;
3733
3734 if (ibfd->xvec != &spu_elf32_vec)
3735 continue;
3736
3737 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3738 if (sec->linker_mark
3739 && sec->size < lib_size
3740 && (sec->flags & SEC_CODE) != 0)
3741 lib_count += 1;
3742 }
3743 lib_sections = bfd_malloc (lib_count * 2 * sizeof (*lib_sections));
3744 if (lib_sections == NULL)
3745 return (unsigned int) -1;
3746 collect_lib_param.lib_size = lib_size;
3747 collect_lib_param.lib_sections = lib_sections;
3748 if (!for_each_node (collect_lib_sections, info, &collect_lib_param,
3749 TRUE))
3750 return (unsigned int) -1;
3751 lib_count = (collect_lib_param.lib_sections - lib_sections) / 2;
3752
3753 /* Sort sections so that those with the most calls are first. */
3754 if (lib_count > 1)
3755 qsort (lib_sections, lib_count, 2 * sizeof (*lib_sections), sort_lib);
3756
3757 htab = spu_hash_table (info);
3758 for (i = 0; i < lib_count; i++)
3759 {
3760 unsigned int tmp, stub_size;
3761 asection *sec;
3762 struct _spu_elf_section_data *sec_data;
3763 struct spu_elf_stack_info *sinfo;
3764
3765 sec = lib_sections[2 * i];
3766 /* If this section is OK, its size must be less than lib_size. */
3767 tmp = sec->size;
3768 /* If it has a rodata section, then add that too. */
3769 if (lib_sections[2 * i + 1])
3770 tmp += lib_sections[2 * i + 1]->size;
3771 /* Add any new overlay call stubs needed by the section. */
3772 stub_size = 0;
3773 if (tmp < lib_size
3774 && (sec_data = spu_elf_section_data (sec)) != NULL
3775 && (sinfo = sec_data->u.i.stack_info) != NULL)
3776 {
3777 int k;
3778 struct call_info *call;
3779
3780 for (k = 0; k < sinfo->num_fun; ++k)
3781 for (call = sinfo->fun[k].call_list; call; call = call->next)
3782 if (call->fun->sec->linker_mark)
3783 {
3784 struct call_info *p;
3785 for (p = dummy_caller.call_list; p; p = p->next)
3786 if (p->fun == call->fun)
3787 break;
3788 if (!p)
3789 stub_size += ovl_stub_size (htab->params);
3790 }
3791 }
3792 if (tmp + stub_size < lib_size)
3793 {
3794 struct call_info **pp, *p;
3795
3796 /* This section fits. Mark it as non-overlay. */
3797 lib_sections[2 * i]->linker_mark = 0;
3798 if (lib_sections[2 * i + 1])
3799 lib_sections[2 * i + 1]->linker_mark = 0;
3800 lib_size -= tmp + stub_size;
3801 /* Call stubs to the section we just added are no longer
3802 needed. */
3803 pp = &dummy_caller.call_list;
3804 while ((p = *pp) != NULL)
3805 if (!p->fun->sec->linker_mark)
3806 {
3807 lib_size += ovl_stub_size (htab->params);
3808 *pp = p->next;
3809 free (p);
3810 }
3811 else
3812 pp = &p->next;
3813 /* Add new call stubs to dummy_caller. */
3814 if ((sec_data = spu_elf_section_data (sec)) != NULL
3815 && (sinfo = sec_data->u.i.stack_info) != NULL)
3816 {
3817 int k;
3818 struct call_info *call;
3819
3820 for (k = 0; k < sinfo->num_fun; ++k)
3821 for (call = sinfo->fun[k].call_list;
3822 call;
3823 call = call->next)
3824 if (call->fun->sec->linker_mark)
3825 {
3826 struct call_info *callee;
3827 callee = bfd_malloc (sizeof (*callee));
3828 if (callee == NULL)
3829 return (unsigned int) -1;
3830 *callee = *call;
3831 if (!insert_callee (&dummy_caller, callee))
3832 free (callee);
3833 }
3834 }
3835 }
3836 }
3837 while (dummy_caller.call_list != NULL)
3838 {
3839 struct call_info *call = dummy_caller.call_list;
3840 dummy_caller.call_list = call->next;
3841 free (call);
3842 }
3843 for (i = 0; i < 2 * lib_count; i++)
3844 if (lib_sections[i])
3845 lib_sections[i]->gc_mark = 1;
3846 free (lib_sections);
3847 return lib_size;
3848 }
3849
3850 /* Build an array of overlay sections. The deepest node's section is
3851 added first, then its parent node's section, then everything called
3852 from the parent section. The idea being to group sections to
3853 minimise calls between different overlays. */
3854
static bfd_boolean
collect_overlays (struct function_info *fun,
		  struct bfd_link_info *info,
		  void *param)
{
  struct call_info *call;
  bfd_boolean added_fun;
  /* PARAM is a cursor into the output array; sections are appended in
     (text, rodata) pairs.  */
  asection ***ovly_sections = param;

  if (fun->visit7)
    return TRUE;

  fun->visit7 = TRUE;
  /* Visit only the first (deepest, thanks to the sort in
     mark_overlay_section) non-pasted callee before adding this
     function, so the deepest node's section lands first.  */
  for (call = fun->call_list; call != NULL; call = call->next)
    if (!call->is_pasted && !call->broken_cycle)
      {
	if (!collect_overlays (call->fun, info, ovly_sections))
	  return FALSE;
	break;
      }

  added_fun = FALSE;
  if (fun->sec->linker_mark && fun->sec->gc_mark)
    {
      /* gc_mark doubles as "not yet placed"; clear it once the
	 section is appended.  */
      fun->sec->gc_mark = 0;
      *(*ovly_sections)++ = fun->sec;
      if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
	{
	  fun->rodata->gc_mark = 0;
	  *(*ovly_sections)++ = fun->rodata;
	}
      else
	*(*ovly_sections)++ = NULL;
      added_fun = TRUE;

      /* Pasted sections must stay with the first section.  We don't
	 put pasted sections in the array, just the first section.
	 Mark subsequent sections as already considered.  */
      if (fun->sec->segment_mark)
	{
	  struct function_info *call_fun = fun;
	  do
	    {
	      for (call = call_fun->call_list; call != NULL; call = call->next)
		if (call->is_pasted)
		  {
		    call_fun = call->fun;
		    call_fun->sec->gc_mark = 0;
		    if (call_fun->rodata)
		      call_fun->rodata->gc_mark = 0;
		    break;
		  }
	      /* A segment_mark section must have a pasted call; its
		 absence means the call graph is corrupt.  */
	      if (call == NULL)
		abort ();
	    }
	  while (call_fun->sec->segment_mark);
	}
    }

  /* Now visit all remaining callees.  */
  for (call = fun->call_list; call != NULL; call = call->next)
    if (!call->broken_cycle
	&& !collect_overlays (call->fun, info, ovly_sections))
      return FALSE;

  if (added_fun)
    {
      struct _spu_elf_section_data *sec_data;
      struct spu_elf_stack_info *sinfo;

      /* Also pull in the other functions sharing this section, so a
	 section is handled completely once added.  */
      if ((sec_data = spu_elf_section_data (fun->sec)) != NULL
	  && (sinfo = sec_data->u.i.stack_info) != NULL)
	{
	  int i;
	  for (i = 0; i < sinfo->num_fun; ++i)
	    if (!collect_overlays (&sinfo->fun[i], info, ovly_sections))
	      return FALSE;
	}
    }

  return TRUE;
}
3936
/* Parameter block for sum_stack.  */
struct _sum_stack_param {
  /* Cumulative stack of the node just visited (per-call output).  */
  size_t cum_stack;
  /* Largest cumulative stack seen over all root nodes.  */
  size_t overall_stack;
  /* Whether to define absolute __stack_* symbols.  */
  bfd_boolean emit_stack_syms;
};
3942
3943 /* Descend the call graph for FUN, accumulating total stack required. */
3944
static bfd_boolean
sum_stack (struct function_info *fun,
	   struct bfd_link_info *info,
	   void *param)
{
  struct call_info *call;
  struct function_info *max;
  size_t stack, cum_stack;
  const char *f1;
  bfd_boolean has_call;
  struct _sum_stack_param *sum_stack_param = param;
  struct spu_link_hash_table *htab;

  cum_stack = fun->stack;
  sum_stack_param->cum_stack = cum_stack;
  /* Once visited, fun->stack already holds the cumulative value
     (see below), so it was reported via cum_stack above.  */
  if (fun->visit3)
    return TRUE;

  has_call = FALSE;
  max = NULL;
  /* Find the worst-case (deepest stack) callee path.  */
  for (call = fun->call_list; call; call = call->next)
    {
      if (call->broken_cycle)
	continue;
      if (!call->is_pasted)
	has_call = TRUE;
      if (!sum_stack (call->fun, info, sum_stack_param))
	return FALSE;
      stack = sum_stack_param->cum_stack;
      /* Include caller stack for normal calls, don't do so for
	 tail calls.  fun->stack here is local stack usage for
	 this function.  */
      if (!call->is_tail || call->is_pasted || call->fun->start != NULL)
	stack += fun->stack;
      if (cum_stack < stack)
	{
	  cum_stack = stack;
	  max = call->fun;
	}
    }

  sum_stack_param->cum_stack = cum_stack;
  stack = fun->stack;
  /* Now fun->stack holds cumulative stack.  */
  fun->stack = cum_stack;
  fun->visit3 = TRUE;

  if (!fun->non_root
      && sum_stack_param->overall_stack < cum_stack)
    sum_stack_param->overall_stack = cum_stack;

  htab = spu_hash_table (info);
  /* When auto-overlaying we only need the numbers, not the report.  */
  if (htab->params->auto_overlay)
    return TRUE;

  f1 = func_name (fun);
  if (htab->params->stack_analysis)
    {
      if (!fun->non_root)
	info->callbacks->info (_("  %s: 0x%v\n"), f1, (bfd_vma) cum_stack);
      info->callbacks->minfo (_("%s: 0x%v 0x%v\n"),
			      f1, (bfd_vma) stack, (bfd_vma) cum_stack);

      if (has_call)
	{
	  info->callbacks->minfo (_("  calls:\n"));
	  for (call = fun->call_list; call; call = call->next)
	    if (!call->is_pasted && !call->broken_cycle)
	      {
		const char *f2 = func_name (call->fun);
		/* '*' marks the callee on the worst-case path, 't'
		   marks tail calls.  */
		const char *ann1 = call->fun == max ? "*" : " ";
		const char *ann2 = call->is_tail ? "t" : " ";

		info->callbacks->minfo (_("   %s%s %s\n"), ann1, ann2, f2);
	      }
	}
    }

  if (sum_stack_param->emit_stack_syms)
    {
      /* 18 covers "__stack_" (8), up to 8 hex digits, '_' and NUL.  */
      char *name = bfd_malloc (18 + strlen (f1));
      struct elf_link_hash_entry *h;

      if (name == NULL)
	return FALSE;

      /* Local symbols get the section id folded into the name to keep
	 the __stack_* symbols unique.  */
      if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
	sprintf (name, "__stack_%s", f1);
      else
	sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);

      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
      free (name);
      if (h != NULL
	  && (h->root.type == bfd_link_hash_new
	      || h->root.type == bfd_link_hash_undefined
	      || h->root.type == bfd_link_hash_undefweak))
	{
	  /* Define an absolute symbol whose value is the cumulative
	     stack requirement of this function.  */
	  h->root.type = bfd_link_hash_defined;
	  h->root.u.def.section = bfd_abs_section_ptr;
	  h->root.u.def.value = cum_stack;
	  h->size = 0;
	  h->type = 0;
	  h->ref_regular = 1;
	  h->def_regular = 1;
	  h->ref_regular_nonweak = 1;
	  h->forced_local = 1;
	  h->non_elf = 0;
	}
    }

  return TRUE;
}
4058
4059 /* SEC is part of a pasted function. Return the call_info for the
4060 next section of this function. */
4061
4062 static struct call_info *
4063 find_pasted_call (asection *sec)
4064 {
4065 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
4066 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
4067 struct call_info *call;
4068 int k;
4069
4070 for (k = 0; k < sinfo->num_fun; ++k)
4071 for (call = sinfo->fun[k].call_list; call != NULL; call = call->next)
4072 if (call->is_pasted)
4073 return call;
4074 abort ();
4075 return 0;
4076 }
4077
4078 /* qsort predicate to sort bfds by file name. */
4079
4080 static int
4081 sort_bfds (const void *a, const void *b)
4082 {
4083 bfd *const *abfd1 = a;
4084 bfd *const *abfd2 = b;
4085
4086 return filename_cmp ((*abfd1)->filename, (*abfd2)->filename);
4087 }
4088
4089 static unsigned int
4090 print_one_overlay_section (FILE *script,
4091 unsigned int base,
4092 unsigned int count,
4093 unsigned int ovlynum,
4094 unsigned int *ovly_map,
4095 asection **ovly_sections,
4096 struct bfd_link_info *info)
4097 {
4098 unsigned int j;
4099
4100 for (j = base; j < count && ovly_map[j] == ovlynum; j++)
4101 {
4102 asection *sec = ovly_sections[2 * j];
4103
4104 if (fprintf (script, " %s%c%s (%s)\n",
4105 (sec->owner->my_archive != NULL
4106 ? sec->owner->my_archive->filename : ""),
4107 info->path_separator,
4108 sec->owner->filename,
4109 sec->name) <= 0)
4110 return -1;
4111 if (sec->segment_mark)
4112 {
4113 struct call_info *call = find_pasted_call (sec);
4114 while (call != NULL)
4115 {
4116 struct function_info *call_fun = call->fun;
4117 sec = call_fun->sec;
4118 if (fprintf (script, " %s%c%s (%s)\n",
4119 (sec->owner->my_archive != NULL
4120 ? sec->owner->my_archive->filename : ""),
4121 info->path_separator,
4122 sec->owner->filename,
4123 sec->name) <= 0)
4124 return -1;
4125 for (call = call_fun->call_list; call; call = call->next)
4126 if (call->is_pasted)
4127 break;
4128 }
4129 }
4130 }
4131
4132 for (j = base; j < count && ovly_map[j] == ovlynum; j++)
4133 {
4134 asection *sec = ovly_sections[2 * j + 1];
4135 if (sec != NULL
4136 && fprintf (script, " %s%c%s (%s)\n",
4137 (sec->owner->my_archive != NULL
4138 ? sec->owner->my_archive->filename : ""),
4139 info->path_separator,
4140 sec->owner->filename,
4141 sec->name) <= 0)
4142 return -1;
4143
4144 sec = ovly_sections[2 * j];
4145 if (sec->segment_mark)
4146 {
4147 struct call_info *call = find_pasted_call (sec);
4148 while (call != NULL)
4149 {
4150 struct function_info *call_fun = call->fun;
4151 sec = call_fun->rodata;
4152 if (sec != NULL
4153 && fprintf (script, " %s%c%s (%s)\n",
4154 (sec->owner->my_archive != NULL
4155 ? sec->owner->my_archive->filename : ""),
4156 info->path_separator,
4157 sec->owner->filename,
4158 sec->name) <= 0)
4159 return -1;
4160 for (call = call_fun->call_list; call; call = call->next)
4161 if (call->is_pasted)
4162 break;
4163 }
4164 }
4165 }
4166
4167 return j;
4168 }
4169
4170 /* Handle --auto-overlay. */
4171
4172 static void
4173 spu_elf_auto_overlay (struct bfd_link_info *info)
4174 {
4175 bfd *ibfd;
4176 bfd **bfd_arr;
4177 struct elf_segment_map *m;
4178 unsigned int fixed_size, lo, hi;
4179 unsigned int reserved;
4180 struct spu_link_hash_table *htab;
4181 unsigned int base, i, count, bfd_count;
4182 unsigned int region, ovlynum;
4183 asection **ovly_sections, **ovly_p;
4184 unsigned int *ovly_map;
4185 FILE *script;
4186 unsigned int total_overlay_size, overlay_size;
4187 const char *ovly_mgr_entry;
4188 struct elf_link_hash_entry *h;
4189 struct _mos_param mos_param;
4190 struct _uos_param uos_param;
4191 struct function_info dummy_caller;
4192
4193 /* Find the extents of our loadable image. */
4194 lo = (unsigned int) -1;
4195 hi = 0;
4196 for (m = elf_seg_map (info->output_bfd); m != NULL; m = m->next)
4197 if (m->p_type == PT_LOAD)
4198 for (i = 0; i < m->count; i++)
4199 if (m->sections[i]->size != 0)
4200 {
4201 if (m->sections[i]->vma < lo)
4202 lo = m->sections[i]->vma;
4203 if (m->sections[i]->vma + m->sections[i]->size - 1 > hi)
4204 hi = m->sections[i]->vma + m->sections[i]->size - 1;
4205 }
4206 fixed_size = hi + 1 - lo;
4207
4208 if (!discover_functions (info))
4209 goto err_exit;
4210
4211 if (!build_call_tree (info))
4212 goto err_exit;
4213
4214 htab = spu_hash_table (info);
4215 reserved = htab->params->auto_overlay_reserved;
4216 if (reserved == 0)
4217 {
4218 struct _sum_stack_param sum_stack_param;
4219
4220 sum_stack_param.emit_stack_syms = 0;
4221 sum_stack_param.overall_stack = 0;
4222 if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
4223 goto err_exit;
4224 reserved = (sum_stack_param.overall_stack
4225 + htab->params->extra_stack_space);
4226 }
4227
4228 /* No need for overlays if everything already fits. */
4229 if (fixed_size + reserved <= htab->local_store
4230 && htab->params->ovly_flavour != ovly_soft_icache)
4231 {
4232 htab->params->auto_overlay = 0;
4233 return;
4234 }
4235
4236 uos_param.exclude_input_section = 0;
4237 uos_param.exclude_output_section
4238 = bfd_get_section_by_name (info->output_bfd, ".interrupt");
4239
4240 ovly_mgr_entry = "__ovly_load";
4241 if (htab->params->ovly_flavour == ovly_soft_icache)
4242 ovly_mgr_entry = "__icache_br_handler";
4243 h = elf_link_hash_lookup (&htab->elf, ovly_mgr_entry,
4244 FALSE, FALSE, FALSE);
4245 if (h != NULL
4246 && (h->root.type == bfd_link_hash_defined
4247 || h->root.type == bfd_link_hash_defweak)
4248 && h->def_regular)
4249 {
4250 /* We have a user supplied overlay manager. */
4251 uos_param.exclude_input_section = h->root.u.def.section;
4252 }
4253 else
4254 {
4255 /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
4256 builtin version to .text, and will adjust .text size. */
4257 fixed_size += (*htab->params->spu_elf_load_ovl_mgr) ();
4258 }
4259
4260 /* Mark overlay sections, and find max overlay section size. */
4261 mos_param.max_overlay_size = 0;
4262 if (!for_each_node (mark_overlay_section, info, &mos_param, TRUE))
4263 goto err_exit;
4264
4265 /* We can't put the overlay manager or interrupt routines in
4266 overlays. */
4267 uos_param.clearing = 0;
4268 if ((uos_param.exclude_input_section
4269 || uos_param.exclude_output_section)
4270 && !for_each_node (unmark_overlay_section, info, &uos_param, TRUE))
4271 goto err_exit;
4272
4273 bfd_count = 0;
4274 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
4275 ++bfd_count;
4276 bfd_arr = bfd_malloc (bfd_count * sizeof (*bfd_arr));
4277 if (bfd_arr == NULL)
4278 goto err_exit;
4279
4280 /* Count overlay sections, and subtract their sizes from "fixed_size". */
4281 count = 0;
4282 bfd_count = 0;
4283 total_overlay_size = 0;
4284 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
4285 {
4286 extern const bfd_target spu_elf32_vec;
4287 asection *sec;
4288 unsigned int old_count;
4289
4290 if (ibfd->xvec != &spu_elf32_vec)
4291 continue;
4292
4293 old_count = count;
4294 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
4295 if (sec->linker_mark)
4296 {
4297 if ((sec->flags & SEC_CODE) != 0)
4298 count += 1;
4299 fixed_size -= sec->size;
4300 total_overlay_size += sec->size;
4301 }
4302 else if ((sec->flags & (SEC_ALLOC | SEC_LOAD)) == (SEC_ALLOC | SEC_LOAD)
4303 && sec->output_section->owner == info->output_bfd
4304 && strncmp (sec->output_section->name, ".ovl.init", 9) == 0)
4305 fixed_size -= sec->size;
4306 if (count != old_count)
4307 bfd_arr[bfd_count++] = ibfd;
4308 }
4309
4310 /* Since the overlay link script selects sections by file name and
4311 section name, ensure that file names are unique. */
4312 if (bfd_count > 1)
4313 {
4314 bfd_boolean ok = TRUE;
4315
4316 qsort (bfd_arr, bfd_count, sizeof (*bfd_arr), sort_bfds);
4317 for (i = 1; i < bfd_count; ++i)
4318 if (filename_cmp (bfd_arr[i - 1]->filename, bfd_arr[i]->filename) == 0)
4319 {
4320 if (bfd_arr[i - 1]->my_archive == bfd_arr[i]->my_archive)
4321 {
4322 if (bfd_arr[i - 1]->my_archive && bfd_arr[i]->my_archive)
4323 info->callbacks->einfo (_("%s duplicated in %s\n"),
4324 bfd_arr[i]->filename,
4325 bfd_arr[i]->my_archive->filename);
4326 else
4327 info->callbacks->einfo (_("%s duplicated\n"),
4328 bfd_arr[i]->filename);
4329 ok = FALSE;
4330 }
4331 }
4332 if (!ok)
4333 {
4334 info->callbacks->einfo (_("sorry, no support for duplicate "
4335 "object files in auto-overlay script\n"));
4336 bfd_set_error (bfd_error_bad_value);
4337 goto err_exit;
4338 }
4339 }
4340 free (bfd_arr);
4341
4342 fixed_size += reserved;
4343 fixed_size += htab->non_ovly_stub * ovl_stub_size (htab->params);
4344 if (fixed_size + mos_param.max_overlay_size <= htab->local_store)
4345 {
4346 if (htab->params->ovly_flavour == ovly_soft_icache)
4347 {
4348 /* Stubs in the non-icache area are bigger. */
4349 fixed_size += htab->non_ovly_stub * 16;
4350 /* Space for icache manager tables.
4351 a) Tag array, one quadword per cache line.
4352 - word 0: ia address of present line, init to zero. */
4353 fixed_size += 16 << htab->num_lines_log2;
4354 /* b) Rewrite "to" list, one quadword per cache line. */
4355 fixed_size += 16 << htab->num_lines_log2;
4356 /* c) Rewrite "from" list, one byte per outgoing branch (rounded up
4357 to a power-of-two number of full quadwords) per cache line. */
4358 fixed_size += 16 << (htab->fromelem_size_log2
4359 + htab->num_lines_log2);
4360 /* d) Pointer to __ea backing store (toe), 1 quadword. */
4361 fixed_size += 16;
4362 }
4363 else
4364 {
4365 /* Guess number of overlays. Assuming overlay buffer is on
4366 average only half full should be conservative. */
4367 ovlynum = (total_overlay_size * 2 * htab->params->num_lines
4368 / (htab->local_store - fixed_size));
4369 /* Space for _ovly_table[], _ovly_buf_table[] and toe. */
4370 fixed_size += ovlynum * 16 + 16 + 4 + 16;
4371 }
4372 }
4373
4374 if (fixed_size + mos_param.max_overlay_size > htab->local_store)
4375 info->callbacks->einfo (_("non-overlay size of 0x%v plus maximum overlay "
4376 "size of 0x%v exceeds local store\n"),
4377 (bfd_vma) fixed_size,
4378 (bfd_vma) mos_param.max_overlay_size);
4379
4380 /* Now see if we should put some functions in the non-overlay area. */
4381 else if (fixed_size < htab->params->auto_overlay_fixed)
4382 {
4383 unsigned int max_fixed, lib_size;
4384
4385 max_fixed = htab->local_store - mos_param.max_overlay_size;
4386 if (max_fixed > htab->params->auto_overlay_fixed)
4387 max_fixed = htab->params->auto_overlay_fixed;
4388 lib_size = max_fixed - fixed_size;
4389 lib_size = auto_ovl_lib_functions (info, lib_size);
4390 if (lib_size == (unsigned int) -1)
4391 goto err_exit;
4392 fixed_size = max_fixed - lib_size;
4393 }
4394
4395 /* Build an array of sections, suitably sorted to place into
4396 overlays. */
4397 ovly_sections = bfd_malloc (2 * count * sizeof (*ovly_sections));
4398 if (ovly_sections == NULL)
4399 goto err_exit;
4400 ovly_p = ovly_sections;
4401 if (!for_each_node (collect_overlays, info, &ovly_p, TRUE))
4402 goto err_exit;
4403 count = (size_t) (ovly_p - ovly_sections) / 2;
4404 ovly_map = bfd_malloc (count * sizeof (*ovly_map));
4405 if (ovly_map == NULL)
4406 goto err_exit;
4407
4408 memset (&dummy_caller, 0, sizeof (dummy_caller));
4409 overlay_size = (htab->local_store - fixed_size) / htab->params->num_lines;
4410 if (htab->params->line_size != 0)
4411 overlay_size = htab->params->line_size;
4412 base = 0;
4413 ovlynum = 0;
4414 while (base < count)
4415 {
4416 unsigned int size = 0, rosize = 0, roalign = 0;
4417
4418 for (i = base; i < count; i++)
4419 {
4420 asection *sec, *rosec;
4421 unsigned int tmp, rotmp;
4422 unsigned int num_stubs;
4423 struct call_info *call, *pasty;
4424 struct _spu_elf_section_data *sec_data;
4425 struct spu_elf_stack_info *sinfo;
4426 unsigned int k;
4427
4428 /* See whether we can add this section to the current
4429 overlay without overflowing our overlay buffer. */
4430 sec = ovly_sections[2 * i];
4431 tmp = align_power (size, sec->alignment_power) + sec->size;
4432 rotmp = rosize;
4433 rosec = ovly_sections[2 * i + 1];
4434 if (rosec != NULL)
4435 {
4436 rotmp = align_power (rotmp, rosec->alignment_power) + rosec->size;
4437 if (roalign < rosec->alignment_power)
4438 roalign = rosec->alignment_power;
4439 }
4440 if (align_power (tmp, roalign) + rotmp > overlay_size)
4441 break;
4442 if (sec->segment_mark)
4443 {
4444 /* Pasted sections must stay together, so add their
4445 sizes too. */
4446 pasty = find_pasted_call (sec);
4447 while (pasty != NULL)
4448 {
4449 struct function_info *call_fun = pasty->fun;
4450 tmp = (align_power (tmp, call_fun->sec->alignment_power)
4451 + call_fun->sec->size);
4452 if (call_fun->rodata)
4453 {
4454 rotmp = (align_power (rotmp,
4455 call_fun->rodata->alignment_power)
4456 + call_fun->rodata->size);
4457 if (roalign < rosec->alignment_power)
4458 roalign = rosec->alignment_power;
4459 }
4460 for (pasty = call_fun->call_list; pasty; pasty = pasty->next)
4461 if (pasty->is_pasted)
4462 break;
4463 }
4464 }
4465 if (align_power (tmp, roalign) + rotmp > overlay_size)
4466 break;
4467
4468 /* If we add this section, we might need new overlay call
4469 stubs. Add any overlay section calls to dummy_call. */
4470 pasty = NULL;
4471 sec_data = spu_elf_section_data (sec);
4472 sinfo = sec_data->u.i.stack_info;
4473 for (k = 0; k < (unsigned) sinfo->num_fun; ++k)
4474 for (call = sinfo->fun[k].call_list; call; call = call->next)
4475 if (call->is_pasted)
4476 {
4477 BFD_ASSERT (pasty == NULL);
4478 pasty = call;
4479 }
4480 else if (call->fun->sec->linker_mark)
4481 {
4482 if (!copy_callee (&dummy_caller, call))
4483 goto err_exit;
4484 }
4485 while (pasty != NULL)
4486 {
4487 struct function_info *call_fun = pasty->fun;
4488 pasty = NULL;
4489 for (call = call_fun->call_list; call; call = call->next)
4490 if (call->is_pasted)
4491 {
4492 BFD_ASSERT (pasty == NULL);
4493 pasty = call;
4494 }
4495 else if (!copy_callee (&dummy_caller, call))
4496 goto err_exit;
4497 }
4498
4499 /* Calculate call stub size. */
4500 num_stubs = 0;
4501 for (call = dummy_caller.call_list; call; call = call->next)
4502 {
4503 unsigned int stub_delta = 1;
4504
4505 if (htab->params->ovly_flavour == ovly_soft_icache)
4506 stub_delta = call->count;
4507 num_stubs += stub_delta;
4508
4509 /* If the call is within this overlay, we won't need a
4510 stub. */
4511 for (k = base; k < i + 1; k++)
4512 if (call->fun->sec == ovly_sections[2 * k])
4513 {
4514 num_stubs -= stub_delta;
4515 break;
4516 }
4517 }
4518 if (htab->params->ovly_flavour == ovly_soft_icache
4519 && num_stubs > htab->params->max_branch)
4520 break;
4521 if (align_power (tmp, roalign) + rotmp
4522 + num_stubs * ovl_stub_size (htab->params) > overlay_size)
4523 break;
4524 size = tmp;
4525 rosize = rotmp;
4526 }
4527
4528 if (i == base)
4529 {
4530 info->callbacks->einfo (_("%B:%A%s exceeds overlay size\n"),
4531 ovly_sections[2 * i]->owner,
4532 ovly_sections[2 * i],
4533 ovly_sections[2 * i + 1] ? " + rodata" : "");
4534 bfd_set_error (bfd_error_bad_value);
4535 goto err_exit;
4536 }
4537
4538 while (dummy_caller.call_list != NULL)
4539 {
4540 struct call_info *call = dummy_caller.call_list;
4541 dummy_caller.call_list = call->next;
4542 free (call);
4543 }
4544
4545 ++ovlynum;
4546 while (base < i)
4547 ovly_map[base++] = ovlynum;
4548 }
4549
4550 script = htab->params->spu_elf_open_overlay_script ();
4551
4552 if (htab->params->ovly_flavour == ovly_soft_icache)
4553 {
4554 if (fprintf (script, "SECTIONS\n{\n") <= 0)
4555 goto file_err;
4556
4557 if (fprintf (script,
4558 " . = ALIGN (%u);\n"
4559 " .ovl.init : { *(.ovl.init) }\n"
4560 " . = ABSOLUTE (ADDR (.ovl.init));\n",
4561 htab->params->line_size) <= 0)
4562 goto file_err;
4563
4564 base = 0;
4565 ovlynum = 1;
4566 while (base < count)
4567 {
4568 unsigned int indx = ovlynum - 1;
4569 unsigned int vma, lma;
4570
4571 vma = (indx & (htab->params->num_lines - 1)) << htab->line_size_log2;
4572 lma = vma + (((indx >> htab->num_lines_log2) + 1) << 18);
4573
4574 if (fprintf (script, " .ovly%u ABSOLUTE (ADDR (.ovl.init)) + %u "
4575 ": AT (LOADADDR (.ovl.init) + %u) {\n",
4576 ovlynum, vma, lma) <= 0)
4577 goto file_err;
4578
4579 base = print_one_overlay_section (script, base, count, ovlynum,
4580 ovly_map, ovly_sections, info);
4581 if (base == (unsigned) -1)
4582 goto file_err;
4583
4584 if (fprintf (script, " }\n") <= 0)
4585 goto file_err;
4586
4587 ovlynum++;
4588 }
4589
4590 if (fprintf (script, " . = ABSOLUTE (ADDR (.ovl.init)) + %u;\n",
4591 1 << (htab->num_lines_log2 + htab->line_size_log2)) <= 0)
4592 goto file_err;
4593
4594 if (fprintf (script, "}\nINSERT AFTER .toe;\n") <= 0)
4595 goto file_err;
4596 }
4597 else
4598 {
4599 if (fprintf (script, "SECTIONS\n{\n") <= 0)
4600 goto file_err;
4601
4602 if (fprintf (script,
4603 " . = ALIGN (16);\n"
4604 " .ovl.init : { *(.ovl.init) }\n"
4605 " . = ABSOLUTE (ADDR (.ovl.init));\n") <= 0)
4606 goto file_err;
4607
4608 for (region = 1; region <= htab->params->num_lines; region++)
4609 {
4610 ovlynum = region;
4611 base = 0;
4612 while (base < count && ovly_map[base] < ovlynum)
4613 base++;
4614
4615 if (base == count)
4616 break;
4617
4618 if (region == 1)
4619 {
4620 /* We need to set lma since we are overlaying .ovl.init. */
4621 if (fprintf (script,
4622 " OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))\n {\n") <= 0)
4623 goto file_err;
4624 }
4625 else
4626 {
4627 if (fprintf (script, " OVERLAY :\n {\n") <= 0)
4628 goto file_err;
4629 }
4630
4631 while (base < count)
4632 {
4633 if (fprintf (script, " .ovly%u {\n", ovlynum) <= 0)
4634 goto file_err;
4635
4636 base = print_one_overlay_section (script, base, count, ovlynum,
4637 ovly_map, ovly_sections, info);
4638 if (base == (unsigned) -1)
4639 goto file_err;
4640
4641 if (fprintf (script, " }\n") <= 0)
4642 goto file_err;
4643
4644 ovlynum += htab->params->num_lines;
4645 while (base < count && ovly_map[base] < ovlynum)
4646 base++;
4647 }
4648
4649 if (fprintf (script, " }\n") <= 0)
4650 goto file_err;
4651 }
4652
4653 if (fprintf (script, "}\nINSERT BEFORE .text;\n") <= 0)
4654 goto file_err;
4655 }
4656
4657 free (ovly_map);
4658 free (ovly_sections);
4659
4660 if (fclose (script) != 0)
4661 goto file_err;
4662
4663 if (htab->params->auto_overlay & AUTO_RELINK)
4664 (*htab->params->spu_elf_relink) ();
4665
4666 xexit (0);
4667
4668 file_err:
4669 bfd_set_error (bfd_error_system_call);
4670 err_exit:
4671 info->callbacks->einfo ("%F%P: auto overlay error: %E\n");
4672 xexit (1);
4673 }
4674
4675 /* Provide an estimate of total stack required. */
4676
4677 static bfd_boolean
4678 spu_elf_stack_analysis (struct bfd_link_info *info)
4679 {
4680 struct spu_link_hash_table *htab;
4681 struct _sum_stack_param sum_stack_param;
4682
4683 if (!discover_functions (info))
4684 return FALSE;
4685
4686 if (!build_call_tree (info))
4687 return FALSE;
4688
4689 htab = spu_hash_table (info);
4690 if (htab->params->stack_analysis)
4691 {
4692 info->callbacks->info (_("Stack size for call graph root nodes.\n"));
4693 info->callbacks->minfo (_("\nStack size for functions. "
4694 "Annotations: '*' max stack, 't' tail call\n"));
4695 }
4696
4697 sum_stack_param.emit_stack_syms = htab->params->emit_stack_syms;
4698 sum_stack_param.overall_stack = 0;
4699 if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
4700 return FALSE;
4701
4702 if (htab->params->stack_analysis)
4703 info->callbacks->info (_("Maximum stack required is 0x%v\n"),
4704 (bfd_vma) sum_stack_param.overall_stack);
4705 return TRUE;
4706 }
4707
4708 /* Perform a final link. */
4709
4710 static bfd_boolean
4711 spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
4712 {
4713 struct spu_link_hash_table *htab = spu_hash_table (info);
4714
4715 if (htab->params->auto_overlay)
4716 spu_elf_auto_overlay (info);
4717
4718 if ((htab->params->stack_analysis
4719 || (htab->params->ovly_flavour == ovly_soft_icache
4720 && htab->params->lrlive_analysis))
4721 && !spu_elf_stack_analysis (info))
4722 info->callbacks->einfo ("%X%P: stack/lrlive analysis error: %E\n");
4723
4724 if (!spu_elf_build_stubs (info))
4725 info->callbacks->einfo ("%F%P: can not build overlay stubs: %E\n");
4726
4727 return bfd_elf_final_link (output_bfd, info);
4728 }
4729
4730 /* Called when not normally emitting relocs, ie. !info->relocatable
4731 and !info->emitrelocations. Returns a count of special relocs
4732 that need to be emitted. */
4733
4734 static unsigned int
4735 spu_elf_count_relocs (struct bfd_link_info *info, asection *sec)
4736 {
4737 Elf_Internal_Rela *relocs;
4738 unsigned int count = 0;
4739
4740 relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
4741 info->keep_memory);
4742 if (relocs != NULL)
4743 {
4744 Elf_Internal_Rela *rel;
4745 Elf_Internal_Rela *relend = relocs + sec->reloc_count;
4746
4747 for (rel = relocs; rel < relend; rel++)
4748 {
4749 int r_type = ELF32_R_TYPE (rel->r_info);
4750 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
4751 ++count;
4752 }
4753
4754 if (elf_section_data (sec)->relocs != relocs)
4755 free (relocs);
4756 }
4757
4758 return count;
4759 }
4760
/* Functions for adding fixup records to .fixup */

/* Each fixup record is a single 32-bit word: the upper 28 bits hold a
   quadword address and the low 4 bits are a mask of which of the four
   words within that quadword carry an R_SPU_ADDR32 reloc (see the
   packing in spu_elf_emit_fixup and spu_elf_size_sections).  */
#define FIXUP_RECORD_SIZE 4

/* Store ADDR as fixup record INDEX in htab's .fixup section.  */
#define FIXUP_PUT(output_bfd,htab,index,addr) \
	  bfd_put_32 (output_bfd, addr, \
		      htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
/* Read back fixup record INDEX from htab's .fixup section.  */
#define FIXUP_GET(output_bfd,htab,index) \
	  bfd_get_32 (output_bfd, \
		      htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
4771
4772 /* Store OFFSET in .fixup. This assumes it will be called with an
4773 increasing OFFSET. When this OFFSET fits with the last base offset,
4774 it just sets a bit, otherwise it adds a new fixup record. */
4775 static void
4776 spu_elf_emit_fixup (bfd * output_bfd, struct bfd_link_info *info,
4777 bfd_vma offset)
4778 {
4779 struct spu_link_hash_table *htab = spu_hash_table (info);
4780 asection *sfixup = htab->sfixup;
4781 bfd_vma qaddr = offset & ~(bfd_vma) 15;
4782 bfd_vma bit = ((bfd_vma) 8) >> ((offset & 15) >> 2);
4783 if (sfixup->reloc_count == 0)
4784 {
4785 FIXUP_PUT (output_bfd, htab, 0, qaddr | bit);
4786 sfixup->reloc_count++;
4787 }
4788 else
4789 {
4790 bfd_vma base = FIXUP_GET (output_bfd, htab, sfixup->reloc_count - 1);
4791 if (qaddr != (base & ~(bfd_vma) 15))
4792 {
4793 if ((sfixup->reloc_count + 1) * FIXUP_RECORD_SIZE > sfixup->size)
4794 (*_bfd_error_handler) (_("fatal error while creating .fixup"));
4795 FIXUP_PUT (output_bfd, htab, sfixup->reloc_count, qaddr | bit);
4796 sfixup->reloc_count++;
4797 }
4798 else
4799 FIXUP_PUT (output_bfd, htab, sfixup->reloc_count - 1, base | bit);
4800 }
4801 }
4802
4803 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
4804
4805 static int
4806 spu_elf_relocate_section (bfd *output_bfd,
4807 struct bfd_link_info *info,
4808 bfd *input_bfd,
4809 asection *input_section,
4810 bfd_byte *contents,
4811 Elf_Internal_Rela *relocs,
4812 Elf_Internal_Sym *local_syms,
4813 asection **local_sections)
4814 {
4815 Elf_Internal_Shdr *symtab_hdr;
4816 struct elf_link_hash_entry **sym_hashes;
4817 Elf_Internal_Rela *rel, *relend;
4818 struct spu_link_hash_table *htab;
4819 asection *ea;
4820 int ret = TRUE;
4821 bfd_boolean emit_these_relocs = FALSE;
4822 bfd_boolean is_ea_sym;
4823 bfd_boolean stubs;
4824 unsigned int iovl = 0;
4825
4826 htab = spu_hash_table (info);
4827 stubs = (htab->stub_sec != NULL
4828 && maybe_needs_stubs (input_section));
4829 iovl = overlay_index (input_section);
4830 ea = bfd_get_section_by_name (output_bfd, "._ea");
4831 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4832 sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));
4833
4834 rel = relocs;
4835 relend = relocs + input_section->reloc_count;
4836 for (; rel < relend; rel++)
4837 {
4838 int r_type;
4839 reloc_howto_type *howto;
4840 unsigned int r_symndx;
4841 Elf_Internal_Sym *sym;
4842 asection *sec;
4843 struct elf_link_hash_entry *h;
4844 const char *sym_name;
4845 bfd_vma relocation;
4846 bfd_vma addend;
4847 bfd_reloc_status_type r;
4848 bfd_boolean unresolved_reloc;
4849 enum _stub_type stub_type;
4850
4851 r_symndx = ELF32_R_SYM (rel->r_info);
4852 r_type = ELF32_R_TYPE (rel->r_info);
4853 howto = elf_howto_table + r_type;
4854 unresolved_reloc = FALSE;
4855 h = NULL;
4856 sym = NULL;
4857 sec = NULL;
4858 if (r_symndx < symtab_hdr->sh_info)
4859 {
4860 sym = local_syms + r_symndx;
4861 sec = local_sections[r_symndx];
4862 sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
4863 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
4864 }
4865 else
4866 {
4867 if (sym_hashes == NULL)
4868 return FALSE;
4869
4870 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
4871
4872 if (info->wrap_hash != NULL
4873 && (input_section->flags & SEC_DEBUGGING) != 0)
4874 h = ((struct elf_link_hash_entry *)
4875 unwrap_hash_lookup (info, input_bfd, &h->root));
4876
4877 while (h->root.type == bfd_link_hash_indirect
4878 || h->root.type == bfd_link_hash_warning)
4879 h = (struct elf_link_hash_entry *) h->root.u.i.link;
4880
4881 relocation = 0;
4882 if (h->root.type == bfd_link_hash_defined
4883 || h->root.type == bfd_link_hash_defweak)
4884 {
4885 sec = h->root.u.def.section;
4886 if (sec == NULL
4887 || sec->output_section == NULL)
4888 /* Set a flag that will be cleared later if we find a
4889 relocation value for this symbol. output_section
4890 is typically NULL for symbols satisfied by a shared
4891 library. */
4892 unresolved_reloc = TRUE;
4893 else
4894 relocation = (h->root.u.def.value
4895 + sec->output_section->vma
4896 + sec->output_offset);
4897 }
4898 else if (h->root.type == bfd_link_hash_undefweak)
4899 ;
4900 else if (info->unresolved_syms_in_objects == RM_IGNORE
4901 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
4902 ;
4903 else if (!info->relocatable
4904 && !(r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64))
4905 {
4906 bfd_boolean err;
4907 err = (info->unresolved_syms_in_objects == RM_GENERATE_ERROR
4908 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT);
4909 if (!info->callbacks->undefined_symbol (info,
4910 h->root.root.string,
4911 input_bfd,
4912 input_section,
4913 rel->r_offset, err))
4914 return FALSE;
4915 }
4916 sym_name = h->root.root.string;
4917 }
4918
4919 if (sec != NULL && discarded_section (sec))
4920 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
4921 rel, 1, relend, howto, 0, contents);
4922
4923 if (info->relocatable)
4924 continue;
4925
4926 /* Change "a rt,ra,rb" to "ai rt,ra,0". */
4927 if (r_type == R_SPU_ADD_PIC
4928 && h != NULL
4929 && !(h->def_regular || ELF_COMMON_DEF_P (h)))
4930 {
4931 bfd_byte *loc = contents + rel->r_offset;
4932 loc[0] = 0x1c;
4933 loc[1] = 0x00;
4934 loc[2] &= 0x3f;
4935 }
4936
4937 is_ea_sym = (ea != NULL
4938 && sec != NULL
4939 && sec->output_section == ea);
4940
4941 /* If this symbol is in an overlay area, we may need to relocate
4942 to the overlay stub. */
4943 addend = rel->r_addend;
4944 if (stubs
4945 && !is_ea_sym
4946 && (stub_type = needs_ovl_stub (h, sym, sec, input_section, rel,
4947 contents, info)) != no_stub)
4948 {
4949 unsigned int ovl = 0;
4950 struct got_entry *g, **head;
4951
4952 if (stub_type != nonovl_stub)
4953 ovl = iovl;
4954
4955 if (h != NULL)
4956 head = &h->got.glist;
4957 else
4958 head = elf_local_got_ents (input_bfd) + r_symndx;
4959
4960 for (g = *head; g != NULL; g = g->next)
4961 if (htab->params->ovly_flavour == ovly_soft_icache
4962 ? (g->ovl == ovl
4963 && g->br_addr == (rel->r_offset
4964 + input_section->output_offset
4965 + input_section->output_section->vma))
4966 : g->addend == addend && (g->ovl == ovl || g->ovl == 0))
4967 break;
4968 if (g == NULL)
4969 abort ();
4970
4971 relocation = g->stub_addr;
4972 addend = 0;
4973 }
4974 else
4975 {
4976 /* For soft icache, encode the overlay index into addresses. */
4977 if (htab->params->ovly_flavour == ovly_soft_icache
4978 && (r_type == R_SPU_ADDR16_HI
4979 || r_type == R_SPU_ADDR32 || r_type == R_SPU_REL32)
4980 && !is_ea_sym)
4981 {
4982 unsigned int ovl = overlay_index (sec);
4983 if (ovl != 0)
4984 {
4985 unsigned int set_id = ((ovl - 1) >> htab->num_lines_log2) + 1;
4986 relocation += set_id << 18;
4987 }
4988 }
4989 }
4990
4991 if (htab->params->emit_fixups && !info->relocatable
4992 && (input_section->flags & SEC_ALLOC) != 0
4993 && r_type == R_SPU_ADDR32)
4994 {
4995 bfd_vma offset;
4996 offset = rel->r_offset + input_section->output_section->vma
4997 + input_section->output_offset;
4998 spu_elf_emit_fixup (output_bfd, info, offset);
4999 }
5000
5001 if (unresolved_reloc)
5002 ;
5003 else if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
5004 {
5005 if (is_ea_sym)
5006 {
5007 /* ._ea is a special section that isn't allocated in SPU
5008 memory, but rather occupies space in PPU memory as
5009 part of an embedded ELF image. If this reloc is
5010 against a symbol defined in ._ea, then transform the
5011 reloc into an equivalent one without a symbol
5012 relative to the start of the ELF image. */
5013 rel->r_addend += (relocation
5014 - ea->vma
5015 + elf_section_data (ea)->this_hdr.sh_offset);
5016 rel->r_info = ELF32_R_INFO (0, r_type);
5017 }
5018 emit_these_relocs = TRUE;
5019 continue;
5020 }
5021 else if (is_ea_sym)
5022 unresolved_reloc = TRUE;
5023
5024 if (unresolved_reloc
5025 && _bfd_elf_section_offset (output_bfd, info, input_section,
5026 rel->r_offset) != (bfd_vma) -1)
5027 {
5028 (*_bfd_error_handler)
5029 (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
5030 input_bfd,
5031 bfd_get_section_name (input_bfd, input_section),
5032 (long) rel->r_offset,
5033 howto->name,
5034 sym_name);
5035 ret = FALSE;
5036 }
5037
5038 r = _bfd_final_link_relocate (howto,
5039 input_bfd,
5040 input_section,
5041 contents,
5042 rel->r_offset, relocation, addend);
5043
5044 if (r != bfd_reloc_ok)
5045 {
5046 const char *msg = (const char *) 0;
5047
5048 switch (r)
5049 {
5050 case bfd_reloc_overflow:
5051 if (!((*info->callbacks->reloc_overflow)
5052 (info, (h ? &h->root : NULL), sym_name, howto->name,
5053 (bfd_vma) 0, input_bfd, input_section, rel->r_offset)))
5054 return FALSE;
5055 break;
5056
5057 case bfd_reloc_undefined:
5058 if (!((*info->callbacks->undefined_symbol)
5059 (info, sym_name, input_bfd, input_section,
5060 rel->r_offset, TRUE)))
5061 return FALSE;
5062 break;
5063
5064 case bfd_reloc_outofrange:
5065 msg = _("internal error: out of range error");
5066 goto common_error;
5067
5068 case bfd_reloc_notsupported:
5069 msg = _("internal error: unsupported relocation error");
5070 goto common_error;
5071
5072 case bfd_reloc_dangerous:
5073 msg = _("internal error: dangerous error");
5074 goto common_error;
5075
5076 default:
5077 msg = _("internal error: unknown error");
5078 /* fall through */
5079
5080 common_error:
5081 ret = FALSE;
5082 if (!((*info->callbacks->warning)
5083 (info, msg, sym_name, input_bfd, input_section,
5084 rel->r_offset)))
5085 return FALSE;
5086 break;
5087 }
5088 }
5089 }
5090
5091 if (ret
5092 && emit_these_relocs
5093 && !info->emitrelocations)
5094 {
5095 Elf_Internal_Rela *wrel;
5096 Elf_Internal_Shdr *rel_hdr;
5097
5098 wrel = rel = relocs;
5099 relend = relocs + input_section->reloc_count;
5100 for (; rel < relend; rel++)
5101 {
5102 int r_type;
5103
5104 r_type = ELF32_R_TYPE (rel->r_info);
5105 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
5106 *wrel++ = *rel;
5107 }
5108 input_section->reloc_count = wrel - relocs;
5109 /* Backflips for _bfd_elf_link_output_relocs. */
5110 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
5111 rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
5112 ret = 2;
5113 }
5114
5115 return ret;
5116 }
5117
/* elf_backend_finish_dynamic_sections hook.  Nothing to do for SPU;
   present only so the backend vector has a definition.  */
static bfd_boolean
spu_elf_finish_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
				 struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  return TRUE;
}
5124
5125 /* Adjust _SPUEAR_ syms to point at their overlay stubs. */
5126
5127 static int
5128 spu_elf_output_symbol_hook (struct bfd_link_info *info,
5129 const char *sym_name ATTRIBUTE_UNUSED,
5130 Elf_Internal_Sym *sym,
5131 asection *sym_sec ATTRIBUTE_UNUSED,
5132 struct elf_link_hash_entry *h)
5133 {
5134 struct spu_link_hash_table *htab = spu_hash_table (info);
5135
5136 if (!info->relocatable
5137 && htab->stub_sec != NULL
5138 && h != NULL
5139 && (h->root.type == bfd_link_hash_defined
5140 || h->root.type == bfd_link_hash_defweak)
5141 && h->def_regular
5142 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
5143 {
5144 struct got_entry *g;
5145
5146 for (g = h->got.glist; g != NULL; g = g->next)
5147 if (htab->params->ovly_flavour == ovly_soft_icache
5148 ? g->br_addr == g->stub_addr
5149 : g->addend == 0 && g->ovl == 0)
5150 {
5151 sym->st_shndx = (_bfd_elf_section_from_bfd_section
5152 (htab->stub_sec[0]->output_section->owner,
5153 htab->stub_sec[0]->output_section));
5154 sym->st_value = g->stub_addr;
5155 break;
5156 }
5157 }
5158
5159 return 1;
5160 }
5161
/* Nonzero when operating in plugin mode; consulted by
   spu_elf_post_process_headers when writing the ELF header.  */
static int spu_plugin = 0;

/* Record plugin mode (VAL != 0 enables it).  */
void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}
5169
5170 /* Set ELF header e_type for plugins. */
5171
5172 static void
5173 spu_elf_post_process_headers (bfd *abfd, struct bfd_link_info *info)
5174 {
5175 if (spu_plugin)
5176 {
5177 Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);
5178
5179 i_ehdrp->e_type = ET_DYN;
5180 }
5181
5182 _bfd_elf_post_process_headers (abfd, info);
5183 }
5184
5185 /* We may add an extra PT_LOAD segment for .toe. We also need extra
5186 segments for overlays. */
5187
5188 static int
5189 spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
5190 {
5191 int extra = 0;
5192 asection *sec;
5193
5194 if (info != NULL)
5195 {
5196 struct spu_link_hash_table *htab = spu_hash_table (info);
5197 extra = htab->num_overlays;
5198 }
5199
5200 if (extra)
5201 ++extra;
5202
5203 sec = bfd_get_section_by_name (abfd, ".toe");
5204 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
5205 ++extra;
5206
5207 return extra;
5208 }
5209
/* Remove .toe section from other PT_LOAD segments and put it in
   a segment of its own.  Put overlays in separate segments too.  */

static bfd_boolean
spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
{
  asection *toe, *s;
  struct elf_segment_map *m, *m_overlay;
  struct elf_segment_map **p, **p_overlay;
  unsigned int i;

  if (info == NULL)
    return TRUE;

  /* Find the first multi-section PT_LOAD segment containing .toe or an
     overlay section, and split it so that section lands in a PT_LOAD
     of its own.  Up to two new segment map entries are inserted: one
     for the sections following it, one for the section itself when it
     is not first in the segment.  */
  toe = bfd_get_section_by_name (abfd, ".toe");
  for (m = elf_seg_map (abfd); m != NULL; m = m->next)
    if (m->p_type == PT_LOAD && m->count > 1)
      for (i = 0; i < m->count; i++)
	if ((s = m->sections[i]) == toe
	    || spu_elf_section_data (s)->u.o.ovl_index != 0)
	  {
	    struct elf_segment_map *m2;
	    bfd_vma amt;

	    if (i + 1 < m->count)
	      {
		/* Split off the trailing sections into a new PT_LOAD.
		   The map struct already holds one sections[] slot, so
		   allocate count - (i + 2) extra slots.  */
		amt = sizeof (struct elf_segment_map);
		amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->count = m->count - (i + 1);
		memcpy (m2->sections, m->sections + i + 1,
			m2->count * sizeof (m->sections[0]));
		m2->p_type = PT_LOAD;
		m2->next = m->next;
		m->next = m2;
	      }
	    m->count = 1;
	    if (i != 0)
	      {
		/* The found section was not first: truncate M to the
		   leading sections and give S a fresh PT_LOAD entry.  */
		m->count = i;
		amt = sizeof (struct elf_segment_map);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->p_type = PT_LOAD;
		m2->count = 1;
		m2->sections[0] = s;
		m2->next = m->next;
		m->next = m2;
	      }
	    break;
	  }


  /* Some SPU ELF loaders ignore the PF_OVERLAY flag and just load all
     PT_LOAD segments.  This can cause the .ovl.init section to be
     overwritten with the contents of some overlay segment.  To work
     around this issue, we ensure that all PF_OVERLAY segments are
     sorted first amongst the program headers; this ensures that even
     with a broken loader, the .ovl.init section (which is not marked
     as PF_OVERLAY) will be placed into SPU local store on startup.  */

  /* Move all overlay segments onto a separate list.  */
  p = &elf_seg_map (abfd);
  p_overlay = &m_overlay;
  while (*p != NULL)
    {
      if ((*p)->p_type == PT_LOAD && (*p)->count == 1
	  && spu_elf_section_data ((*p)->sections[0])->u.o.ovl_index != 0)
	{
	  /* Unlink from the main list and append to the overlay list.  */
	  m = *p;
	  *p = m->next;
	  *p_overlay = m;
	  p_overlay = &m->next;
	  continue;
	}

      p = &((*p)->next);
    }

  /* Re-insert overlay segments at the head of the segment map.  */
  *p_overlay = elf_seg_map (abfd);
  elf_seg_map (abfd) = m_overlay;

  return TRUE;
}
5298
5299 /* Tweak the section type of .note.spu_name. */
5300
5301 static bfd_boolean
5302 spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
5303 Elf_Internal_Shdr *hdr,
5304 asection *sec)
5305 {
5306 if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
5307 hdr->sh_type = SHT_NOTE;
5308 return TRUE;
5309 }
5310
5311 /* Tweak phdrs before writing them out. */
5312
5313 static int
5314 spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
5315 {
5316 const struct elf_backend_data *bed;
5317 struct elf_obj_tdata *tdata;
5318 Elf_Internal_Phdr *phdr, *last;
5319 struct spu_link_hash_table *htab;
5320 unsigned int count;
5321 unsigned int i;
5322
5323 if (info == NULL)
5324 return TRUE;
5325
5326 bed = get_elf_backend_data (abfd);
5327 tdata = elf_tdata (abfd);
5328 phdr = tdata->phdr;
5329 count = elf_program_header_size (abfd) / bed->s->sizeof_phdr;
5330 htab = spu_hash_table (info);
5331 if (htab->num_overlays != 0)
5332 {
5333 struct elf_segment_map *m;
5334 unsigned int o;
5335
5336 for (i = 0, m = elf_seg_map (abfd); m; ++i, m = m->next)
5337 if (m->count != 0
5338 && (o = spu_elf_section_data (m->sections[0])->u.o.ovl_index) != 0)
5339 {
5340 /* Mark this as an overlay header. */
5341 phdr[i].p_flags |= PF_OVERLAY;
5342
5343 if (htab->ovtab != NULL && htab->ovtab->size != 0
5344 && htab->params->ovly_flavour != ovly_soft_icache)
5345 {
5346 bfd_byte *p = htab->ovtab->contents;
5347 unsigned int off = o * 16 + 8;
5348
5349 /* Write file_off into _ovly_table. */
5350 bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
5351 }
5352 }
5353 /* Soft-icache has its file offset put in .ovl.init. */
5354 if (htab->init != NULL && htab->init->size != 0)
5355 {
5356 bfd_vma val = elf_section_data (htab->ovl_sec[0])->this_hdr.sh_offset;
5357
5358 bfd_put_32 (htab->init->owner, val, htab->init->contents + 4);
5359 }
5360 }
5361
5362 /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
5363 of 16. This should always be possible when using the standard
5364 linker scripts, but don't create overlapping segments if
5365 someone is playing games with linker scripts. */
5366 last = NULL;
5367 for (i = count; i-- != 0; )
5368 if (phdr[i].p_type == PT_LOAD)
5369 {
5370 unsigned adjust;
5371
5372 adjust = -phdr[i].p_filesz & 15;
5373 if (adjust != 0
5374 && last != NULL
5375 && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
5376 break;
5377
5378 adjust = -phdr[i].p_memsz & 15;
5379 if (adjust != 0
5380 && last != NULL
5381 && phdr[i].p_filesz != 0
5382 && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
5383 && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
5384 break;
5385
5386 if (phdr[i].p_filesz != 0)
5387 last = &phdr[i];
5388 }
5389
5390 if (i == (unsigned int) -1)
5391 for (i = count; i-- != 0; )
5392 if (phdr[i].p_type == PT_LOAD)
5393 {
5394 unsigned adjust;
5395
5396 adjust = -phdr[i].p_filesz & 15;
5397 phdr[i].p_filesz += adjust;
5398
5399 adjust = -phdr[i].p_memsz & 15;
5400 phdr[i].p_memsz += adjust;
5401 }
5402
5403 return TRUE;
5404 }
5405
5406 bfd_boolean
5407 spu_elf_size_sections (bfd * output_bfd, struct bfd_link_info *info)
5408 {
5409 struct spu_link_hash_table *htab = spu_hash_table (info);
5410 if (htab->params->emit_fixups)
5411 {
5412 asection *sfixup = htab->sfixup;
5413 int fixup_count = 0;
5414 bfd *ibfd;
5415 size_t size;
5416
5417 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
5418 {
5419 asection *isec;
5420
5421 if (bfd_get_flavour (ibfd) != bfd_target_elf_flavour)
5422 continue;
5423
5424 /* Walk over each section attached to the input bfd. */
5425 for (isec = ibfd->sections; isec != NULL; isec = isec->next)
5426 {
5427 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
5428 bfd_vma base_end;
5429
5430 /* If there aren't any relocs, then there's nothing more
5431 to do. */
5432 if ((isec->flags & SEC_ALLOC) == 0
5433 || (isec->flags & SEC_RELOC) == 0
5434 || isec->reloc_count == 0)
5435 continue;
5436
5437 /* Get the relocs. */
5438 internal_relocs =
5439 _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
5440 info->keep_memory);
5441 if (internal_relocs == NULL)
5442 return FALSE;
5443
5444 /* 1 quadword can contain up to 4 R_SPU_ADDR32
5445 relocations. They are stored in a single word by
5446 saving the upper 28 bits of the address and setting the
5447 lower 4 bits to a bit mask of the words that have the
5448 relocation. BASE_END keeps track of the next quadword. */
5449 irela = internal_relocs;
5450 irelaend = irela + isec->reloc_count;
5451 base_end = 0;
5452 for (; irela < irelaend; irela++)
5453 if (ELF32_R_TYPE (irela->r_info) == R_SPU_ADDR32
5454 && irela->r_offset >= base_end)
5455 {
5456 base_end = (irela->r_offset & ~(bfd_vma) 15) + 16;
5457 fixup_count++;
5458 }
5459 }
5460 }
5461
5462 /* We always have a NULL fixup as a sentinel */
5463 size = (fixup_count + 1) * FIXUP_RECORD_SIZE;
5464 if (!bfd_set_section_size (output_bfd, sfixup, size))
5465 return FALSE;
5466 sfixup->contents = (bfd_byte *) bfd_zalloc (info->input_bfds, size);
5467 if (sfixup->contents == NULL)
5468 return FALSE;
5469 }
5470 return TRUE;
5471 }
5472
/* Target vector and backend hook definitions consumed by
   elf32-target.h, which expands them into the elf32-spu BFD target.  */
#define TARGET_BIG_SYM spu_elf32_vec
#define TARGET_BIG_NAME "elf32-spu"
#define ELF_ARCH bfd_arch_spu
#define ELF_TARGET_ID SPU_ELF_DATA
#define ELF_MACHINE_CODE EM_SPU
/* This matches the alignment need for DMA.  */
#define ELF_MAXPAGESIZE 0x80
#define elf_backend_rela_normal 1
#define elf_backend_can_gc_sections 1

#define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
#define elf_info_to_howto spu_elf_info_to_howto
#define elf_backend_count_relocs spu_elf_count_relocs
#define elf_backend_relocate_section spu_elf_relocate_section
#define elf_backend_finish_dynamic_sections spu_elf_finish_dynamic_sections
#define elf_backend_symbol_processing spu_elf_backend_symbol_processing
#define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
#define elf_backend_object_p spu_elf_object_p
#define bfd_elf32_new_section_hook spu_elf_new_section_hook
#define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create

#define elf_backend_additional_program_headers spu_elf_additional_program_headers
#define elf_backend_modify_segment_map spu_elf_modify_segment_map
#define elf_backend_modify_program_headers spu_elf_modify_program_headers
#define elf_backend_post_process_headers spu_elf_post_process_headers
#define elf_backend_fake_sections spu_elf_fake_sections
#define elf_backend_special_sections spu_elf_special_sections
#define bfd_elf32_bfd_final_link spu_elf_final_link

#include "elf32-target.h"