1 /* SPU specific support for 32-bit ELF
2
3 Copyright 2006, 2007, 2008, 2009, 2010, 2011, 2012
4 Free Software Foundation, Inc.
5
6 This file is part of BFD, the Binary File Descriptor library.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License along
19 with this program; if not, write to the Free Software Foundation, Inc.,
20 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
21
22 #include "sysdep.h"
23 #include "libiberty.h"
24 #include "bfd.h"
25 #include "bfdlink.h"
26 #include "libbfd.h"
27 #include "elf-bfd.h"
28 #include "elf/spu.h"
29 #include "elf32-spu.h"
30
31 /* We use RELA style relocs. Don't define USE_REL. */
32
33 static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
34 void *, asection *,
35 bfd *, char **);
36
37 /* Values of type 'enum elf_spu_reloc_type' are used to index this
38 array, so it must be declared in the order of that type. */
39
40 static reloc_howto_type elf_howto_table[] = {
41 HOWTO (R_SPU_NONE, 0, 0, 0, FALSE, 0, complain_overflow_dont,
42 bfd_elf_generic_reloc, "SPU_NONE",
43 FALSE, 0, 0x00000000, FALSE),
44 HOWTO (R_SPU_ADDR10, 4, 2, 10, FALSE, 14, complain_overflow_bitfield,
45 bfd_elf_generic_reloc, "SPU_ADDR10",
46 FALSE, 0, 0x00ffc000, FALSE),
47 HOWTO (R_SPU_ADDR16, 2, 2, 16, FALSE, 7, complain_overflow_bitfield,
48 bfd_elf_generic_reloc, "SPU_ADDR16",
49 FALSE, 0, 0x007fff80, FALSE),
50 HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE, 7, complain_overflow_bitfield,
51 bfd_elf_generic_reloc, "SPU_ADDR16_HI",
52 FALSE, 0, 0x007fff80, FALSE),
53 HOWTO (R_SPU_ADDR16_LO, 0, 2, 16, FALSE, 7, complain_overflow_dont,
54 bfd_elf_generic_reloc, "SPU_ADDR16_LO",
55 FALSE, 0, 0x007fff80, FALSE),
56 HOWTO (R_SPU_ADDR18, 0, 2, 18, FALSE, 7, complain_overflow_bitfield,
57 bfd_elf_generic_reloc, "SPU_ADDR18",
58 FALSE, 0, 0x01ffff80, FALSE),
59 HOWTO (R_SPU_ADDR32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
60 bfd_elf_generic_reloc, "SPU_ADDR32",
61 FALSE, 0, 0xffffffff, FALSE),
62 HOWTO (R_SPU_REL16, 2, 2, 16, TRUE, 7, complain_overflow_bitfield,
63 bfd_elf_generic_reloc, "SPU_REL16",
64 FALSE, 0, 0x007fff80, TRUE),
65 HOWTO (R_SPU_ADDR7, 0, 2, 7, FALSE, 14, complain_overflow_dont,
66 bfd_elf_generic_reloc, "SPU_ADDR7",
67 FALSE, 0, 0x001fc000, FALSE),
68 HOWTO (R_SPU_REL9, 2, 2, 9, TRUE, 0, complain_overflow_signed,
69 spu_elf_rel9, "SPU_REL9",
70 FALSE, 0, 0x0180007f, TRUE),
71 HOWTO (R_SPU_REL9I, 2, 2, 9, TRUE, 0, complain_overflow_signed,
72 spu_elf_rel9, "SPU_REL9I",
73 FALSE, 0, 0x0000c07f, TRUE),
74 HOWTO (R_SPU_ADDR10I, 0, 2, 10, FALSE, 14, complain_overflow_signed,
75 bfd_elf_generic_reloc, "SPU_ADDR10I",
76 FALSE, 0, 0x00ffc000, FALSE),
77 HOWTO (R_SPU_ADDR16I, 0, 2, 16, FALSE, 7, complain_overflow_signed,
78 bfd_elf_generic_reloc, "SPU_ADDR16I",
79 FALSE, 0, 0x007fff80, FALSE),
80 HOWTO (R_SPU_REL32, 0, 2, 32, TRUE, 0, complain_overflow_dont,
81 bfd_elf_generic_reloc, "SPU_REL32",
82 FALSE, 0, 0xffffffff, TRUE),
83 HOWTO (R_SPU_ADDR16X, 0, 2, 16, FALSE, 7, complain_overflow_bitfield,
84 bfd_elf_generic_reloc, "SPU_ADDR16X",
85 FALSE, 0, 0x007fff80, FALSE),
86 HOWTO (R_SPU_PPU32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
87 bfd_elf_generic_reloc, "SPU_PPU32",
88 FALSE, 0, 0xffffffff, FALSE),
89 HOWTO (R_SPU_PPU64, 0, 4, 64, FALSE, 0, complain_overflow_dont,
90 bfd_elf_generic_reloc, "SPU_PPU64",
91 FALSE, 0, -1, FALSE),
92 HOWTO (R_SPU_ADD_PIC, 0, 0, 0, FALSE, 0, complain_overflow_dont,
93 bfd_elf_generic_reloc, "SPU_ADD_PIC",
94 FALSE, 0, 0x00000000, FALSE),
95 };
96
97 static struct bfd_elf_special_section const spu_elf_special_sections[] = {
98 { "._ea", 4, 0, SHT_PROGBITS, SHF_WRITE },
99 { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
100 { NULL, 0, 0, 0, 0 }
101 };
102
103 static enum elf_spu_reloc_type
104 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
105 {
106 switch (code)
107 {
108 default:
109 return R_SPU_NONE;
110 case BFD_RELOC_SPU_IMM10W:
111 return R_SPU_ADDR10;
112 case BFD_RELOC_SPU_IMM16W:
113 return R_SPU_ADDR16;
114 case BFD_RELOC_SPU_LO16:
115 return R_SPU_ADDR16_LO;
116 case BFD_RELOC_SPU_HI16:
117 return R_SPU_ADDR16_HI;
118 case BFD_RELOC_SPU_IMM18:
119 return R_SPU_ADDR18;
120 case BFD_RELOC_SPU_PCREL16:
121 return R_SPU_REL16;
122 case BFD_RELOC_SPU_IMM7:
123 return R_SPU_ADDR7;
124 case BFD_RELOC_SPU_IMM8:
125 return R_SPU_NONE;
126 case BFD_RELOC_SPU_PCREL9a:
127 return R_SPU_REL9;
128 case BFD_RELOC_SPU_PCREL9b:
129 return R_SPU_REL9I;
130 case BFD_RELOC_SPU_IMM10:
131 return R_SPU_ADDR10I;
132 case BFD_RELOC_SPU_IMM16:
133 return R_SPU_ADDR16I;
134 case BFD_RELOC_32:
135 return R_SPU_ADDR32;
136 case BFD_RELOC_32_PCREL:
137 return R_SPU_REL32;
138 case BFD_RELOC_SPU_PPU32:
139 return R_SPU_PPU32;
140 case BFD_RELOC_SPU_PPU64:
141 return R_SPU_PPU64;
142 case BFD_RELOC_SPU_ADD_PIC:
143 return R_SPU_ADD_PIC;
144 }
145 }
146
147 static void
148 spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
149 arelent *cache_ptr,
150 Elf_Internal_Rela *dst)
151 {
152 enum elf_spu_reloc_type r_type;
153
154 r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
155 BFD_ASSERT (r_type < R_SPU_max);
156 cache_ptr->howto = &elf_howto_table[(int) r_type];
157 }
158
159 static reloc_howto_type *
160 spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
161 bfd_reloc_code_real_type code)
162 {
163 enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);
164
165 if (r_type == R_SPU_NONE)
166 return NULL;
167
168 return elf_howto_table + r_type;
169 }
170
171 static reloc_howto_type *
172 spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
173 const char *r_name)
174 {
175 unsigned int i;
176
177 for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
178 if (elf_howto_table[i].name != NULL
179 && strcasecmp (elf_howto_table[i].name, r_name) == 0)
180 return &elf_howto_table[i];
181
182 return NULL;
183 }
184
185 /* Apply R_SPU_REL9 and R_SPU_REL9I relocs. */
186
187 static bfd_reloc_status_type
188 spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
189 void *data, asection *input_section,
190 bfd *output_bfd, char **error_message)
191 {
192 bfd_size_type octets;
193 bfd_vma val;
194 long insn;
195
196 /* If this is a relocatable link (output_bfd test tells us), just
197 call the generic function. Any adjustment will be done at final
198 link time. */
199 if (output_bfd != NULL)
200 return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
201 input_section, output_bfd, error_message);
202
203 if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
204 return bfd_reloc_outofrange;
205 octets = reloc_entry->address * bfd_octets_per_byte (abfd);
206
207 /* Get symbol value. */
208 val = 0;
209 if (!bfd_is_com_section (symbol->section))
210 val = symbol->value;
211 if (symbol->section->output_section)
212 val += symbol->section->output_section->vma;
213
214 val += reloc_entry->addend;
215
216 /* Make it pc-relative. */
217 val -= input_section->output_section->vma + input_section->output_offset;
218
219 val >>= 2;
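/* VAL is a bfd_vma, so the unsigned comparison below flags any
   word-scaled displacement outside the signed 9-bit range
   [-256, 255]. */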
220 if (val + 256 >= 512)
221 return bfd_reloc_overflow;
222
223 insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);
224
225 /* Move two high bits of value to REL9I and REL9 position.
226 The mask will take care of selecting the right field. */
227 val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
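/* For example, val = 0x1a5 scatters as: the low seven bits 0x25 go
   into bits 0-6 (common to both forms), while the top two bits 0x180
   shift left 7 to 0x0000c000 (the REL9I field, mask 0x0000c07f) and
   left 16 to 0x01800000 (the REL9 field, mask 0x0180007f).  Masking
   with dst_mask then keeps only the field for this reloc. */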
228 insn &= ~reloc_entry->howto->dst_mask;
229 insn |= val & reloc_entry->howto->dst_mask;
230 bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
231 return bfd_reloc_ok;
232 }
233
234 static bfd_boolean
235 spu_elf_new_section_hook (bfd *abfd, asection *sec)
236 {
237 if (!sec->used_by_bfd)
238 {
239 struct _spu_elf_section_data *sdata;
240
241 sdata = bfd_zalloc (abfd, sizeof (*sdata));
242 if (sdata == NULL)
243 return FALSE;
244 sec->used_by_bfd = sdata;
245 }
246
247 return _bfd_elf_new_section_hook (abfd, sec);
248 }
249
250 /* Set up overlay info for executables. */
251
252 static bfd_boolean
253 spu_elf_object_p (bfd *abfd)
254 {
255 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
256 {
257 unsigned int i, num_ovl, num_buf;
258 Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
259 Elf_Internal_Ehdr *ehdr = elf_elfheader (abfd);
260 Elf_Internal_Phdr *last_phdr = NULL;
261
262 for (num_buf = 0, num_ovl = 0, i = 0; i < ehdr->e_phnum; i++, phdr++)
263 if (phdr->p_type == PT_LOAD && (phdr->p_flags & PF_OVERLAY) != 0)
264 {
265 unsigned int j;
266
267 ++num_ovl;
268 if (last_phdr == NULL
269 || ((last_phdr->p_vaddr ^ phdr->p_vaddr) & 0x3ffff) != 0)
270 ++num_buf;
271 last_phdr = phdr;
272 for (j = 1; j < elf_numsections (abfd); j++)
273 {
274 Elf_Internal_Shdr *shdr = elf_elfsections (abfd)[j];
275
276 if (ELF_SECTION_SIZE (shdr, phdr) != 0
277 && ELF_SECTION_IN_SEGMENT (shdr, phdr))
278 {
279 asection *sec = shdr->bfd_section;
280 spu_elf_section_data (sec)->u.o.ovl_index = num_ovl;
281 spu_elf_section_data (sec)->u.o.ovl_buf = num_buf;
282 }
283 }
284 }
285 }
286 return TRUE;
287 }
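/* Note on the buffer counting above: consecutive overlay segments
   whose p_vaddr values agree in the low 18 bits (the 256k local
   store address space) load at the same local store address and so
   share one overlay buffer.  For example, overlay segments with
   p_vaddr 0x4000, 0x4000, 0x8000 yield num_buf = 1, 1, 2 while
   num_ovl counts 1, 2, 3. */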
288
289 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
290 strip --strip-unneeded will not remove them. */
291
292 static void
293 spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
294 {
295 if (sym->name != NULL
296 && sym->section != bfd_abs_section_ptr
297 && strncmp (sym->name, "_EAR_", 5) == 0)
298 sym->flags |= BSF_KEEP;
299 }
300
301 /* SPU ELF linker hash table. */
302
303 struct spu_link_hash_table
304 {
305 struct elf_link_hash_table elf;
306
307 struct spu_elf_params *params;
308
309 /* Shortcuts to overlay sections. */
310 asection *ovtab;
311 asection *init;
312 asection *toe;
313 asection **ovl_sec;
314
315 /* Count of stubs in each overlay section. */
316 unsigned int *stub_count;
317
318 /* The stub section for each overlay section. */
319 asection **stub_sec;
320
321 struct elf_link_hash_entry *ovly_entry[2];
322
323 /* Number of overlay buffers. */
324 unsigned int num_buf;
325
326 /* Total number of overlays. */
327 unsigned int num_overlays;
328
329 /* For soft icache. */
330 unsigned int line_size_log2;
331 unsigned int num_lines_log2;
332 unsigned int fromelem_size_log2;
333
334 /* How much memory we have. */
335 unsigned int local_store;
336
337 /* Count of overlay stubs needed in non-overlay area. */
338 unsigned int non_ovly_stub;
339
340 /* Pointer to the fixup section. */
341 asection *sfixup;
342
343 /* Set on error. */
344 unsigned int stub_err : 1;
345 };
346
347 /* Hijack the generic got fields for overlay stub accounting. */
348
349 struct got_entry
350 {
351 struct got_entry *next;
352 unsigned int ovl;
353 union {
354 bfd_vma addend;
355 bfd_vma br_addr;
356 };
357 bfd_vma stub_addr;
358 };
359
360 #define spu_hash_table(p) \
361 (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
362 == SPU_ELF_DATA ? ((struct spu_link_hash_table *) ((p)->hash)) : NULL)
363
364 struct call_info
365 {
366 struct function_info *fun;
367 struct call_info *next;
368 unsigned int count;
369 unsigned int max_depth;
370 unsigned int is_tail : 1;
371 unsigned int is_pasted : 1;
372 unsigned int broken_cycle : 1;
373 unsigned int priority : 13;
374 };
375
376 struct function_info
377 {
378 /* List of functions called. Also branches to hot/cold part of
379 function. */
380 struct call_info *call_list;
381 /* For hot/cold part of function, point to owner. */
382 struct function_info *start;
383 /* Symbol at start of function. */
384 union {
385 Elf_Internal_Sym *sym;
386 struct elf_link_hash_entry *h;
387 } u;
388 /* Function section. */
389 asection *sec;
390 asection *rodata;
391 /* Where last called from, and number of sections called from. */
392 asection *last_caller;
393 unsigned int call_count;
394 /* Address range of (this part of) function. */
395 bfd_vma lo, hi;
396 /* Offset where we found a store of lr, or -1 if none found. */
397 bfd_vma lr_store;
398 /* Offset where we found the stack adjustment insn. */
399 bfd_vma sp_adjust;
400 /* Stack usage. */
401 int stack;
402 /* Distance from root of call tree. Tail and hot/cold branches
403 count as one deeper. We aren't counting stack frames here. */
404 unsigned int depth;
405 /* Set if global symbol. */
406 unsigned int global : 1;
407 /* Set if known to be start of function (as distinct from a hunk
408 in hot/cold section). */
409 unsigned int is_func : 1;
410 /* Set if not a root node. */
411 unsigned int non_root : 1;
412 /* Flags used during call tree traversal. It's cheaper to replicate
413 the visit flags than have one which needs clearing after a traversal. */
414 unsigned int visit1 : 1;
415 unsigned int visit2 : 1;
416 unsigned int marking : 1;
417 unsigned int visit3 : 1;
418 unsigned int visit4 : 1;
419 unsigned int visit5 : 1;
420 unsigned int visit6 : 1;
421 unsigned int visit7 : 1;
422 };
423
424 struct spu_elf_stack_info
425 {
426 int num_fun;
427 int max_fun;
428 /* Variable size array describing functions, one per contiguous
429 address range belonging to a function. */
430 struct function_info fun[1];
431 };
432
433 static struct function_info *find_function (asection *, bfd_vma,
434 struct bfd_link_info *);
435
436 /* Create a spu ELF linker hash table. */
437
438 static struct bfd_link_hash_table *
439 spu_elf_link_hash_table_create (bfd *abfd)
440 {
441 struct spu_link_hash_table *htab;
442
443 htab = bfd_zmalloc (sizeof (*htab));
444 if (htab == NULL)
445 return NULL;
446
447 if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
448 _bfd_elf_link_hash_newfunc,
449 sizeof (struct elf_link_hash_entry),
450 SPU_ELF_DATA))
451 {
452 free (htab);
453 return NULL;
454 }
455
456 htab->elf.init_got_refcount.refcount = 0;
457 htab->elf.init_got_refcount.glist = NULL;
458 htab->elf.init_got_offset.offset = 0;
459 htab->elf.init_got_offset.glist = NULL;
460 return &htab->elf.root;
461 }
462
463 void
464 spu_elf_setup (struct bfd_link_info *info, struct spu_elf_params *params)
465 {
466 bfd_vma max_branch_log2;
467
468 struct spu_link_hash_table *htab = spu_hash_table (info);
469 htab->params = params;
470 htab->line_size_log2 = bfd_log2 (htab->params->line_size);
471 htab->num_lines_log2 = bfd_log2 (htab->params->num_lines);
472
473 /* For the software i-cache, we provide a "from" list whose size
474 is a power-of-two number of quadwords, big enough to hold one
475 byte per outgoing branch. Compute this number here. */
476 max_branch_log2 = bfd_log2 (htab->params->max_branch);
477 htab->fromelem_size_log2 = max_branch_log2 > 4 ? max_branch_log2 - 4 : 0;
478 }
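/* Worked example of the computation above (the parameter values are
   illustrative, not defaults): with max_branch = 256, one byte per
   branch fills 256 / 16 = 16 quadwords, so max_branch_log2 = 8 and
   fromelem_size_log2 = 8 - 4 = 4, giving a 16-quadword "from" list
   element per cache line. */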
479
480 /* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
481 to (hash, NULL) for global symbols, and (NULL, sym) for locals. Set
482 *SYMSECP to the symbol's section. *LOCSYMSP caches local syms. */
483
484 static bfd_boolean
485 get_sym_h (struct elf_link_hash_entry **hp,
486 Elf_Internal_Sym **symp,
487 asection **symsecp,
488 Elf_Internal_Sym **locsymsp,
489 unsigned long r_symndx,
490 bfd *ibfd)
491 {
492 Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
493
494 if (r_symndx >= symtab_hdr->sh_info)
495 {
496 struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
497 struct elf_link_hash_entry *h;
498
499 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
500 while (h->root.type == bfd_link_hash_indirect
501 || h->root.type == bfd_link_hash_warning)
502 h = (struct elf_link_hash_entry *) h->root.u.i.link;
503
504 if (hp != NULL)
505 *hp = h;
506
507 if (symp != NULL)
508 *symp = NULL;
509
510 if (symsecp != NULL)
511 {
512 asection *symsec = NULL;
513 if (h->root.type == bfd_link_hash_defined
514 || h->root.type == bfd_link_hash_defweak)
515 symsec = h->root.u.def.section;
516 *symsecp = symsec;
517 }
518 }
519 else
520 {
521 Elf_Internal_Sym *sym;
522 Elf_Internal_Sym *locsyms = *locsymsp;
523
524 if (locsyms == NULL)
525 {
526 locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
527 if (locsyms == NULL)
528 locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr,
529 symtab_hdr->sh_info,
530 0, NULL, NULL, NULL);
531 if (locsyms == NULL)
532 return FALSE;
533 *locsymsp = locsyms;
534 }
535 sym = locsyms + r_symndx;
536
537 if (hp != NULL)
538 *hp = NULL;
539
540 if (symp != NULL)
541 *symp = sym;
542
543 if (symsecp != NULL)
544 *symsecp = bfd_section_from_elf_index (ibfd, sym->st_shndx);
545 }
546
547 return TRUE;
548 }
549
550 /* Create the note section if not already present. This is done early so
551 that the linker maps the sections to the right place in the output. */
552
553 bfd_boolean
554 spu_elf_create_sections (struct bfd_link_info *info)
555 {
556 struct spu_link_hash_table *htab = spu_hash_table (info);
557 bfd *ibfd;
558
559 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
560 if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
561 break;
562
563 if (ibfd == NULL)
564 {
565 /* Make SPU_PTNOTE_SPUNAME section. */
566 asection *s;
567 size_t name_len;
568 size_t size;
569 bfd_byte *data;
570 flagword flags;
571
572 ibfd = info->input_bfds;
573 flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
574 s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
575 if (s == NULL
576 || !bfd_set_section_alignment (ibfd, s, 4))
577 return FALSE;
578
579 name_len = strlen (bfd_get_filename (info->output_bfd)) + 1;
580 size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
581 size += (name_len + 3) & -4;
582
583 if (!bfd_set_section_size (ibfd, s, size))
584 return FALSE;
585
586 data = bfd_zalloc (ibfd, size);
587 if (data == NULL)
588 return FALSE;
589
590 bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
591 bfd_put_32 (ibfd, name_len, data + 4);
592 bfd_put_32 (ibfd, 1, data + 8);
593 memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
594 memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
595 bfd_get_filename (info->output_bfd), name_len);
596 s->contents = data;
597 }
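/* The buffer built above follows the standard ELF note layout: a
   12-byte header (namesz = sizeof (SPU_PLUGIN_NAME), descsz =
   name_len, type = 1), the name padded to a 4-byte boundary, then
   the descriptor holding the output filename.  For example, an
   output file named "a.out" gives descsz = 6 with the descriptor
   padded to 8 bytes. */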
598
599 if (htab->params->emit_fixups)
600 {
601 asection *s;
602 flagword flags;
603
604 if (htab->elf.dynobj == NULL)
605 htab->elf.dynobj = ibfd;
606 ibfd = htab->elf.dynobj;
607 flags = (SEC_LOAD | SEC_ALLOC | SEC_READONLY | SEC_HAS_CONTENTS
608 | SEC_IN_MEMORY | SEC_LINKER_CREATED);
609 s = bfd_make_section_anyway_with_flags (ibfd, ".fixup", flags);
610 if (s == NULL || !bfd_set_section_alignment (ibfd, s, 2))
611 return FALSE;
612 htab->sfixup = s;
613 }
614
615 return TRUE;
616 }
617
618 /* qsort predicate to sort sections by vma. */
619
620 static int
621 sort_sections (const void *a, const void *b)
622 {
623 const asection *const *s1 = a;
624 const asection *const *s2 = b;
625 bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;
626
627 if (delta != 0)
628 return delta < 0 ? -1 : 1;
629
630 return (*s1)->index - (*s2)->index;
631 }
632
633 /* Identify overlays in the output bfd, and number them.
634 Returns 0 on error, 1 if no overlays, 2 if overlays. */
635
636 int
637 spu_elf_find_overlays (struct bfd_link_info *info)
638 {
639 struct spu_link_hash_table *htab = spu_hash_table (info);
640 asection **alloc_sec;
641 unsigned int i, n, ovl_index, num_buf;
642 asection *s;
643 bfd_vma ovl_end;
644 static const char *const entry_names[2][2] = {
645 { "__ovly_load", "__icache_br_handler" },
646 { "__ovly_return", "__icache_call_handler" }
647 };
648
649 if (info->output_bfd->section_count < 2)
650 return 1;
651
652 alloc_sec
653 = bfd_malloc (info->output_bfd->section_count * sizeof (*alloc_sec));
654 if (alloc_sec == NULL)
655 return 0;
656
657 /* Pick out all the alloced sections. */
658 for (n = 0, s = info->output_bfd->sections; s != NULL; s = s->next)
659 if ((s->flags & SEC_ALLOC) != 0
660 && (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
661 && s->size != 0)
662 alloc_sec[n++] = s;
663
664 if (n == 0)
665 {
666 free (alloc_sec);
667 return 1;
668 }
669
670 /* Sort them by vma. */
671 qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);
672
673 ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
674 if (htab->params->ovly_flavour == ovly_soft_icache)
675 {
676 unsigned int prev_buf = 0, set_id = 0;
677
678 /* Look for an overlapping vma to find the first overlay section. */
679 bfd_vma vma_start = 0;
680
681 for (i = 1; i < n; i++)
682 {
683 s = alloc_sec[i];
684 if (s->vma < ovl_end)
685 {
686 asection *s0 = alloc_sec[i - 1];
687 vma_start = s0->vma;
688 ovl_end = (s0->vma
689 + ((bfd_vma) 1
690 << (htab->num_lines_log2 + htab->line_size_log2)));
691 --i;
692 break;
693 }
694 else
695 ovl_end = s->vma + s->size;
696 }
697
698 /* Now find any sections within the cache area. */
699 for (ovl_index = 0, num_buf = 0; i < n; i++)
700 {
701 s = alloc_sec[i];
702 if (s->vma >= ovl_end)
703 break;
704
705 /* A section in an overlay area called .ovl.init is not
706 an overlay in the sense of something loaded in by the
707 overlay manager; rather, it is the initial contents
708 of the overlay buffer. */
709 if (strncmp (s->name, ".ovl.init", 9) != 0)
710 {
711 num_buf = ((s->vma - vma_start) >> htab->line_size_log2) + 1;
712 set_id = (num_buf == prev_buf)? set_id + 1 : 0;
713 prev_buf = num_buf;
714
715 if ((s->vma - vma_start) & (htab->params->line_size - 1))
716 {
717 info->callbacks->einfo (_("%X%P: overlay section %A "
718 "does not start on a cache line.\n"),
719 s);
720 bfd_set_error (bfd_error_bad_value);
721 return 0;
722 }
723 else if (s->size > htab->params->line_size)
724 {
725 info->callbacks->einfo (_("%X%P: overlay section %A "
726 "is larger than a cache line.\n"),
727 s);
728 bfd_set_error (bfd_error_bad_value);
729 return 0;
730 }
731
732 alloc_sec[ovl_index++] = s;
733 spu_elf_section_data (s)->u.o.ovl_index
734 = (set_id << htab->num_lines_log2) + num_buf;
735 spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
736 }
737 }
738
739 /* Ensure there are no more overlay sections. */
740 for ( ; i < n; i++)
741 {
742 s = alloc_sec[i];
743 if (s->vma < ovl_end)
744 {
745 info->callbacks->einfo (_("%X%P: overlay section %A "
746 "is not in cache area.\n"),
747 alloc_sec[i-1]);
748 bfd_set_error (bfd_error_bad_value);
749 return 0;
750 }
751 else
752 ovl_end = s->vma + s->size;
753 }
754 }
755 else
756 {
757 /* Look for overlapping vmas. Any with overlap must be overlays.
758 Count them. Also count the number of overlay regions. */
759 for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
760 {
761 s = alloc_sec[i];
762 if (s->vma < ovl_end)
763 {
764 asection *s0 = alloc_sec[i - 1];
765
766 if (spu_elf_section_data (s0)->u.o.ovl_index == 0)
767 {
768 ++num_buf;
769 if (strncmp (s0->name, ".ovl.init", 9) != 0)
770 {
771 alloc_sec[ovl_index] = s0;
772 spu_elf_section_data (s0)->u.o.ovl_index = ++ovl_index;
773 spu_elf_section_data (s0)->u.o.ovl_buf = num_buf;
774 }
775 else
776 ovl_end = s->vma + s->size;
777 }
778 if (strncmp (s->name, ".ovl.init", 9) != 0)
779 {
780 alloc_sec[ovl_index] = s;
781 spu_elf_section_data (s)->u.o.ovl_index = ++ovl_index;
782 spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
783 if (s0->vma != s->vma)
784 {
785 info->callbacks->einfo (_("%X%P: overlay sections %A "
786 "and %A do not start at the "
787 "same address.\n"),
788 s0, s);
789 bfd_set_error (bfd_error_bad_value);
790 return 0;
791 }
792 if (ovl_end < s->vma + s->size)
793 ovl_end = s->vma + s->size;
794 }
795 }
796 else
797 ovl_end = s->vma + s->size;
798 }
799 }
800
801 htab->num_overlays = ovl_index;
802 htab->num_buf = num_buf;
803 htab->ovl_sec = alloc_sec;
804
805 if (ovl_index == 0)
806 return 1;
807
808 for (i = 0; i < 2; i++)
809 {
810 const char *name;
811 struct elf_link_hash_entry *h;
812
813 name = entry_names[i][htab->params->ovly_flavour];
814 h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
815 if (h == NULL)
816 return 0;
817
818 if (h->root.type == bfd_link_hash_new)
819 {
820 h->root.type = bfd_link_hash_undefined;
821 h->ref_regular = 1;
822 h->ref_regular_nonweak = 1;
823 h->non_elf = 0;
824 }
825 htab->ovly_entry[i] = h;
826 }
827
828 return 2;
829 }
830
831 /* Non-zero to use bra in overlay stubs rather than br. */
832 #define BRA_STUBS 0
833
834 #define BRA 0x30000000
835 #define BRASL 0x31000000
836 #define BR 0x32000000
837 #define BRSL 0x33000000
838 #define NOP 0x40200000
839 #define LNOP 0x00200000
840 #define ILA 0x42000000
841
842 /* Return true for all relative and absolute branch instructions.
843 bra 00110000 0..
844 brasl 00110001 0..
845 br 00110010 0..
846 brsl 00110011 0..
847 brz 00100000 0..
848 brnz 00100001 0..
849 brhz 00100010 0..
850 brhnz 00100011 0.. */
851
852 static bfd_boolean
853 is_branch (const unsigned char *insn)
854 {
855 return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
856 }
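/* The 0xec mask in is_branch accepts exactly the opcode bytes
   0x20-0x23 (brz, brnz, brhz, brhnz) and 0x30-0x33 (bra, brasl,
   br, brsl) by testing the bit pattern 001x00xx; the second-byte
   test checks that the ninth opcode bit is zero. */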
857
858 /* Return true for all indirect branch instructions.
859 bi 00110101 000
860 bisl 00110101 001
861 iret 00110101 010
862 bisled 00110101 011
863 biz 00100101 000
864 binz 00100101 001
865 bihz 00100101 010
866 bihnz 00100101 011 */
867
868 static bfd_boolean
869 is_indirect_branch (const unsigned char *insn)
870 {
871 return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
872 }
873
874 /* Return true for branch hint instructions.
875 hbra 0001000..
876 hbrr 0001001.. */
877
878 static bfd_boolean
879 is_hint (const unsigned char *insn)
880 {
881 return (insn[0] & 0xfc) == 0x10;
882 }
883
884 /* True if INPUT_SECTION might need overlay stubs. */
885
886 static bfd_boolean
887 maybe_needs_stubs (asection *input_section)
888 {
889 /* No stubs for debug sections and suchlike. */
890 if ((input_section->flags & SEC_ALLOC) == 0)
891 return FALSE;
892
893 /* No stubs for link-once sections that will be discarded. */
894 if (input_section->output_section == bfd_abs_section_ptr)
895 return FALSE;
896
897 /* Don't create stubs for .eh_frame references. */
898 if (strcmp (input_section->name, ".eh_frame") == 0)
899 return FALSE;
900
901 return TRUE;
902 }
903
904 enum _stub_type
905 {
906 no_stub,
907 call_ovl_stub,
908 br000_ovl_stub,
909 br001_ovl_stub,
910 br010_ovl_stub,
911 br011_ovl_stub,
912 br100_ovl_stub,
913 br101_ovl_stub,
914 br110_ovl_stub,
915 br111_ovl_stub,
916 nonovl_stub,
917 stub_error
918 };
919
920 /* Return the kind of overlay stub, if any, needed for this reloc
921 symbol.  nonovl_stub means the stub must be in the non-overlay area. */
922
923 static enum _stub_type
924 needs_ovl_stub (struct elf_link_hash_entry *h,
925 Elf_Internal_Sym *sym,
926 asection *sym_sec,
927 asection *input_section,
928 Elf_Internal_Rela *irela,
929 bfd_byte *contents,
930 struct bfd_link_info *info)
931 {
932 struct spu_link_hash_table *htab = spu_hash_table (info);
933 enum elf_spu_reloc_type r_type;
934 unsigned int sym_type;
935 bfd_boolean branch, hint, call;
936 enum _stub_type ret = no_stub;
937 bfd_byte insn[4];
938
939 if (sym_sec == NULL
940 || sym_sec->output_section == bfd_abs_section_ptr
941 || spu_elf_section_data (sym_sec->output_section) == NULL)
942 return ret;
943
944 if (h != NULL)
945 {
946 /* Ensure no stubs for user supplied overlay manager syms. */
947 if (h == htab->ovly_entry[0] || h == htab->ovly_entry[1])
948 return ret;
949
950 /* setjmp always goes via an overlay stub, because then the return
951 and hence the longjmp go via __ovly_return. That magically
952 makes setjmp/longjmp between overlays work. */
953 if (strncmp (h->root.root.string, "setjmp", 6) == 0
954 && (h->root.root.string[6] == '\0' || h->root.root.string[6] == '@'))
955 ret = call_ovl_stub;
956 }
957
958 if (h != NULL)
959 sym_type = h->type;
960 else
961 sym_type = ELF_ST_TYPE (sym->st_info);
962
963 r_type = ELF32_R_TYPE (irela->r_info);
964 branch = FALSE;
965 hint = FALSE;
966 call = FALSE;
967 if (r_type == R_SPU_REL16 || r_type == R_SPU_ADDR16)
968 {
969 if (contents == NULL)
970 {
971 contents = insn;
972 if (!bfd_get_section_contents (input_section->owner,
973 input_section,
974 contents,
975 irela->r_offset, 4))
976 return stub_error;
977 }
978 else
979 contents += irela->r_offset;
980
981 branch = is_branch (contents);
982 hint = is_hint (contents);
983 if (branch || hint)
984 {
985 call = (contents[0] & 0xfd) == 0x31;
986 if (call
987 && sym_type != STT_FUNC
988 && contents != insn)
989 {
990 /* It's common for people to write assembly and forget
991 to give function symbols the right type. Handle
992 calls to such symbols, but warn so that (hopefully)
993 people will fix their code. We need the symbol
994 type to be correct to distinguish function pointer
995 initialisation from other pointer initialisations. */
996 const char *sym_name;
997
998 if (h != NULL)
999 sym_name = h->root.root.string;
1000 else
1001 {
1002 Elf_Internal_Shdr *symtab_hdr;
1003 symtab_hdr = &elf_tdata (input_section->owner)->symtab_hdr;
1004 sym_name = bfd_elf_sym_name (input_section->owner,
1005 symtab_hdr,
1006 sym,
1007 sym_sec);
1008 }
1009 (*_bfd_error_handler) (_("warning: call to non-function"
1010 " symbol %s defined in %B"),
1011 sym_sec->owner, sym_name);
1012
1013 }
1014 }
1015 }
1016
1017 if ((!branch && htab->params->ovly_flavour == ovly_soft_icache)
1018 || (sym_type != STT_FUNC
1019 && !(branch || hint)
1020 && (sym_sec->flags & SEC_CODE) == 0))
1021 return no_stub;
1022
1023 /* Usually, symbols in non-overlay sections don't need stubs. */
1024 if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index == 0
1025 && !htab->params->non_overlay_stubs)
1026 return ret;
1027
1028 /* A reference from some other section to a symbol in an overlay
1029 section needs a stub. */
1030 if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index
1031 != spu_elf_section_data (input_section->output_section)->u.o.ovl_index)
1032 {
1033 unsigned int lrlive = 0;
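/* Branch instructions may carry lr liveness hints (emitted via
   .brinfo) in bits 4-6 of the second instruction byte; build_stub
   cross-checks these against its own analysis. */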
1034 if (branch)
1035 lrlive = (contents[1] & 0x70) >> 4;
1036
1037 if (!lrlive && (call || sym_type == STT_FUNC))
1038 ret = call_ovl_stub;
1039 else
1040 ret = br000_ovl_stub + lrlive;
1041 }
1042
1043 /* If this insn isn't a branch then we are possibly taking the
1044 address of a function and passing it out somehow. Soft-icache code
1045 always generates inline code to do indirect branches. */
1046 if (!(branch || hint)
1047 && sym_type == STT_FUNC
1048 && htab->params->ovly_flavour != ovly_soft_icache)
1049 ret = nonovl_stub;
1050
1051 return ret;
1052 }
1053
1054 static bfd_boolean
1055 count_stub (struct spu_link_hash_table *htab,
1056 bfd *ibfd,
1057 asection *isec,
1058 enum _stub_type stub_type,
1059 struct elf_link_hash_entry *h,
1060 const Elf_Internal_Rela *irela)
1061 {
1062 unsigned int ovl = 0;
1063 struct got_entry *g, **head;
1064 bfd_vma addend;
1065
1066 /* If this instruction is a branch or call, we need a stub
1067 for it. One stub per function per overlay.
1068 If it isn't a branch, then we are taking the address of
1069 this function so need a stub in the non-overlay area
1070 for it. One stub per function. */
1071 if (stub_type != nonovl_stub)
1072 ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;
1073
1074 if (h != NULL)
1075 head = &h->got.glist;
1076 else
1077 {
1078 if (elf_local_got_ents (ibfd) == NULL)
1079 {
1080 bfd_size_type amt = (elf_tdata (ibfd)->symtab_hdr.sh_info
1081 * sizeof (*elf_local_got_ents (ibfd)));
1082 elf_local_got_ents (ibfd) = bfd_zmalloc (amt);
1083 if (elf_local_got_ents (ibfd) == NULL)
1084 return FALSE;
1085 }
1086 head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
1087 }
1088
1089 if (htab->params->ovly_flavour == ovly_soft_icache)
1090 {
1091 htab->stub_count[ovl] += 1;
1092 return TRUE;
1093 }
1094
1095 addend = 0;
1096 if (irela != NULL)
1097 addend = irela->r_addend;
1098
1099 if (ovl == 0)
1100 {
1101 struct got_entry *gnext;
1102
1103 for (g = *head; g != NULL; g = g->next)
1104 if (g->addend == addend && g->ovl == 0)
1105 break;
1106
1107 if (g == NULL)
1108 {
1109 /* Need a new non-overlay area stub. Zap other stubs. */
1110 for (g = *head; g != NULL; g = gnext)
1111 {
1112 gnext = g->next;
1113 if (g->addend == addend)
1114 {
1115 htab->stub_count[g->ovl] -= 1;
1116 free (g);
1117 }
1118 }
1119 }
1120 }
1121 else
1122 {
1123 for (g = *head; g != NULL; g = g->next)
1124 if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
1125 break;
1126 }
1127
1128 if (g == NULL)
1129 {
1130 g = bfd_malloc (sizeof *g);
1131 if (g == NULL)
1132 return FALSE;
1133 g->ovl = ovl;
1134 g->addend = addend;
1135 g->stub_addr = (bfd_vma) -1;
1136 g->next = *head;
1137 *head = g;
1138
1139 htab->stub_count[ovl] += 1;
1140 }
1141
1142 return TRUE;
1143 }
1144
1145 /* Support two sizes of overlay stubs, a slower, more compact stub of two
1146 instructions, and a faster stub of four instructions.
1147 Soft-icache stubs are four or eight words. */
1148
1149 static unsigned int
1150 ovl_stub_size (struct spu_elf_params *params)
1151 {
1152 return 16 << params->ovly_flavour >> params->compact_stub;
1153 }
1154
1155 static unsigned int
1156 ovl_stub_size_log2 (struct spu_elf_params *params)
1157 {
1158 return 4 + params->ovly_flavour - params->compact_stub;
1159 }
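/* The two functions above encode four stub sizes (ovly_normal is 0
   and ovly_soft_icache is 1 in the shifts; compact_stub is 0 or 1):

   flavour           compact  size      log2
   ovly_normal       no       16 bytes  4
   ovly_normal       yes       8 bytes  3
   ovly_soft_icache  no       32 bytes  5
   ovly_soft_icache  yes      16 bytes  4  */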
1160
1161 /* Two-instruction overlay stubs look like:
1162
1163 brsl $75,__ovly_load
1164 .word target_ovl_and_address
1165
1166 ovl_and_address is a word with the overlay number in the top 14 bits
1167 and local store address in the bottom 18 bits.
1168
1169 Four-instruction overlay stubs look like:
1170
1171 ila $78,ovl_number
1172 lnop
1173 ila $79,target_address
1174 br __ovly_load
1175
1176 Software icache stubs are:
1177
1178 .word target_index
1179 .word target_ia
1180 .word lrlive_branchlocalstoreaddr
1181 brasl $75,__icache_br_handler
1182 .quad xor_pattern
1183 */
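/* A minimal sketch (editorial; the helper name is hypothetical, not
   part of this file) of how an overlay manager might unpack the
   ovl_and_address word written for the compact two-instruction stub:

     static void
     decode_ovl_and_address (unsigned int word,
                             unsigned int *ovl, unsigned int *addr)
     {
       *ovl = word >> 18;         overlay number, top 14 bits
       *addr = word & 0x3ffff;    local store address, low 18 bits
     }

   build_stub below writes this word as
   (dest & 0x3ffff) | (dest_ovl << 18). */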
1184
1185 static bfd_boolean
1186 build_stub (struct bfd_link_info *info,
1187 bfd *ibfd,
1188 asection *isec,
1189 enum _stub_type stub_type,
1190 struct elf_link_hash_entry *h,
1191 const Elf_Internal_Rela *irela,
1192 bfd_vma dest,
1193 asection *dest_sec)
1194 {
1195 struct spu_link_hash_table *htab = spu_hash_table (info);
1196 unsigned int ovl, dest_ovl, set_id;
1197 struct got_entry *g, **head;
1198 asection *sec;
1199 bfd_vma addend, from, to, br_dest, patt;
1200 unsigned int lrlive;
1201
1202 ovl = 0;
1203 if (stub_type != nonovl_stub)
1204 ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;
1205
1206 if (h != NULL)
1207 head = &h->got.glist;
1208 else
1209 head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
1210
1211 addend = 0;
1212 if (irela != NULL)
1213 addend = irela->r_addend;
1214
1215 if (htab->params->ovly_flavour == ovly_soft_icache)
1216 {
1217 g = bfd_malloc (sizeof *g);
1218 if (g == NULL)
1219 return FALSE;
1220 g->ovl = ovl;
1221 g->br_addr = 0;
1222 if (irela != NULL)
1223 g->br_addr = (irela->r_offset
1224 + isec->output_offset
1225 + isec->output_section->vma);
1226 g->next = *head;
1227 *head = g;
1228 }
1229 else
1230 {
1231 for (g = *head; g != NULL; g = g->next)
1232 if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
1233 break;
1234 if (g == NULL)
1235 abort ();
1236
1237 if (g->ovl == 0 && ovl != 0)
1238 return TRUE;
1239
1240 if (g->stub_addr != (bfd_vma) -1)
1241 return TRUE;
1242 }
1243
1244 sec = htab->stub_sec[ovl];
1245 dest += dest_sec->output_offset + dest_sec->output_section->vma;
1246 from = sec->size + sec->output_offset + sec->output_section->vma;
1247 g->stub_addr = from;
1248 to = (htab->ovly_entry[0]->root.u.def.value
1249 + htab->ovly_entry[0]->root.u.def.section->output_offset
1250 + htab->ovly_entry[0]->root.u.def.section->output_section->vma);
1251
1252 if (((dest | to | from) & 3) != 0)
1253 {
1254 htab->stub_err = 1;
1255 return FALSE;
1256 }
1257 dest_ovl = spu_elf_section_data (dest_sec->output_section)->u.o.ovl_index;
1258
1259 if (htab->params->ovly_flavour == ovly_normal
1260 && !htab->params->compact_stub)
1261 {
1262 bfd_put_32 (sec->owner, ILA + ((dest_ovl << 7) & 0x01ffff80) + 78,
1263 sec->contents + sec->size);
1264 bfd_put_32 (sec->owner, LNOP,
1265 sec->contents + sec->size + 4);
1266 bfd_put_32 (sec->owner, ILA + ((dest << 7) & 0x01ffff80) + 79,
1267 sec->contents + sec->size + 8);
1268 if (!BRA_STUBS)
1269 bfd_put_32 (sec->owner, BR + (((to - (from + 12)) << 5) & 0x007fff80),
1270 sec->contents + sec->size + 12);
1271 else
1272 bfd_put_32 (sec->owner, BRA + ((to << 5) & 0x007fff80),
1273 sec->contents + sec->size + 12);
1274 }
1275 else if (htab->params->ovly_flavour == ovly_normal
1276 && htab->params->compact_stub)
1277 {
1278 if (!BRA_STUBS)
1279 bfd_put_32 (sec->owner, BRSL + (((to - from) << 5) & 0x007fff80) + 75,
1280 sec->contents + sec->size);
1281 else
1282 bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
1283 sec->contents + sec->size);
1284 bfd_put_32 (sec->owner, (dest & 0x3ffff) | (dest_ovl << 18),
1285 sec->contents + sec->size + 4);
1286 }
1287 else if (htab->params->ovly_flavour == ovly_soft_icache
1288 && htab->params->compact_stub)
1289 {
1290 lrlive = 0;
1291 if (stub_type == nonovl_stub)
1292 ;
1293 else if (stub_type == call_ovl_stub)
1294 /* A brsl makes lr live and *(*sp+16) is live.
1295 Tail calls have the same liveness. */
1296 lrlive = 5;
1297 else if (!htab->params->lrlive_analysis)
1298 /* Assume stack frame and lr save. */
1299 lrlive = 1;
1300 else if (irela != NULL)
1301 {
1302 /* Analyse branch instructions. */
1303 struct function_info *caller;
1304 bfd_vma off;
1305
1306 caller = find_function (isec, irela->r_offset, info);
1307 if (caller->start == NULL)
1308 off = irela->r_offset;
1309 else
1310 {
1311 struct function_info *found = NULL;
1312
1313 /* Find the earliest piece of this function that
1314 has frame adjusting instructions. We might
1315 see dynamic frame adjustment (eg. for alloca)
1316 in some later piece, but functions using
1317 alloca always set up a frame earlier. Frame
1318 setup instructions are always in one piece. */
1319 if (caller->lr_store != (bfd_vma) -1
1320 || caller->sp_adjust != (bfd_vma) -1)
1321 found = caller;
1322 while (caller->start != NULL)
1323 {
1324 caller = caller->start;
1325 if (caller->lr_store != (bfd_vma) -1
1326 || caller->sp_adjust != (bfd_vma) -1)
1327 found = caller;
1328 }
1329 if (found != NULL)
1330 caller = found;
1331 off = (bfd_vma) -1;
1332 }
1333
1334 if (off > caller->sp_adjust)
1335 {
1336 if (off > caller->lr_store)
1337 /* Only *(*sp+16) is live. */
1338 lrlive = 1;
1339 else
1340 /* If no lr save, then we must be in a
1341 leaf function with a frame.
1342 lr is still live. */
1343 lrlive = 4;
1344 }
1345 else if (off > caller->lr_store)
1346 {
1347 /* Between lr save and stack adjust. */
1348 lrlive = 3;
1349 /* This should never happen since prologues won't
1350 be split here. */
1351 BFD_ASSERT (0);
1352 }
1353 else
1354 /* On entry to function. */
1355 lrlive = 5;
1356
1357 if (stub_type != br000_ovl_stub
1358 && lrlive != stub_type - br000_ovl_stub)
1359 info->callbacks->einfo (_("%A:0x%v lrlive .brinfo (%u) differs "
1360 "from analysis (%u)\n"),
1361 isec, irela->r_offset, lrlive,
1362 stub_type - br000_ovl_stub);
1363 }
1364
1365 /* If given lrlive info via .brinfo, use it. */
1366 if (stub_type > br000_ovl_stub)
1367 lrlive = stub_type - br000_ovl_stub;
1368
1369 if (ovl == 0)
1370 to = (htab->ovly_entry[1]->root.u.def.value
1371 + htab->ovly_entry[1]->root.u.def.section->output_offset
1372 + htab->ovly_entry[1]->root.u.def.section->output_section->vma);
1373
1374 /* The branch that uses this stub goes to stub_addr + 4. We'll
1375 set up an xor pattern that can be used by the icache manager
1376 to modify this branch to go directly to its destination. */
1377 g->stub_addr += 4;
1378 br_dest = g->stub_addr;
1379 if (irela == NULL)
1380 {
1381 /* Except in the case of _SPUEAR_ stubs, the branch in
1382 question is the one in the stub itself. */
1383 BFD_ASSERT (stub_type == nonovl_stub);
1384 g->br_addr = g->stub_addr;
1385 br_dest = to;
1386 }
1387
1388 set_id = ((dest_ovl - 1) >> htab->num_lines_log2) + 1;
1389 bfd_put_32 (sec->owner, (set_id << 18) | (dest & 0x3ffff),
1390 sec->contents + sec->size);
1391 bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
1392 sec->contents + sec->size + 4);
1393 bfd_put_32 (sec->owner, (lrlive << 29) | (g->br_addr & 0x3ffff),
1394 sec->contents + sec->size + 8);
1395 patt = dest ^ br_dest;
1396 if (irela != NULL && ELF32_R_TYPE (irela->r_info) == R_SPU_REL16)
1397 patt = (dest - g->br_addr) ^ (br_dest - g->br_addr);
1398 bfd_put_32 (sec->owner, (patt << 5) & 0x007fff80,
1399 sec->contents + sec->size + 12);
1400
1401 if (ovl == 0)
1402 /* Extra space for linked list entries. */
1403 sec->size += 16;
1404 }
1405 else
1406 abort ();
1407
1408 sec->size += ovl_stub_size (htab->params);
1409
1410 if (htab->params->emit_stub_syms)
1411 {
1412 size_t len;
1413 char *name;
1414 int add;
1415
1416 len = 8 + sizeof (".ovl_call.") - 1;
1417 if (h != NULL)
1418 len += strlen (h->root.root.string);
1419 else
1420 len += 8 + 1 + 8;
1421 add = 0;
1422 if (irela != NULL)
1423 add = (int) irela->r_addend & 0xffffffff;
1424 if (add != 0)
1425 len += 1 + 8;
1426 name = bfd_malloc (len + 1);
1427 if (name == NULL)
1428 return FALSE;
1429
1430 sprintf (name, "%08x.ovl_call.", g->ovl);
1431 if (h != NULL)
1432 strcpy (name + 8 + sizeof (".ovl_call.") - 1, h->root.root.string);
1433 else
1434 sprintf (name + 8 + sizeof (".ovl_call.") - 1, "%x:%x",
1435 dest_sec->id & 0xffffffff,
1436 (int) ELF32_R_SYM (irela->r_info) & 0xffffffff);
1437 if (add != 0)
1438 sprintf (name + len - 9, "+%x", add);
1439
1440 h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
1441 free (name);
1442 if (h == NULL)
1443 return FALSE;
1444 if (h->root.type == bfd_link_hash_new)
1445 {
1446 h->root.type = bfd_link_hash_defined;
1447 h->root.u.def.section = sec;
1448 h->size = ovl_stub_size (htab->params);
1449 h->root.u.def.value = sec->size - h->size;
1450 h->type = STT_FUNC;
1451 h->ref_regular = 1;
1452 h->def_regular = 1;
1453 h->ref_regular_nonweak = 1;
1454 h->forced_local = 1;
1455 h->non_elf = 0;
1456 }
1457 }
1458
1459 return TRUE;
1460 }
1461
1462 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
1463 symbols. */
1464
1465 static bfd_boolean
1466 allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1467 {
1468 /* Symbols starting with _SPUEAR_ need a stub because they may be
1469 invoked by the PPU. */
1470 struct bfd_link_info *info = inf;
1471 struct spu_link_hash_table *htab = spu_hash_table (info);
1472 asection *sym_sec;
1473
1474 if ((h->root.type == bfd_link_hash_defined
1475 || h->root.type == bfd_link_hash_defweak)
1476 && h->def_regular
1477 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
1478 && (sym_sec = h->root.u.def.section) != NULL
1479 && sym_sec->output_section != bfd_abs_section_ptr
1480 && spu_elf_section_data (sym_sec->output_section) != NULL
1481 && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
1482 || htab->params->non_overlay_stubs))
1483 {
1484 return count_stub (htab, NULL, NULL, nonovl_stub, h, NULL);
1485 }
1486
1487 return TRUE;
1488 }
1489
1490 static bfd_boolean
1491 build_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1492 {
1493 /* Symbols starting with _SPUEAR_ need a stub because they may be
1494 invoked by the PPU. */
1495 struct bfd_link_info *info = inf;
1496 struct spu_link_hash_table *htab = spu_hash_table (info);
1497 asection *sym_sec;
1498
1499 if ((h->root.type == bfd_link_hash_defined
1500 || h->root.type == bfd_link_hash_defweak)
1501 && h->def_regular
1502 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
1503 && (sym_sec = h->root.u.def.section) != NULL
1504 && sym_sec->output_section != bfd_abs_section_ptr
1505 && spu_elf_section_data (sym_sec->output_section) != NULL
1506 && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
1507 || htab->params->non_overlay_stubs))
1508 {
1509 return build_stub (info, NULL, NULL, nonovl_stub, h, NULL,
1510 h->root.u.def.value, sym_sec);
1511 }
1512
1513 return TRUE;
1514 }
1515
1516 /* Size or build stubs. */
1517
1518 static bfd_boolean
1519 process_stubs (struct bfd_link_info *info, bfd_boolean build)
1520 {
1521 struct spu_link_hash_table *htab = spu_hash_table (info);
1522 bfd *ibfd;
1523
1524 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
1525 {
1526 extern const bfd_target bfd_elf32_spu_vec;
1527 Elf_Internal_Shdr *symtab_hdr;
1528 asection *isec;
1529 Elf_Internal_Sym *local_syms = NULL;
1530
1531 if (ibfd->xvec != &bfd_elf32_spu_vec)
1532 continue;
1533
1534 /* We'll need the symbol table in a second. */
1535 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
1536 if (symtab_hdr->sh_info == 0)
1537 continue;
1538
1539 /* Walk over each section attached to the input bfd. */
1540 for (isec = ibfd->sections; isec != NULL; isec = isec->next)
1541 {
1542 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
1543
1544 /* If there aren't any relocs, then there's nothing more to do. */
1545 if ((isec->flags & SEC_RELOC) == 0
1546 || isec->reloc_count == 0)
1547 continue;
1548
1549 if (!maybe_needs_stubs (isec))
1550 continue;
1551
1552 /* Get the relocs. */
1553 internal_relocs = _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
1554 info->keep_memory);
1555 if (internal_relocs == NULL)
1556 goto error_ret_free_local;
1557
1558 /* Now examine each relocation. */
1559 irela = internal_relocs;
1560 irelaend = irela + isec->reloc_count;
1561 for (; irela < irelaend; irela++)
1562 {
1563 enum elf_spu_reloc_type r_type;
1564 unsigned int r_indx;
1565 asection *sym_sec;
1566 Elf_Internal_Sym *sym;
1567 struct elf_link_hash_entry *h;
1568 enum _stub_type stub_type;
1569
1570 r_type = ELF32_R_TYPE (irela->r_info);
1571 r_indx = ELF32_R_SYM (irela->r_info);
1572
1573 if (r_type >= R_SPU_max)
1574 {
1575 bfd_set_error (bfd_error_bad_value);
1576 error_ret_free_internal:
1577 if (elf_section_data (isec)->relocs != internal_relocs)
1578 free (internal_relocs);
1579 error_ret_free_local:
1580 if (local_syms != NULL
1581 && (symtab_hdr->contents
1582 != (unsigned char *) local_syms))
1583 free (local_syms);
1584 return FALSE;
1585 }
1586
1587 /* Determine the reloc target section. */
1588 if (!get_sym_h (&h, &sym, &sym_sec, &local_syms, r_indx, ibfd))
1589 goto error_ret_free_internal;
1590
1591 stub_type = needs_ovl_stub (h, sym, sym_sec, isec, irela,
1592 NULL, info);
1593 if (stub_type == no_stub)
1594 continue;
1595 else if (stub_type == stub_error)
1596 goto error_ret_free_internal;
1597
1598 if (htab->stub_count == NULL)
1599 {
1600 bfd_size_type amt;
1601 amt = (htab->num_overlays + 1) * sizeof (*htab->stub_count);
1602 htab->stub_count = bfd_zmalloc (amt);
1603 if (htab->stub_count == NULL)
1604 goto error_ret_free_internal;
1605 }
1606
1607 if (!build)
1608 {
1609 if (!count_stub (htab, ibfd, isec, stub_type, h, irela))
1610 goto error_ret_free_internal;
1611 }
1612 else
1613 {
1614 bfd_vma dest;
1615
1616 if (h != NULL)
1617 dest = h->root.u.def.value;
1618 else
1619 dest = sym->st_value;
1620 dest += irela->r_addend;
1621 if (!build_stub (info, ibfd, isec, stub_type, h, irela,
1622 dest, sym_sec))
1623 goto error_ret_free_internal;
1624 }
1625 }
1626
1627 /* We're done with the internal relocs, free them. */
1628 if (elf_section_data (isec)->relocs != internal_relocs)
1629 free (internal_relocs);
1630 }
1631
1632 if (local_syms != NULL
1633 && symtab_hdr->contents != (unsigned char *) local_syms)
1634 {
1635 if (!info->keep_memory)
1636 free (local_syms);
1637 else
1638 symtab_hdr->contents = (unsigned char *) local_syms;
1639 }
1640 }
1641
1642 return TRUE;
1643 }
1644
1645 /* Allocate space for overlay call and return stubs.
1646 Return 0 on error, 1 if no overlays, 2 otherwise. */
1647
1648 int
1649 spu_elf_size_stubs (struct bfd_link_info *info)
1650 {
1651 struct spu_link_hash_table *htab;
1652 bfd *ibfd;
1653 bfd_size_type amt;
1654 flagword flags;
1655 unsigned int i;
1656 asection *stub;
1657
1658 if (!process_stubs (info, FALSE))
1659 return 0;
1660
1661 htab = spu_hash_table (info);
1662 elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, info);
1663 if (htab->stub_err)
1664 return 0;
1665
1666 ibfd = info->input_bfds;
1667 if (htab->stub_count != NULL)
1668 {
1669 amt = (htab->num_overlays + 1) * sizeof (*htab->stub_sec);
1670 htab->stub_sec = bfd_zmalloc (amt);
1671 if (htab->stub_sec == NULL)
1672 return 0;
1673
1674 flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
1675 | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
1676 stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
1677 htab->stub_sec[0] = stub;
1678 if (stub == NULL
1679 || !bfd_set_section_alignment (ibfd, stub,
1680 ovl_stub_size_log2 (htab->params)))
1681 return 0;
1682 stub->size = htab->stub_count[0] * ovl_stub_size (htab->params);
1683 if (htab->params->ovly_flavour == ovly_soft_icache)
1684 /* Extra space for linked list entries. */
1685 stub->size += htab->stub_count[0] * 16;
1686
1687 for (i = 0; i < htab->num_overlays; ++i)
1688 {
1689 asection *osec = htab->ovl_sec[i];
1690 unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
1691 stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
1692 htab->stub_sec[ovl] = stub;
1693 if (stub == NULL
1694 || !bfd_set_section_alignment (ibfd, stub,
1695 ovl_stub_size_log2 (htab->params)))
1696 return 0;
1697 stub->size = htab->stub_count[ovl] * ovl_stub_size (htab->params);
1698 }
1699 }
1700
1701 if (htab->params->ovly_flavour == ovly_soft_icache)
1702 {
1703 /* Space for icache manager tables.
1704 a) Tag array, one quadword per cache line.
1705 b) Rewrite "to" list, one quadword per cache line.
1706 c) Rewrite "from" list, one byte per outgoing branch (rounded up to
1707 a power-of-two number of full quadwords) per cache line. */
1708
1709 flags = SEC_ALLOC;
1710 htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
1711 if (htab->ovtab == NULL
1712 || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
1713 return 0;
1714
1715 htab->ovtab->size = (16 + 16 + (16 << htab->fromelem_size_log2))
1716 << htab->num_lines_log2;
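/* Sizing example (illustrative parameters): with num_lines = 32
   (num_lines_log2 = 5) and fromelem_size_log2 = 4, the table is
   (16 + 16 + (16 << 4)) << 5 = 288 * 32 = 9216 bytes: per cache
   line, one quadword of tags, one quadword of "to" entries, and
   sixteen quadwords of "from" entries. */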
1717
1718 flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
1719 htab->init = bfd_make_section_anyway_with_flags (ibfd, ".ovini", flags);
1720 if (htab->init == NULL
1721 || !bfd_set_section_alignment (ibfd, htab->init, 4))
1722 return 0;
1723
1724 htab->init->size = 16;
1725 }
1726 else if (htab->stub_count == NULL)
1727 return 1;
1728 else
1729 {
1730 /* htab->ovtab consists of two arrays.
1731 . struct {
1732 . u32 vma;
1733 . u32 size;
1734 . u32 file_off;
1735 . u32 buf;
1736 . } _ovly_table[];
1737 .
1738 . struct {
1739 . u32 mapped;
1740 . } _ovly_buf_table[];
1741 . */
1742
1743 flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
1744 htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
1745 if (htab->ovtab == NULL
1746 || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
1747 return 0;
1748
1749 htab->ovtab->size = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
1750 }
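/* Sizing example for the layout above: three overlays in two
   buffers give 3 * 16 + 16 + 2 * 4 = 72 bytes, i.e. one 16-byte
   _ovly_table entry per overlay (the four u32 fields), one spare
   16-byte entry, and one u32 _ovly_buf_table entry per buffer. */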
1751
1752 htab->toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
1753 if (htab->toe == NULL
1754 || !bfd_set_section_alignment (ibfd, htab->toe, 4))
1755 return 0;
1756 htab->toe->size = 16;
1757
1758 return 2;
1759 }
1760
1761 /* Called from ld to place overlay manager data sections. This is done
1762 after the overlay manager itself is loaded, mainly so that the
1763 linker's htab->init section is placed after any other .ovl.init
1764 sections. */
1765
1766 void
1767 spu_elf_place_overlay_data (struct bfd_link_info *info)
1768 {
1769 struct spu_link_hash_table *htab = spu_hash_table (info);
1770 unsigned int i;
1771
1772 if (htab->stub_sec != NULL)
1773 {
1774 (*htab->params->place_spu_section) (htab->stub_sec[0], NULL, ".text");
1775
1776 for (i = 0; i < htab->num_overlays; ++i)
1777 {
1778 asection *osec = htab->ovl_sec[i];
1779 unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
1780 (*htab->params->place_spu_section) (htab->stub_sec[ovl], osec, NULL);
1781 }
1782 }
1783
1784 if (htab->params->ovly_flavour == ovly_soft_icache)
1785 (*htab->params->place_spu_section) (htab->init, NULL, ".ovl.init");
1786
1787 if (htab->ovtab != NULL)
1788 {
1789 const char *ovout = ".data";
1790 if (htab->params->ovly_flavour == ovly_soft_icache)
1791 ovout = ".bss";
1792 (*htab->params->place_spu_section) (htab->ovtab, NULL, ovout);
1793 }
1794
1795 if (htab->toe != NULL)
1796 (*htab->params->place_spu_section) (htab->toe, NULL, ".toe");
1797 }
1798
1799 /* Functions to handle embedded spu_ovl.o object. */
1800
1801 static void *
1802 ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
1803 {
1804 return stream;
1805 }
1806
1807 static file_ptr
1808 ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
1809 void *stream,
1810 void *buf,
1811 file_ptr nbytes,
1812 file_ptr offset)
1813 {
1814 struct _ovl_stream *os;
1815 size_t count;
1816 size_t max;
1817
1818 os = (struct _ovl_stream *) stream;
1819 max = (const char *) os->end - (const char *) os->start;
1820
1821 if ((ufile_ptr) offset >= max)
1822 return 0;
1823
1824 count = nbytes;
1825 if (count > max - offset)
1826 count = max - offset;
1827
1828 memcpy (buf, (const char *) os->start + offset, count);
1829 return count;
1830 }
1831
1832 bfd_boolean
1833 spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
1834 {
1835 *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
1836 "elf32-spu",
1837 ovl_mgr_open,
1838 (void *) stream,
1839 ovl_mgr_pread,
1840 NULL,
1841 NULL);
1842 return *ovl_bfd != NULL;
1843 }
1844
1845 static unsigned int
1846 overlay_index (asection *sec)
1847 {
1848 if (sec == NULL
1849 || sec->output_section == bfd_abs_section_ptr)
1850 return 0;
1851 return spu_elf_section_data (sec->output_section)->u.o.ovl_index;
1852 }
1853
1854 /* Define an STT_OBJECT symbol. */
1855
1856 static struct elf_link_hash_entry *
1857 define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
1858 {
1859 struct elf_link_hash_entry *h;
1860
1861 h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
1862 if (h == NULL)
1863 return NULL;
1864
1865 if (h->root.type != bfd_link_hash_defined
1866 || !h->def_regular)
1867 {
1868 h->root.type = bfd_link_hash_defined;
1869 h->root.u.def.section = htab->ovtab;
1870 h->type = STT_OBJECT;
1871 h->ref_regular = 1;
1872 h->def_regular = 1;
1873 h->ref_regular_nonweak = 1;
1874 h->non_elf = 0;
1875 }
1876 else if (h->root.u.def.section->owner != NULL)
1877 {
1878 (*_bfd_error_handler) (_("%B is not allowed to define %s"),
1879 h->root.u.def.section->owner,
1880 h->root.root.string);
1881 bfd_set_error (bfd_error_bad_value);
1882 return NULL;
1883 }
1884 else
1885 {
1886 (*_bfd_error_handler) (_("you are not allowed to define %s in a script"),
1887 h->root.root.string);
1888 bfd_set_error (bfd_error_bad_value);
1889 return NULL;
1890 }
1891
1892 return h;
1893 }
1894
1895 /* Fill in all stubs and the overlay tables. */
1896
1897 static bfd_boolean
1898 spu_elf_build_stubs (struct bfd_link_info *info)
1899 {
1900 struct spu_link_hash_table *htab = spu_hash_table (info);
1901 struct elf_link_hash_entry *h;
1902 bfd_byte *p;
1903 asection *s;
1904 bfd *obfd;
1905 unsigned int i;
1906
1907 if (htab->num_overlays != 0)
1908 {
1909 for (i = 0; i < 2; i++)
1910 {
1911 h = htab->ovly_entry[i];
1912 if (h != NULL
1913 && (h->root.type == bfd_link_hash_defined
1914 || h->root.type == bfd_link_hash_defweak)
1915 && h->def_regular)
1916 {
1917 s = h->root.u.def.section->output_section;
1918 if (spu_elf_section_data (s)->u.o.ovl_index)
1919 {
1920 (*_bfd_error_handler) (_("%s in overlay section"),
1921 h->root.root.string);
1922 bfd_set_error (bfd_error_bad_value);
1923 return FALSE;
1924 }
1925 }
1926 }
1927 }
1928
1929 if (htab->stub_sec != NULL)
1930 {
1931 for (i = 0; i <= htab->num_overlays; i++)
1932 if (htab->stub_sec[i]->size != 0)
1933 {
1934 htab->stub_sec[i]->contents = bfd_zalloc (htab->stub_sec[i]->owner,
1935 htab->stub_sec[i]->size);
1936 if (htab->stub_sec[i]->contents == NULL)
1937 return FALSE;
1938 htab->stub_sec[i]->rawsize = htab->stub_sec[i]->size;
1939 htab->stub_sec[i]->size = 0;
1940 }
1941
1942 /* Fill in all the stubs. */
1943 process_stubs (info, TRUE);
1944 if (!htab->stub_err)
1945 elf_link_hash_traverse (&htab->elf, build_spuear_stubs, info);
1946
1947 if (htab->stub_err)
1948 {
1949 (*_bfd_error_handler) (_("overlay stub relocation overflow"));
1950 bfd_set_error (bfd_error_bad_value);
1951 return FALSE;
1952 }
1953
1954 for (i = 0; i <= htab->num_overlays; i++)
1955 {
1956 if (htab->stub_sec[i]->size != htab->stub_sec[i]->rawsize)
1957 {
1958 (*_bfd_error_handler) (_("stubs don't match calculated size"));
1959 bfd_set_error (bfd_error_bad_value);
1960 return FALSE;
1961 }
1962 htab->stub_sec[i]->rawsize = 0;
1963 }
1964 }
1965
1966 if (htab->ovtab == NULL || htab->ovtab->size == 0)
1967 return TRUE;
1968
1969 htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
1970 if (htab->ovtab->contents == NULL)
1971 return FALSE;
1972
1973 p = htab->ovtab->contents;
1974 if (htab->params->ovly_flavour == ovly_soft_icache)
1975 {
1976 bfd_vma off;
1977
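/* A sketch of the soft-icache control area laid out below: each of
   the 2**num_lines_log2 cache lines gets a 16-byte entry in the tag
   array, then a 16-byte entry in the rewrite_to array, then
   16 << fromelem_size_log2 bytes of rewrite_from entries.  The sizes
   assigned to the symbols below follow directly from this layout.  */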
1978 h = define_ovtab_symbol (htab, "__icache_tag_array");
1979 if (h == NULL)
1980 return FALSE;
1981 h->root.u.def.value = 0;
1982 h->size = 16 << htab->num_lines_log2;
1983 off = h->size;
1984
1985 h = define_ovtab_symbol (htab, "__icache_tag_array_size");
1986 if (h == NULL)
1987 return FALSE;
1988 h->root.u.def.value = 16 << htab->num_lines_log2;
1989 h->root.u.def.section = bfd_abs_section_ptr;
1990
1991 h = define_ovtab_symbol (htab, "__icache_rewrite_to");
1992 if (h == NULL)
1993 return FALSE;
1994 h->root.u.def.value = off;
1995 h->size = 16 << htab->num_lines_log2;
1996 off += h->size;
1997
1998 h = define_ovtab_symbol (htab, "__icache_rewrite_to_size");
1999 if (h == NULL)
2000 return FALSE;
2001 h->root.u.def.value = 16 << htab->num_lines_log2;
2002 h->root.u.def.section = bfd_abs_section_ptr;
2003
2004 h = define_ovtab_symbol (htab, "__icache_rewrite_from");
2005 if (h == NULL)
2006 return FALSE;
2007 h->root.u.def.value = off;
2008 h->size = 16 << (htab->fromelem_size_log2 + htab->num_lines_log2);
2009 off += h->size;
2010
2011 h = define_ovtab_symbol (htab, "__icache_rewrite_from_size");
2012 if (h == NULL)
2013 return FALSE;
2014 h->root.u.def.value = 16 << (htab->fromelem_size_log2
2015 + htab->num_lines_log2);
2016 h->root.u.def.section = bfd_abs_section_ptr;
2017
2018 h = define_ovtab_symbol (htab, "__icache_log2_fromelemsize");
2019 if (h == NULL)
2020 return FALSE;
2021 h->root.u.def.value = htab->fromelem_size_log2;
2022 h->root.u.def.section = bfd_abs_section_ptr;
2023
2024 h = define_ovtab_symbol (htab, "__icache_base");
2025 if (h == NULL)
2026 return FALSE;
2027 h->root.u.def.value = htab->ovl_sec[0]->vma;
2028 h->root.u.def.section = bfd_abs_section_ptr;
2029 h->size = htab->num_buf << htab->line_size_log2;
2030
2031 h = define_ovtab_symbol (htab, "__icache_linesize");
2032 if (h == NULL)
2033 return FALSE;
2034 h->root.u.def.value = 1 << htab->line_size_log2;
2035 h->root.u.def.section = bfd_abs_section_ptr;
2036
2037 h = define_ovtab_symbol (htab, "__icache_log2_linesize");
2038 if (h == NULL)
2039 return FALSE;
2040 h->root.u.def.value = htab->line_size_log2;
2041 h->root.u.def.section = bfd_abs_section_ptr;
2042
2043 h = define_ovtab_symbol (htab, "__icache_neg_log2_linesize");
2044 if (h == NULL)
2045 return FALSE;
2046 h->root.u.def.value = -htab->line_size_log2;
2047 h->root.u.def.section = bfd_abs_section_ptr;
2048
2049 h = define_ovtab_symbol (htab, "__icache_cachesize");
2050 if (h == NULL)
2051 return FALSE;
2052 h->root.u.def.value = 1 << (htab->num_lines_log2 + htab->line_size_log2);
2053 h->root.u.def.section = bfd_abs_section_ptr;
2054
2055 h = define_ovtab_symbol (htab, "__icache_log2_cachesize");
2056 if (h == NULL)
2057 return FALSE;
2058 h->root.u.def.value = htab->num_lines_log2 + htab->line_size_log2;
2059 h->root.u.def.section = bfd_abs_section_ptr;
2060
2061 h = define_ovtab_symbol (htab, "__icache_neg_log2_cachesize");
2062 if (h == NULL)
2063 return FALSE;
2064 h->root.u.def.value = -(htab->num_lines_log2 + htab->line_size_log2);
2065 h->root.u.def.section = bfd_abs_section_ptr;
2066
2067 if (htab->init != NULL && htab->init->size != 0)
2068 {
2069 htab->init->contents = bfd_zalloc (htab->init->owner,
2070 htab->init->size);
2071 if (htab->init->contents == NULL)
2072 return FALSE;
2073
2074 h = define_ovtab_symbol (htab, "__icache_fileoff");
2075 if (h == NULL)
2076 return FALSE;
2077 h->root.u.def.value = 0;
2078 h->root.u.def.section = htab->init;
2079 h->size = 8;
2080 }
2081 }
2082 else
2083 {
2084 /* Write out _ovly_table. */
2085 /* Set low bit of .size to mark non-overlay area as present. */
2086 p[7] = 1;
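/* Each _ovly_table entry is 16 bytes.  Viewed as a struct (a sketch
   for illustration, not a declaration from this file):

   struct ovly_entry {
     u32 vma;       (written below)
     u32 size;      (rounded up to 16; low bit means "present")
     u32 file_off;  (filled in by spu_elf_modify_program_headers)
     u32 buf;       (overlay buffer number)
   };

   Entry 0 describes the non-overlay area, hence p[7] = 1 above marks
   it as always present.  */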
2087 obfd = htab->ovtab->output_section->owner;
2088 for (s = obfd->sections; s != NULL; s = s->next)
2089 {
2090 unsigned int ovl_index = spu_elf_section_data (s)->u.o.ovl_index;
2091
2092 if (ovl_index != 0)
2093 {
2094 unsigned long off = ovl_index * 16;
2095 unsigned int ovl_buf = spu_elf_section_data (s)->u.o.ovl_buf;
2096
2097 bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
2098 bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16,
2099 p + off + 4);
2100 /* file_off written later in spu_elf_modify_program_headers. */
2101 bfd_put_32 (htab->ovtab->owner, ovl_buf, p + off + 12);
2102 }
2103 }
2104
2105 h = define_ovtab_symbol (htab, "_ovly_table");
2106 if (h == NULL)
2107 return FALSE;
2108 h->root.u.def.value = 16;
2109 h->size = htab->num_overlays * 16;
2110
2111 h = define_ovtab_symbol (htab, "_ovly_table_end");
2112 if (h == NULL)
2113 return FALSE;
2114 h->root.u.def.value = htab->num_overlays * 16 + 16;
2115 h->size = 0;
2116
2117 h = define_ovtab_symbol (htab, "_ovly_buf_table");
2118 if (h == NULL)
2119 return FALSE;
2120 h->root.u.def.value = htab->num_overlays * 16 + 16;
2121 h->size = htab->num_buf * 4;
2122
2123 h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
2124 if (h == NULL)
2125 return FALSE;
2126 h->root.u.def.value = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
2127 h->size = 0;
2128 }
2129
2130 h = define_ovtab_symbol (htab, "_EAR_");
2131 if (h == NULL)
2132 return FALSE;
2133 h->root.u.def.section = htab->toe;
2134 h->root.u.def.value = 0;
2135 h->size = 16;
2136
2137 return TRUE;
2138 }
2139
2140 /* Check that all loadable section VMAs lie in the range
2141 LO .. HI inclusive, and stash some parameters for --auto-overlay. */
2142
2143 asection *
2144 spu_elf_check_vma (struct bfd_link_info *info)
2145 {
2146 struct elf_segment_map *m;
2147 unsigned int i;
2148 struct spu_link_hash_table *htab = spu_hash_table (info);
2149 bfd *abfd = info->output_bfd;
2150 bfd_vma hi = htab->params->local_store_hi;
2151 bfd_vma lo = htab->params->local_store_lo;
2152
2153 htab->local_store = hi + 1 - lo;
2154
2155 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
2156 if (m->p_type == PT_LOAD)
2157 for (i = 0; i < m->count; i++)
2158 if (m->sections[i]->size != 0
2159 && (m->sections[i]->vma < lo
2160 || m->sections[i]->vma > hi
2161 || m->sections[i]->vma + m->sections[i]->size - 1 > hi))
2162 return m->sections[i];
2163
2164 return NULL;
2165 }
2166
2167 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
2168 Search for stack adjusting insns, and return the sp delta.
2169 If a store of lr is found save the instruction offset to *LR_STORE.
2170 If a stack adjusting instruction is found, save that offset to
2171 *SP_ADJUST. */
2172
2173 static int
2174 find_function_stack_adjust (asection *sec,
2175 bfd_vma offset,
2176 bfd_vma *lr_store,
2177 bfd_vma *sp_adjust)
2178 {
2179 int reg[128];
2180
2181 memset (reg, 0, sizeof (reg));
2182 for ( ; offset + 4 <= sec->size; offset += 4)
2183 {
2184 unsigned char buf[4];
2185 int rt, ra;
2186 int imm;
2187
2188 /* Assume no relocs on stack adjusting insns. */
2189 if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
2190 break;
2191
2192 rt = buf[3] & 0x7f;
2193 ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);
2194
2195 if (buf[0] == 0x24 /* stqd */)
2196 {
2197 if (rt == 0 /* lr */ && ra == 1 /* sp */)
2198 *lr_store = offset;
2199 continue;
2200 }
2201
2202 /* Partly decoded immediate field. */
2203 imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);
2204
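/* Worked example (an illustration, not from the original source):
   "ai $sp,$sp,-32" encodes as the bytes 1c f8 00 81.  Then
   rt = 0x81 & 0x7f = 1, ra = ((0x00 & 0x3f) << 1) | (0x81 >> 7) = 1,
   and imm = (0xf8 << 9) | (0x00 << 1) | (0x81 >> 7) = 0x1f001.
   The ai case below shifts imm right by 7 giving 0x3e0, and the
   10-bit sign extension yields 0x3e0 - 0x400 = -32.  */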
2205 if (buf[0] == 0x1c /* ai */)
2206 {
2207 imm >>= 7;
2208 imm = (imm ^ 0x200) - 0x200;
2209 reg[rt] = reg[ra] + imm;
2210
2211 if (rt == 1 /* sp */)
2212 {
2213 if (reg[rt] > 0)
2214 break;
2215 *sp_adjust = offset;
2216 return reg[rt];
2217 }
2218 }
2219 else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
2220 {
2221 int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);
2222
2223 reg[rt] = reg[ra] + reg[rb];
2224 if (rt == 1)
2225 {
2226 if (reg[rt] > 0)
2227 break;
2228 *sp_adjust = offset;
2229 return reg[rt];
2230 }
2231 }
2232 else if (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */)
2233 {
2234 int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);
2235
2236 reg[rt] = reg[rb] - reg[ra];
2237 if (rt == 1)
2238 {
2239 if (reg[rt] > 0)
2240 break;
2241 *sp_adjust = offset;
2242 return reg[rt];
2243 }
2244 }
2245 else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
2246 {
2247 if (buf[0] >= 0x42 /* ila */)
2248 imm |= (buf[0] & 1) << 17;
2249 else
2250 {
2251 imm &= 0xffff;
2252
2253 if (buf[0] == 0x40 /* il */)
2254 {
2255 if ((buf[1] & 0x80) == 0)
2256 continue;
2257 imm = (imm ^ 0x8000) - 0x8000;
2258 }
2259 else if ((buf[1] & 0x80) == 0 /* ilhu */)
2260 imm <<= 16;
2261 }
2262 reg[rt] = imm;
2263 continue;
2264 }
2265 else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
2266 {
2267 reg[rt] |= imm & 0xffff;
2268 continue;
2269 }
2270 else if (buf[0] == 0x04 /* ori */)
2271 {
2272 imm >>= 7;
2273 imm = (imm ^ 0x200) - 0x200;
2274 reg[rt] = reg[ra] | imm;
2275 continue;
2276 }
2277 else if (buf[0] == 0x32 && (buf[1] & 0x80) != 0 /* fsmbi */)
2278 {
2279 reg[rt] = ( ((imm & 0x8000) ? 0xff000000 : 0)
2280 | ((imm & 0x4000) ? 0x00ff0000 : 0)
2281 | ((imm & 0x2000) ? 0x0000ff00 : 0)
2282 | ((imm & 0x1000) ? 0x000000ff : 0));
2283 continue;
2284 }
2285 else if (buf[0] == 0x16 /* andbi */)
2286 {
2287 imm >>= 7;
2288 imm &= 0xff;
2289 imm |= imm << 8;
2290 imm |= imm << 16;
2291 reg[rt] = reg[ra] & imm;
2292 continue;
2293 }
2294 else if (buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
2295 {
2296 /* Used in pic reg load. Say rt is trashed. Won't be used
2297 in stack adjust, but we need to continue past this branch. */
2298 reg[rt] = 0;
2299 continue;
2300 }
2301 else if (is_branch (buf) || is_indirect_branch (buf))
2302 /* If we hit a branch then we must be out of the prologue. */
2303 break;
2304 }
2305
2306 return 0;
2307 }
2308
2309 /* qsort predicate to sort symbols by section and value. */
2310
2311 static Elf_Internal_Sym *sort_syms_syms;
2312 static asection **sort_syms_psecs;
2313
2314 static int
2315 sort_syms (const void *a, const void *b)
2316 {
2317 Elf_Internal_Sym *const *s1 = a;
2318 Elf_Internal_Sym *const *s2 = b;
2319 asection *sec1, *sec2;
2320 bfd_signed_vma delta;
2321
2322 sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
2323 sec2 = sort_syms_psecs[*s2 - sort_syms_syms];
2324
2325 if (sec1 != sec2)
2326 return sec1->index - sec2->index;
2327
2328 delta = (*s1)->st_value - (*s2)->st_value;
2329 if (delta != 0)
2330 return delta < 0 ? -1 : 1;
2331
2332 delta = (*s2)->st_size - (*s1)->st_size;
2333 if (delta != 0)
2334 return delta < 0 ? -1 : 1;
2335
2336 return *s1 < *s2 ? -1 : 1;
2337 }
2338
2339 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
2340 entries for section SEC. */
2341
2342 static struct spu_elf_stack_info *
2343 alloc_stack_info (asection *sec, int max_fun)
2344 {
2345 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2346 bfd_size_type amt;
2347
2348 amt = sizeof (struct spu_elf_stack_info);
2349 amt += (max_fun - 1) * sizeof (struct function_info);
2350 sec_data->u.i.stack_info = bfd_zmalloc (amt);
2351 if (sec_data->u.i.stack_info != NULL)
2352 sec_data->u.i.stack_info->max_fun = max_fun;
2353 return sec_data->u.i.stack_info;
2354 }
2355
2356 /* Add a new struct function_info describing a (part of a) function
2357 starting at SYM_H. Keep the array sorted by address. */
2358
2359 static struct function_info *
2360 maybe_insert_function (asection *sec,
2361 void *sym_h,
2362 bfd_boolean global,
2363 bfd_boolean is_func)
2364 {
2365 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2366 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2367 int i;
2368 bfd_vma off, size;
2369
2370 if (sinfo == NULL)
2371 {
2372 sinfo = alloc_stack_info (sec, 20);
2373 if (sinfo == NULL)
2374 return NULL;
2375 }
2376
2377 if (!global)
2378 {
2379 Elf_Internal_Sym *sym = sym_h;
2380 off = sym->st_value;
2381 size = sym->st_size;
2382 }
2383 else
2384 {
2385 struct elf_link_hash_entry *h = sym_h;
2386 off = h->root.u.def.value;
2387 size = h->size;
2388 }
2389
2390 for (i = sinfo->num_fun; --i >= 0; )
2391 if (sinfo->fun[i].lo <= off)
2392 break;
2393
2394 if (i >= 0)
2395 {
2396 /* Don't add another entry for an alias, but do update some
2397 info. */
2398 if (sinfo->fun[i].lo == off)
2399 {
2400 /* Prefer globals over local syms. */
2401 if (global && !sinfo->fun[i].global)
2402 {
2403 sinfo->fun[i].global = TRUE;
2404 sinfo->fun[i].u.h = sym_h;
2405 }
2406 if (is_func)
2407 sinfo->fun[i].is_func = TRUE;
2408 return &sinfo->fun[i];
2409 }
2410 /* Ignore a zero-size symbol inside an existing function. */
2411 else if (sinfo->fun[i].hi > off && size == 0)
2412 return &sinfo->fun[i];
2413 }
2414
2415 if (sinfo->num_fun >= sinfo->max_fun)
2416 {
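/* Grow the array by half as much again plus 20 entries; the memset
   below zeroes the new tail so the added function_info slots start
   out clean.  */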
2417 bfd_size_type amt = sizeof (struct spu_elf_stack_info);
2418 bfd_size_type old = amt;
2419
2420 old += (sinfo->max_fun - 1) * sizeof (struct function_info);
2421 sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
2422 amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
2423 sinfo = bfd_realloc (sinfo, amt);
2424 if (sinfo == NULL)
2425 return NULL;
2426 memset ((char *) sinfo + old, 0, amt - old);
2427 sec_data->u.i.stack_info = sinfo;
2428 }
2429
2430 if (++i < sinfo->num_fun)
2431 memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
2432 (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
2433 sinfo->fun[i].is_func = is_func;
2434 sinfo->fun[i].global = global;
2435 sinfo->fun[i].sec = sec;
2436 if (global)
2437 sinfo->fun[i].u.h = sym_h;
2438 else
2439 sinfo->fun[i].u.sym = sym_h;
2440 sinfo->fun[i].lo = off;
2441 sinfo->fun[i].hi = off + size;
2442 sinfo->fun[i].lr_store = -1;
2443 sinfo->fun[i].sp_adjust = -1;
2444 sinfo->fun[i].stack = -find_function_stack_adjust (sec, off,
2445 &sinfo->fun[i].lr_store,
2446 &sinfo->fun[i].sp_adjust);
2447 sinfo->num_fun += 1;
2448 return &sinfo->fun[i];
2449 }
2450
2451 /* Return the name of FUN. */
2452
2453 static const char *
2454 func_name (struct function_info *fun)
2455 {
2456 asection *sec;
2457 bfd *ibfd;
2458 Elf_Internal_Shdr *symtab_hdr;
2459
2460 while (fun->start != NULL)
2461 fun = fun->start;
2462
2463 if (fun->global)
2464 return fun->u.h->root.root.string;
2465
2466 sec = fun->sec;
2467 if (fun->u.sym->st_name == 0)
2468 {
2469 size_t len = strlen (sec->name);
2470 char *name = bfd_malloc (len + 10);
2471 if (name == NULL)
2472 return "(null)";
2473 sprintf (name, "%s+%lx", sec->name,
2474 (unsigned long) fun->u.sym->st_value & 0xffffffff);
2475 return name;
2476 }
2477 ibfd = sec->owner;
2478 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2479 return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
2480 }
2481
2482 /* Read the instruction at OFF in SEC. Return true iff the instruction
2483 is a nop, lnop, or stop 0 (all zero insn). */
2484
2485 static bfd_boolean
2486 is_nop (asection *sec, bfd_vma off)
2487 {
2488 unsigned char insn[4];
2489
2490 if (off + 4 > sec->size
2491 || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
2492 return FALSE;
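/* Assuming the usual SPU encodings, "nop" is 0x40200000 and "lnop"
   is 0x00200000; masking insn[0] with 0xbf folds away the one bit
   that distinguishes them, so the first test below matches both
   while ignoring the unused operand fields.  */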
2493 if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
2494 return TRUE;
2495 if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
2496 return TRUE;
2497 return FALSE;
2498 }
2499
2500 /* Extend the range of FUN to cover nop padding up to LIMIT.
2501 Return TRUE iff some instruction other than a NOP was found. */
2502
2503 static bfd_boolean
2504 insns_at_end (struct function_info *fun, bfd_vma limit)
2505 {
2506 bfd_vma off = (fun->hi + 3) & -4;
2507
2508 while (off < limit && is_nop (fun->sec, off))
2509 off += 4;
2510 if (off < limit)
2511 {
2512 fun->hi = off;
2513 return TRUE;
2514 }
2515 fun->hi = limit;
2516 return FALSE;
2517 }
2518
2519 /* Check and fix overlapping function ranges. Return TRUE iff there
2520 are gaps in the current info we have about functions in SEC. */
2521
2522 static bfd_boolean
2523 check_function_ranges (asection *sec, struct bfd_link_info *info)
2524 {
2525 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2526 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2527 int i;
2528 bfd_boolean gaps = FALSE;
2529
2530 if (sinfo == NULL)
2531 return FALSE;
2532
2533 for (i = 1; i < sinfo->num_fun; i++)
2534 if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
2535 {
2536 /* Fix overlapping symbols. */
2537 const char *f1 = func_name (&sinfo->fun[i - 1]);
2538 const char *f2 = func_name (&sinfo->fun[i]);
2539
2540 info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
2541 sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
2542 }
2543 else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
2544 gaps = TRUE;
2545
2546 if (sinfo->num_fun == 0)
2547 gaps = TRUE;
2548 else
2549 {
2550 if (sinfo->fun[0].lo != 0)
2551 gaps = TRUE;
2552 if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
2553 {
2554 const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);
2555
2556 info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
2557 sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
2558 }
2559 else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
2560 gaps = TRUE;
2561 }
2562 return gaps;
2563 }
2564
2565 /* Search current function info for a function that contains address
2566 OFFSET in section SEC. */
2567
2568 static struct function_info *
2569 find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
2570 {
2571 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2572 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2573 int lo, hi, mid;
2574
2575 lo = 0;
2576 hi = sinfo->num_fun;
2577 while (lo < hi)
2578 {
2579 mid = (lo + hi) / 2;
2580 if (offset < sinfo->fun[mid].lo)
2581 hi = mid;
2582 else if (offset >= sinfo->fun[mid].hi)
2583 lo = mid + 1;
2584 else
2585 return &sinfo->fun[mid];
2586 }
2587 info->callbacks->einfo (_("%A:0x%v not found in function table\n"),
2588 sec, offset);
2589 bfd_set_error (bfd_error_bad_value);
2590 return NULL;
2591 }
2592
2593 /* Add CALLEE to CALLER call list if not already present. Return TRUE
2594 if CALLEE was new. If this function returns FALSE, CALLEE should
2595 be freed. */
2596
2597 static bfd_boolean
2598 insert_callee (struct function_info *caller, struct call_info *callee)
2599 {
2600 struct call_info **pp, *p;
2601
2602 for (pp = &caller->call_list; (p = *pp) != NULL; pp = &p->next)
2603 if (p->fun == callee->fun)
2604 {
2605 /* Tail calls use less stack than normal calls. Retain entry
2606 for normal call over one for tail call. */
2607 p->is_tail &= callee->is_tail;
2608 if (!p->is_tail)
2609 {
2610 p->fun->start = NULL;
2611 p->fun->is_func = TRUE;
2612 }
2613 p->count += callee->count;
2614 /* Reorder list so most recent call is first. */
2615 *pp = p->next;
2616 p->next = caller->call_list;
2617 caller->call_list = p;
2618 return FALSE;
2619 }
2620 callee->next = caller->call_list;
2621 caller->call_list = callee;
2622 return TRUE;
2623 }
2624
2625 /* Copy CALL and insert the copy into CALLER. */
2626
2627 static bfd_boolean
2628 copy_callee (struct function_info *caller, const struct call_info *call)
2629 {
2630 struct call_info *callee;
2631 callee = bfd_malloc (sizeof (*callee));
2632 if (callee == NULL)
2633 return FALSE;
2634 *callee = *call;
2635 if (!insert_callee (caller, callee))
2636 free (callee);
2637 return TRUE;
2638 }
2639
2640 /* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2641 overlay stub sections. */
2642
2643 static bfd_boolean
2644 interesting_section (asection *s)
2645 {
2646 return (s->output_section != bfd_abs_section_ptr
2647 && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_IN_MEMORY))
2648 == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2649 && s->size != 0);
2650 }
2651
2652 /* Rummage through the relocs for SEC, looking for function calls.
2653 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
2654 mark destination symbols on calls as being functions. Also
2655 look at branches, which may be tail calls or go to hot/cold
2656 section part of same function. */
2657
2658 static bfd_boolean
2659 mark_functions_via_relocs (asection *sec,
2660 struct bfd_link_info *info,
2661 int call_tree)
2662 {
2663 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
2664 Elf_Internal_Shdr *symtab_hdr;
2665 void *psyms;
2666 unsigned int priority = 0;
2667 static bfd_boolean warned;
2668
2669 if (!interesting_section (sec)
2670 || sec->reloc_count == 0)
2671 return TRUE;
2672
2673 internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
2674 info->keep_memory);
2675 if (internal_relocs == NULL)
2676 return FALSE;
2677
2678 symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
2679 psyms = &symtab_hdr->contents;
2680 irela = internal_relocs;
2681 irelaend = irela + sec->reloc_count;
2682 for (; irela < irelaend; irela++)
2683 {
2684 enum elf_spu_reloc_type r_type;
2685 unsigned int r_indx;
2686 asection *sym_sec;
2687 Elf_Internal_Sym *sym;
2688 struct elf_link_hash_entry *h;
2689 bfd_vma val;
2690 bfd_boolean nonbranch, is_call;
2691 struct function_info *caller;
2692 struct call_info *callee;
2693
2694 r_type = ELF32_R_TYPE (irela->r_info);
2695 nonbranch = r_type != R_SPU_REL16 && r_type != R_SPU_ADDR16;
2696
2697 r_indx = ELF32_R_SYM (irela->r_info);
2698 if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
2699 return FALSE;
2700
2701 if (sym_sec == NULL
2702 || sym_sec->output_section == bfd_abs_section_ptr)
2703 continue;
2704
2705 is_call = FALSE;
2706 if (!nonbranch)
2707 {
2708 unsigned char insn[4];
2709
2710 if (!bfd_get_section_contents (sec->owner, sec, insn,
2711 irela->r_offset, 4))
2712 return FALSE;
2713 if (is_branch (insn))
2714 {
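/* 0x31 is "brasl" and 0x33 is "brsl" (assuming the usual SPU branch
   opcodes); masking with 0xfd catches both and-link forms, while
   plain "br"/"bra" (0x32/0x30) fail the test and end up treated as
   tail calls or hot/cold branches.  */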
2715 is_call = (insn[0] & 0xfd) == 0x31;
2716 priority = insn[1] & 0x0f;
2717 priority <<= 8;
2718 priority |= insn[2];
2719 priority <<= 8;
2720 priority |= insn[3];
2721 priority >>= 7;
2722 if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2723 != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2724 {
2725 if (!warned)
2726 info->callbacks->einfo
2727 (_("%B(%A+0x%v): call to non-code section"
2728 " %B(%A), analysis incomplete\n"),
2729 sec->owner, sec, irela->r_offset,
2730 sym_sec->owner, sym_sec);
2731 warned = TRUE;
2732 continue;
2733 }
2734 }
2735 else
2736 {
2737 nonbranch = TRUE;
2738 if (is_hint (insn))
2739 continue;
2740 }
2741 }
2742
2743 if (nonbranch)
2744 {
2745 /* For --auto-overlay, count possible stubs we need for
2746 function pointer references. */
2747 unsigned int sym_type;
2748 if (h)
2749 sym_type = h->type;
2750 else
2751 sym_type = ELF_ST_TYPE (sym->st_info);
2752 if (sym_type == STT_FUNC)
2753 {
2754 if (call_tree && spu_hash_table (info)->params->auto_overlay)
2755 spu_hash_table (info)->non_ovly_stub += 1;
2756 /* If the symbol type is STT_FUNC then this must be a
2757 function pointer initialisation. */
2758 continue;
2759 }
2760 /* Ignore data references. */
2761 if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2762 != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2763 continue;
2764 /* Otherwise we probably have a jump table reloc for
2765 a switch statement or some other reference to a
2766 code label. */
2767 }
2768
2769 if (h)
2770 val = h->root.u.def.value;
2771 else
2772 val = sym->st_value;
2773 val += irela->r_addend;
2774
2775 if (!call_tree)
2776 {
2777 struct function_info *fun;
2778
2779 if (irela->r_addend != 0)
2780 {
2781 Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
2782 if (fake == NULL)
2783 return FALSE;
2784 fake->st_value = val;
2785 fake->st_shndx
2786 = _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
2787 sym = fake;
2788 }
2789 if (sym)
2790 fun = maybe_insert_function (sym_sec, sym, FALSE, is_call);
2791 else
2792 fun = maybe_insert_function (sym_sec, h, TRUE, is_call);
2793 if (fun == NULL)
2794 return FALSE;
2795 if (irela->r_addend != 0
2796 && fun->u.sym != sym)
2797 free (sym);
2798 continue;
2799 }
2800
2801 caller = find_function (sec, irela->r_offset, info);
2802 if (caller == NULL)
2803 return FALSE;
2804 callee = bfd_malloc (sizeof *callee);
2805 if (callee == NULL)
2806 return FALSE;
2807
2808 callee->fun = find_function (sym_sec, val, info);
2809 if (callee->fun == NULL)
2810 return FALSE;
2811 callee->is_tail = !is_call;
2812 callee->is_pasted = FALSE;
2813 callee->broken_cycle = FALSE;
2814 callee->priority = priority;
2815 callee->count = nonbranch ? 0 : 1;
2816 if (callee->fun->last_caller != sec)
2817 {
2818 callee->fun->last_caller = sec;
2819 callee->fun->call_count += 1;
2820 }
2821 if (!insert_callee (caller, callee))
2822 free (callee);
2823 else if (!is_call
2824 && !callee->fun->is_func
2825 && callee->fun->stack == 0)
2826 {
2827 /* This is either a tail call or a branch from one part of
2828 the function to another, ie. hot/cold section. If the
2829 destination has been called by some other function then
2830 it is a separate function. We also assume that functions
2831 are not split across input files. */
2832 if (sec->owner != sym_sec->owner)
2833 {
2834 callee->fun->start = NULL;
2835 callee->fun->is_func = TRUE;
2836 }
2837 else if (callee->fun->start == NULL)
2838 {
2839 struct function_info *caller_start = caller;
2840 while (caller_start->start)
2841 caller_start = caller_start->start;
2842
2843 if (caller_start != callee->fun)
2844 callee->fun->start = caller_start;
2845 }
2846 else
2847 {
2848 struct function_info *callee_start;
2849 struct function_info *caller_start;
2850 callee_start = callee->fun;
2851 while (callee_start->start)
2852 callee_start = callee_start->start;
2853 caller_start = caller;
2854 while (caller_start->start)
2855 caller_start = caller_start->start;
2856 if (caller_start != callee_start)
2857 {
2858 callee->fun->start = NULL;
2859 callee->fun->is_func = TRUE;
2860 }
2861 }
2862 }
2863 }
2864
2865 return TRUE;
2866 }
2867
2868 /* Handle something like .init or .fini, which has a piece of a function.
2869 These sections are pasted together to form a single function. */
2870
2871 static bfd_boolean
2872 pasted_function (asection *sec)
2873 {
2874 struct bfd_link_order *l;
2875 struct _spu_elf_section_data *sec_data;
2876 struct spu_elf_stack_info *sinfo;
2877 Elf_Internal_Sym *fake;
2878 struct function_info *fun, *fun_start;
2879
2880 fake = bfd_zmalloc (sizeof (*fake));
2881 if (fake == NULL)
2882 return FALSE;
2883 fake->st_value = 0;
2884 fake->st_size = sec->size;
2885 fake->st_shndx
2886 = _bfd_elf_section_from_bfd_section (sec->owner, sec);
2887 fun = maybe_insert_function (sec, fake, FALSE, FALSE);
2888 if (!fun)
2889 return FALSE;
2890
2891 /* Find a function immediately preceding this section. */
2892 fun_start = NULL;
2893 for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
2894 {
2895 if (l->u.indirect.section == sec)
2896 {
2897 if (fun_start != NULL)
2898 {
2899 struct call_info *callee = bfd_malloc (sizeof *callee);
2900 if (callee == NULL)
2901 return FALSE;
2902
2903 fun->start = fun_start;
2904 callee->fun = fun;
2905 callee->is_tail = TRUE;
2906 callee->is_pasted = TRUE;
2907 callee->broken_cycle = FALSE;
2908 callee->priority = 0;
2909 callee->count = 1;
2910 if (!insert_callee (fun_start, callee))
2911 free (callee);
2912 return TRUE;
2913 }
2914 break;
2915 }
2916 if (l->type == bfd_indirect_link_order
2917 && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
2918 && (sinfo = sec_data->u.i.stack_info) != NULL
2919 && sinfo->num_fun != 0)
2920 fun_start = &sinfo->fun[sinfo->num_fun - 1];
2921 }
2922
2923 /* Don't return an error if we did not find a function preceding this
2924 section. The section may have incorrect flags. */
2925 return TRUE;
2926 }
2927
2928 /* Map address ranges in code sections to functions. */
2929
2930 static bfd_boolean
2931 discover_functions (struct bfd_link_info *info)
2932 {
2933 bfd *ibfd;
2934 int bfd_idx;
2935 Elf_Internal_Sym ***psym_arr;
2936 asection ***sec_arr;
2937 bfd_boolean gaps = FALSE;
2938
2939 bfd_idx = 0;
2940 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2941 bfd_idx++;
2942
2943 psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
2944 if (psym_arr == NULL)
2945 return FALSE;
2946 sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
2947 if (sec_arr == NULL)
2948 return FALSE;
2949
2950 for (ibfd = info->input_bfds, bfd_idx = 0;
2951 ibfd != NULL;
2952 ibfd = ibfd->link_next, bfd_idx++)
2953 {
2954 extern const bfd_target bfd_elf32_spu_vec;
2955 Elf_Internal_Shdr *symtab_hdr;
2956 asection *sec;
2957 size_t symcount;
2958 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2959 asection **psecs, **p;
2960
2961 if (ibfd->xvec != &bfd_elf32_spu_vec)
2962 continue;
2963
2964 /* Read all the symbols. */
2965 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2966 symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
2967 if (symcount == 0)
2968 {
2969 if (!gaps)
2970 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2971 if (interesting_section (sec))
2972 {
2973 gaps = TRUE;
2974 break;
2975 }
2976 continue;
2977 }
2978
2979 if (symtab_hdr->contents != NULL)
2980 {
2981 /* Don't use cached symbols since the generic ELF linker
2982 code only reads local symbols, and we need globals too. */
2983 free (symtab_hdr->contents);
2984 symtab_hdr->contents = NULL;
2985 }
2986 syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
2987 NULL, NULL, NULL);
2988 symtab_hdr->contents = (void *) syms;
2989 if (syms == NULL)
2990 return FALSE;
2991
2992 /* Select defined function symbols that are going to be output. */
2993 psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
2994 if (psyms == NULL)
2995 return FALSE;
2996 psym_arr[bfd_idx] = psyms;
2997 psecs = bfd_malloc (symcount * sizeof (*psecs));
2998 if (psecs == NULL)
2999 return FALSE;
3000 sec_arr[bfd_idx] = psecs;
3001 for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
3002 if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
3003 || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
3004 {
3005 asection *s;
3006
3007 *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
3008 if (s != NULL && interesting_section (s))
3009 *psy++ = sy;
3010 }
3011 symcount = psy - psyms;
3012 *psy = NULL;
3013
3014 /* Sort them by section and offset within section. */
3015 sort_syms_syms = syms;
3016 sort_syms_psecs = psecs;
3017 qsort (psyms, symcount, sizeof (*psyms), sort_syms);
3018
3019 /* Now inspect the function symbols. */
3020 for (psy = psyms; psy < psyms + symcount; )
3021 {
3022 asection *s = psecs[*psy - syms];
3023 Elf_Internal_Sym **psy2;
3024
3025 for (psy2 = psy; ++psy2 < psyms + symcount; )
3026 if (psecs[*psy2 - syms] != s)
3027 break;
3028
3029 if (!alloc_stack_info (s, psy2 - psy))
3030 return FALSE;
3031 psy = psy2;
3032 }
3033
3034 /* First install info about properly typed and sized functions.
3035 In an ideal world this will cover all code sections, except
3036 when partitioning functions into hot and cold sections,
3037 and the horrible pasted together .init and .fini functions. */
3038 for (psy = psyms; psy < psyms + symcount; ++psy)
3039 {
3040 sy = *psy;
3041 if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
3042 {
3043 asection *s = psecs[sy - syms];
3044 if (!maybe_insert_function (s, sy, FALSE, TRUE))
3045 return FALSE;
3046 }
3047 }
3048
3049 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
3050 if (interesting_section (sec))
3051 gaps |= check_function_ranges (sec, info);
3052 }
3053
3054 if (gaps)
3055 {
3056 /* See if we can discover more function symbols by looking at
3057 relocations. */
3058 for (ibfd = info->input_bfds, bfd_idx = 0;
3059 ibfd != NULL;
3060 ibfd = ibfd->link_next, bfd_idx++)
3061 {
3062 asection *sec;
3063
3064 if (psym_arr[bfd_idx] == NULL)
3065 continue;
3066
3067 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3068 if (!mark_functions_via_relocs (sec, info, FALSE))
3069 return FALSE;
3070 }
3071
3072 for (ibfd = info->input_bfds, bfd_idx = 0;
3073 ibfd != NULL;
3074 ibfd = ibfd->link_next, bfd_idx++)
3075 {
3076 Elf_Internal_Shdr *symtab_hdr;
3077 asection *sec;
3078 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
3079 asection **psecs;
3080
3081 if ((psyms = psym_arr[bfd_idx]) == NULL)
3082 continue;
3083
3084 psecs = sec_arr[bfd_idx];
3085
3086 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
3087 syms = (Elf_Internal_Sym *) symtab_hdr->contents;
3088
3089 gaps = FALSE;
3090 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
3091 if (interesting_section (sec))
3092 gaps |= check_function_ranges (sec, info);
3093 if (!gaps)
3094 continue;
3095
3096 /* Finally, install all globals. */
3097 for (psy = psyms; (sy = *psy) != NULL; ++psy)
3098 {
3099 asection *s;
3100
3101 s = psecs[sy - syms];
3102
3103 /* Global syms might be improperly typed functions. */
3104 if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
3105 && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
3106 {
3107 if (!maybe_insert_function (s, sy, FALSE, FALSE))
3108 return FALSE;
3109 }
3110 }
3111 }
3112
3113 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3114 {
3115 extern const bfd_target bfd_elf32_spu_vec;
3116 asection *sec;
3117
3118 if (ibfd->xvec != &bfd_elf32_spu_vec)
3119 continue;
3120
3121 /* Some of the symbols we've installed as marking the
3122 beginning of functions may have a size of zero. Extend
3123 the range of such functions to the beginning of the
3124 next symbol of interest. */
3125 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3126 if (interesting_section (sec))
3127 {
3128 struct _spu_elf_section_data *sec_data;
3129 struct spu_elf_stack_info *sinfo;
3130
3131 sec_data = spu_elf_section_data (sec);
3132 sinfo = sec_data->u.i.stack_info;
3133 if (sinfo != NULL && sinfo->num_fun != 0)
3134 {
3135 int fun_idx;
3136 bfd_vma hi = sec->size;
3137
3138 for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
3139 {
3140 sinfo->fun[fun_idx].hi = hi;
3141 hi = sinfo->fun[fun_idx].lo;
3142 }
3143
3144 sinfo->fun[0].lo = 0;
3145 }
3146 /* No symbols in this section. Must be .init or .fini
3147 or something similar. */
3148 else if (!pasted_function (sec))
3149 return FALSE;
3150 }
3151 }
3152 }
3153
3154 for (ibfd = info->input_bfds, bfd_idx = 0;
3155 ibfd != NULL;
3156 ibfd = ibfd->link_next, bfd_idx++)
3157 {
3158 if (psym_arr[bfd_idx] == NULL)
3159 continue;
3160
3161 free (psym_arr[bfd_idx]);
3162 free (sec_arr[bfd_idx]);
3163 }
3164
3165 free (psym_arr);
3166 free (sec_arr);
3167
3168 return TRUE;
3169 }
3170
3171 /* Iterate over all function_info we have collected, calling DOIT on
3172 each node if ROOT_ONLY is false. Only call DOIT on root nodes
3173 if ROOT_ONLY. */
3174
3175 static bfd_boolean
3176 for_each_node (bfd_boolean (*doit) (struct function_info *,
3177 struct bfd_link_info *,
3178 void *),
3179 struct bfd_link_info *info,
3180 void *param,
3181 int root_only)
3182 {
3183 bfd *ibfd;
3184
3185 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3186 {
3187 extern const bfd_target bfd_elf32_spu_vec;
3188 asection *sec;
3189
3190 if (ibfd->xvec != &bfd_elf32_spu_vec)
3191 continue;
3192
3193 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3194 {
3195 struct _spu_elf_section_data *sec_data;
3196 struct spu_elf_stack_info *sinfo;
3197
3198 if ((sec_data = spu_elf_section_data (sec)) != NULL
3199 && (sinfo = sec_data->u.i.stack_info) != NULL)
3200 {
3201 int i;
3202 for (i = 0; i < sinfo->num_fun; ++i)
3203 if (!root_only || !sinfo->fun[i].non_root)
3204 if (!doit (&sinfo->fun[i], info, param))
3205 return FALSE;
3206 }
3207 }
3208 }
3209 return TRUE;
3210 }
3211
3212 /* Transfer call info attached to struct function_info entries for
3213 all of a given function's sections to the first entry. */
3214
3215 static bfd_boolean
3216 transfer_calls (struct function_info *fun,
3217 struct bfd_link_info *info ATTRIBUTE_UNUSED,
3218 void *param ATTRIBUTE_UNUSED)
3219 {
3220 struct function_info *start = fun->start;
3221
3222 if (start != NULL)
3223 {
3224 struct call_info *call, *call_next;
3225
3226 while (start->start != NULL)
3227 start = start->start;
3228 for (call = fun->call_list; call != NULL; call = call_next)
3229 {
3230 call_next = call->next;
3231 if (!insert_callee (start, call))
3232 free (call);
3233 }
3234 fun->call_list = NULL;
3235 }
3236 return TRUE;
3237 }
3238
3239 /* Mark nodes in the call graph that are called by some other node. */
3240
3241 static bfd_boolean
3242 mark_non_root (struct function_info *fun,
3243 struct bfd_link_info *info ATTRIBUTE_UNUSED,
3244 void *param ATTRIBUTE_UNUSED)
3245 {
3246 struct call_info *call;
3247
3248 if (fun->visit1)
3249 return TRUE;
3250 fun->visit1 = TRUE;
3251 for (call = fun->call_list; call; call = call->next)
3252 {
3253 call->fun->non_root = TRUE;
3254 mark_non_root (call->fun, 0, 0);
3255 }
3256 return TRUE;
3257 }
3258
3259 /* Remove cycles from the call graph. Set depth of nodes. */
3260
3261 static bfd_boolean
3262 remove_cycles (struct function_info *fun,
3263 struct bfd_link_info *info,
3264 void *param)
3265 {
3266 struct call_info **callp, *call;
3267 unsigned int depth = *(unsigned int *) param;
3268 unsigned int max_depth = depth;
3269
3270 fun->depth = depth;
3271 fun->visit2 = TRUE;
3272 fun->marking = TRUE;
3273
3274 callp = &fun->call_list;
3275 while ((call = *callp) != NULL)
3276 {
3277 call->max_depth = depth + !call->is_pasted;
3278 if (!call->fun->visit2)
3279 {
3280 if (!remove_cycles (call->fun, info, &call->max_depth))
3281 return FALSE;
3282 if (max_depth < call->max_depth)
3283 max_depth = call->max_depth;
3284 }
3285 else if (call->fun->marking)
3286 {
3287 struct spu_link_hash_table *htab = spu_hash_table (info);
3288
3289 if (!htab->params->auto_overlay
3290 && htab->params->stack_analysis)
3291 {
3292 const char *f1 = func_name (fun);
3293 const char *f2 = func_name (call->fun);
3294
3295 info->callbacks->info (_("Stack analysis will ignore the call "
3296 "from %s to %s\n"),
3297 f1, f2);
3298 }
3299
3300 call->broken_cycle = TRUE;
3301 }
3302 callp = &call->next;
3303 }
3304 fun->marking = FALSE;
3305 *(unsigned int *) param = max_depth;
3306 return TRUE;
3307 }
3308
3309 /* Check that we actually visited all nodes in remove_cycles. If we
3310 didn't, then there is some cycle in the call graph not attached to
3311 any root node. Arbitrarily choose a node in the cycle as a new
3312 root and break the cycle. */
3313
3314 static bfd_boolean
3315 mark_detached_root (struct function_info *fun,
3316 struct bfd_link_info *info,
3317 void *param)
3318 {
3319 if (fun->visit2)
3320 return TRUE;
3321 fun->non_root = FALSE;
3322 *(unsigned int *) param = 0;
3323 return remove_cycles (fun, info, param);
3324 }
3325
3326 /* Populate call_list for each function. */
3327
3328 static bfd_boolean
3329 build_call_tree (struct bfd_link_info *info)
3330 {
3331 bfd *ibfd;
3332 unsigned int depth;
3333
3334 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3335 {
3336 extern const bfd_target bfd_elf32_spu_vec;
3337 asection *sec;
3338
3339 if (ibfd->xvec != &bfd_elf32_spu_vec)
3340 continue;
3341
3342 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3343 if (!mark_functions_via_relocs (sec, info, TRUE))
3344 return FALSE;
3345 }
3346
3347 /* Transfer call info from hot/cold section part of function
3348 to main entry. */
3349 if (!spu_hash_table (info)->params->auto_overlay
3350 && !for_each_node (transfer_calls, info, 0, FALSE))
3351 return FALSE;
3352
3353 /* Find the call graph root(s). */
3354 if (!for_each_node (mark_non_root, info, 0, FALSE))
3355 return FALSE;
3356
3357 /* Remove cycles from the call graph. We start from the root node(s)
3358 so that we break cycles in a reasonable place. */
3359 depth = 0;
3360 if (!for_each_node (remove_cycles, info, &depth, TRUE))
3361 return FALSE;
3362
3363 return for_each_node (mark_detached_root, info, &depth, FALSE);
3364 }
3365
3366 /* qsort predicate to sort calls by priority, max_depth then count. */
3367
3368 static int
3369 sort_calls (const void *a, const void *b)
3370 {
3371 struct call_info *const *c1 = a;
3372 struct call_info *const *c2 = b;
3373 int delta;
3374
3375 delta = (*c2)->priority - (*c1)->priority;
3376 if (delta != 0)
3377 return delta;
3378
3379 delta = (*c2)->max_depth - (*c1)->max_depth;
3380 if (delta != 0)
3381 return delta;
3382
3383 delta = (*c2)->count - (*c1)->count;
3384 if (delta != 0)
3385 return delta;
3386
3387 return (char *) c1 - (char *) c2;
3388 }
3389
3390 struct _mos_param {
3391 unsigned int max_overlay_size;
3392 };
3393
3394 /* Set linker_mark and gc_mark on any sections that we will put in
3395 overlays. These flags are used by the generic ELF linker, but we
3396 won't be continuing on to bfd_elf_final_link so it is OK to use
3397 them. linker_mark is clear before we get here. Set segment_mark
3398 on sections that are part of a pasted function (excluding the last
3399 section).
3400
3401 Set up function rodata section if --overlay-rodata. We don't
3402 currently include merged string constant rodata sections, since their contents may be shared between functions.
3403
3404 Sort the call graph so that the deepest nodes will be visited
3405 first. */
3406
3407 static bfd_boolean
3408 mark_overlay_section (struct function_info *fun,
3409 struct bfd_link_info *info,
3410 void *param)
3411 {
3412 struct call_info *call;
3413 unsigned int count;
3414 struct _mos_param *mos_param = param;
3415 struct spu_link_hash_table *htab = spu_hash_table (info);
3416
3417 if (fun->visit4)
3418 return TRUE;
3419
3420 fun->visit4 = TRUE;
3421 if (!fun->sec->linker_mark
3422 && (htab->params->ovly_flavour != ovly_soft_icache
3423 || htab->params->non_ia_text
3424 || strncmp (fun->sec->name, ".text.ia.", 9) == 0
3425 || strcmp (fun->sec->name, ".init") == 0
3426 || strcmp (fun->sec->name, ".fini") == 0))
3427 {
3428 unsigned int size;
3429
3430 fun->sec->linker_mark = 1;
3431 fun->sec->gc_mark = 1;
3432 fun->sec->segment_mark = 0;
3433 /* Ensure SEC_CODE is set on this text section (it ought to
3434 be!), and SEC_CODE is clear on rodata sections. We use
3435 this flag to differentiate the two overlay section types. */
3436 fun->sec->flags |= SEC_CODE;
3437
3438 size = fun->sec->size;
3439 if (htab->params->auto_overlay & OVERLAY_RODATA)
3440 {
3441 char *name = NULL;
3442
3443 /* Find the rodata section corresponding to this function's
3444 text section. */
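/* For example, ".text" pairs with ".rodata", ".text.foo" with
   ".rodata.foo", and ".gnu.linkonce.t.foo" with
   ".gnu.linkonce.r.foo" (the 't' at index 14 is rewritten).  */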
3445 if (strcmp (fun->sec->name, ".text") == 0)
3446 {
3447 name = bfd_malloc (sizeof (".rodata"));
3448 if (name == NULL)
3449 return FALSE;
3450 memcpy (name, ".rodata", sizeof (".rodata"));
3451 }
3452 else if (strncmp (fun->sec->name, ".text.", 6) == 0)
3453 {
3454 size_t len = strlen (fun->sec->name);
3455 name = bfd_malloc (len + 3);
3456 if (name == NULL)
3457 return FALSE;
3458 memcpy (name, ".rodata", sizeof (".rodata"));
3459 memcpy (name + 7, fun->sec->name + 5, len - 4);
3460 }
3461 else if (strncmp (fun->sec->name, ".gnu.linkonce.t.", 16) == 0)
3462 {
3463 size_t len = strlen (fun->sec->name) + 1;
3464 name = bfd_malloc (len);
3465 if (name == NULL)
3466 return FALSE;
3467 memcpy (name, fun->sec->name, len);
3468 name[14] = 'r';
3469 }
3470
3471 if (name != NULL)
3472 {
3473 asection *rodata = NULL;
3474 asection *group_sec = elf_section_data (fun->sec)->next_in_group;
3475 if (group_sec == NULL)
3476 rodata = bfd_get_section_by_name (fun->sec->owner, name);
3477 else
3478 while (group_sec != NULL && group_sec != fun->sec)
3479 {
3480 if (strcmp (group_sec->name, name) == 0)
3481 {
3482 rodata = group_sec;
3483 break;
3484 }
3485 group_sec = elf_section_data (group_sec)->next_in_group;
3486 }
3487 fun->rodata = rodata;
3488 if (fun->rodata)
3489 {
3490 size += fun->rodata->size;
3491 if (htab->params->line_size != 0
3492 && size > htab->params->line_size)
3493 {
3494 size -= fun->rodata->size;
3495 fun->rodata = NULL;
3496 }
3497 else
3498 {
3499 fun->rodata->linker_mark = 1;
3500 fun->rodata->gc_mark = 1;
3501 fun->rodata->flags &= ~SEC_CODE;
3502 }
3503 }
3504 free (name);
3505 }
3506 }
3507 if (mos_param->max_overlay_size < size)
3508 mos_param->max_overlay_size = size;
3509 }
3510
3511 for (count = 0, call = fun->call_list; call != NULL; call = call->next)
3512 count += 1;
3513
3514 if (count > 1)
3515 {
3516 struct call_info **calls = bfd_malloc (count * sizeof (*calls));
3517 if (calls == NULL)
3518 return FALSE;
3519
3520 for (count = 0, call = fun->call_list; call != NULL; call = call->next)
3521 calls[count++] = call;
3522
3523 qsort (calls, count, sizeof (*calls), sort_calls);
3524
3525 fun->call_list = NULL;
3526 while (count != 0)
3527 {
3528 --count;
3529 calls[count]->next = fun->call_list;
3530 fun->call_list = calls[count];
3531 }
3532 free (calls);
3533 }
3534
3535 for (call = fun->call_list; call != NULL; call = call->next)
3536 {
3537 if (call->is_pasted)
3538 {
3539 /* There can only be one is_pasted call per function_info. */
3540 BFD_ASSERT (!fun->sec->segment_mark);
3541 fun->sec->segment_mark = 1;
3542 }
3543 if (!call->broken_cycle
3544 && !mark_overlay_section (call->fun, info, param))
3545 return FALSE;
3546 }
3547
3548 /* Don't put entry code into an overlay. The overlay manager needs
3549 a stack! Also, don't mark .ovl.init as an overlay. */
3550 if (fun->lo + fun->sec->output_offset + fun->sec->output_section->vma
3551 == info->output_bfd->start_address
3552 || strncmp (fun->sec->output_section->name, ".ovl.init", 9) == 0)
3553 {
3554 fun->sec->linker_mark = 0;
3555 if (fun->rodata != NULL)
3556 fun->rodata->linker_mark = 0;
3557 }
3558 return TRUE;
3559 }
3560
3561 /* If non-zero, also unmark functions called from those within sections
3562 that we unmark. Unfortunately this isn't reliable since the
3563 call graph cannot know the destination of function pointer calls. */
3564 #define RECURSE_UNMARK 0
3565
3566 struct _uos_param {
3567 asection *exclude_input_section;
3568 asection *exclude_output_section;
3569 unsigned long clearing;
3570 };
3571
3572 /* Undo some of mark_overlay_section's work. */
3573
3574 static bfd_boolean
3575 unmark_overlay_section (struct function_info *fun,
3576 struct bfd_link_info *info,
3577 void *param)
3578 {
3579 struct call_info *call;
3580 struct _uos_param *uos_param = param;
3581 unsigned int excluded = 0;
3582
3583 if (fun->visit5)
3584 return TRUE;
3585
3586 fun->visit5 = TRUE;
3587
3588 excluded = 0;
3589 if (fun->sec == uos_param->exclude_input_section
3590 || fun->sec->output_section == uos_param->exclude_output_section)
3591 excluded = 1;
3592
3593 if (RECURSE_UNMARK)
3594 uos_param->clearing += excluded;
3595
3596 if (RECURSE_UNMARK ? uos_param->clearing : excluded)
3597 {
3598 fun->sec->linker_mark = 0;
3599 if (fun->rodata)
3600 fun->rodata->linker_mark = 0;
3601 }
3602
3603 for (call = fun->call_list; call != NULL; call = call->next)
3604 if (!call->broken_cycle
3605 && !unmark_overlay_section (call->fun, info, param))
3606 return FALSE;
3607
3608 if (RECURSE_UNMARK)
3609 uos_param->clearing -= excluded;
3610 return TRUE;
3611 }
3612
3613 struct _cl_param {
3614 unsigned int lib_size;
3615 asection **lib_sections;
3616 };
3617
3618 /* Add sections we have marked as belonging to overlays to an array
3619 for consideration as non-overlay sections. The array consists of
3620 pairs of sections, (text,rodata), for functions in the call graph. */
3621
3622 static bfd_boolean
3623 collect_lib_sections (struct function_info *fun,
3624 struct bfd_link_info *info,
3625 void *param)
3626 {
3627 struct _cl_param *lib_param = param;
3628 struct call_info *call;
3629 unsigned int size;
3630
3631 if (fun->visit6)
3632 return TRUE;
3633
3634 fun->visit6 = TRUE;
3635 if (!fun->sec->linker_mark || !fun->sec->gc_mark || fun->sec->segment_mark)
3636 return TRUE;
3637
3638 size = fun->sec->size;
3639 if (fun->rodata)
3640 size += fun->rodata->size;
3641
3642 if (size <= lib_param->lib_size)
3643 {
3644 *lib_param->lib_sections++ = fun->sec;
3645 fun->sec->gc_mark = 0;
3646 if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
3647 {
3648 *lib_param->lib_sections++ = fun->rodata;
3649 fun->rodata->gc_mark = 0;
3650 }
3651 else
3652 *lib_param->lib_sections++ = NULL;
3653 }
3654
3655 for (call = fun->call_list; call != NULL; call = call->next)
3656 if (!call->broken_cycle)
3657 collect_lib_sections (call->fun, info, param);
3658
3659 return TRUE;
3660 }
3661
3662 /* qsort predicate to sort sections by call count. */
3663
3664 static int
3665 sort_lib (const void *a, const void *b)
3666 {
3667 asection *const *s1 = a;
3668 asection *const *s2 = b;
3669 struct _spu_elf_section_data *sec_data;
3670 struct spu_elf_stack_info *sinfo;
3671 int delta;
3672
3673 delta = 0;
3674 if ((sec_data = spu_elf_section_data (*s1)) != NULL
3675 && (sinfo = sec_data->u.i.stack_info) != NULL)
3676 {
3677 int i;
3678 for (i = 0; i < sinfo->num_fun; ++i)
3679 delta -= sinfo->fun[i].call_count;
3680 }
3681
3682 if ((sec_data = spu_elf_section_data (*s2)) != NULL
3683 && (sinfo = sec_data->u.i.stack_info) != NULL)
3684 {
3685 int i;
3686 for (i = 0; i < sinfo->num_fun; ++i)
3687 delta += sinfo->fun[i].call_count;
3688 }
3689
3690 if (delta != 0)
3691 return delta;
3692
3693 return s1 - s2;
3694 }
3695
3696 /* Remove some sections from those marked to be in overlays. Choose
3697 those that are called from many places, likely library functions. */
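/* In outline: candidate sections (marked, code, smaller than
   LIB_SIZE) are sorted so the most-called come first, then greedily
   unmarked whenever text + rodata + any newly needed call stubs
   still fit in the remaining budget.  dummy_caller records stubs
   already charged so they are not counted twice.  */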
3698
3699 static unsigned int
3700 auto_ovl_lib_functions (struct bfd_link_info *info, unsigned int lib_size)
3701 {
3702 bfd *ibfd;
3703 asection **lib_sections;
3704 unsigned int i, lib_count;
3705 struct _cl_param collect_lib_param;
3706 struct function_info dummy_caller;
3707 struct spu_link_hash_table *htab;
3708
3709 memset (&dummy_caller, 0, sizeof (dummy_caller));
3710 lib_count = 0;
3711 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3712 {
3713 extern const bfd_target bfd_elf32_spu_vec;
3714 asection *sec;
3715
3716 if (ibfd->xvec != &bfd_elf32_spu_vec)
3717 continue;
3718
3719 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3720 if (sec->linker_mark
3721 && sec->size < lib_size
3722 && (sec->flags & SEC_CODE) != 0)
3723 lib_count += 1;
3724 }
3725 lib_sections = bfd_malloc (lib_count * 2 * sizeof (*lib_sections));
3726 if (lib_sections == NULL)
3727 return (unsigned int) -1;
3728 collect_lib_param.lib_size = lib_size;
3729 collect_lib_param.lib_sections = lib_sections;
3730 if (!for_each_node (collect_lib_sections, info, &collect_lib_param,
3731 TRUE))
3732 return (unsigned int) -1;
3733 lib_count = (collect_lib_param.lib_sections - lib_sections) / 2;
3734
3735 /* Sort sections so that those with the most calls are first. */
3736 if (lib_count > 1)
3737 qsort (lib_sections, lib_count, 2 * sizeof (*lib_sections), sort_lib);
3738
3739 htab = spu_hash_table (info);
3740 for (i = 0; i < lib_count; i++)
3741 {
3742 unsigned int tmp, stub_size;
3743 asection *sec;
3744 struct _spu_elf_section_data *sec_data;
3745 struct spu_elf_stack_info *sinfo;
3746
3747 sec = lib_sections[2 * i];
3748 /* If this section is OK, its size must be less than lib_size. */
3749 tmp = sec->size;
3750 /* If it has a rodata section, then add that too. */
3751 if (lib_sections[2 * i + 1])
3752 tmp += lib_sections[2 * i + 1]->size;
3753 /* Add any new overlay call stubs needed by the section. */
3754 stub_size = 0;
3755 if (tmp < lib_size
3756 && (sec_data = spu_elf_section_data (sec)) != NULL
3757 && (sinfo = sec_data->u.i.stack_info) != NULL)
3758 {
3759 int k;
3760 struct call_info *call;
3761
3762 for (k = 0; k < sinfo->num_fun; ++k)
3763 for (call = sinfo->fun[k].call_list; call; call = call->next)
3764 if (call->fun->sec->linker_mark)
3765 {
3766 struct call_info *p;
3767 for (p = dummy_caller.call_list; p; p = p->next)
3768 if (p->fun == call->fun)
3769 break;
3770 if (!p)
3771 stub_size += ovl_stub_size (htab->params);
3772 }
3773 }
3774 if (tmp + stub_size < lib_size)
3775 {
3776 struct call_info **pp, *p;
3777
3778 /* This section fits. Mark it as non-overlay. */
3779 lib_sections[2 * i]->linker_mark = 0;
3780 if (lib_sections[2 * i + 1])
3781 lib_sections[2 * i + 1]->linker_mark = 0;
3782 lib_size -= tmp + stub_size;
3783 /* Call stubs to the section we just added are no longer
3784 needed. */
3785 pp = &dummy_caller.call_list;
3786 while ((p = *pp) != NULL)
3787 if (!p->fun->sec->linker_mark)
3788 {
3789 lib_size += ovl_stub_size (htab->params);
3790 *pp = p->next;
3791 free (p);
3792 }
3793 else
3794 pp = &p->next;
3795 /* Add new call stubs to dummy_caller. */
3796 if ((sec_data = spu_elf_section_data (sec)) != NULL
3797 && (sinfo = sec_data->u.i.stack_info) != NULL)
3798 {
3799 int k;
3800 struct call_info *call;
3801
3802 for (k = 0; k < sinfo->num_fun; ++k)
3803 for (call = sinfo->fun[k].call_list;
3804 call;
3805 call = call->next)
3806 if (call->fun->sec->linker_mark)
3807 {
3808 struct call_info *callee;
3809 callee = bfd_malloc (sizeof (*callee));
3810 if (callee == NULL)
3811 return (unsigned int) -1;
3812 *callee = *call;
3813 if (!insert_callee (&dummy_caller, callee))
3814 free (callee);
3815 }
3816 }
3817 }
3818 }
3819 while (dummy_caller.call_list != NULL)
3820 {
3821 struct call_info *call = dummy_caller.call_list;
3822 dummy_caller.call_list = call->next;
3823 free (call);
3824 }
3825 for (i = 0; i < 2 * lib_count; i++)
3826 if (lib_sections[i])
3827 lib_sections[i]->gc_mark = 1;
3828 free (lib_sections);
3829 return lib_size;
3830 }
3831
3832 /* Build an array of overlay sections. The deepest node's section is
3833 added first, then its parent node's section, then everything called
3834 from the parent section. The idea is to group sections so as to
3835 minimise calls between different overlays. */
3836
3837 static bfd_boolean
3838 collect_overlays (struct function_info *fun,
3839 struct bfd_link_info *info,
3840 void *param)
3841 {
3842 struct call_info *call;
3843 bfd_boolean added_fun;
3844 asection ***ovly_sections = param;
3845
3846 if (fun->visit7)
3847 return TRUE;
3848
3849 fun->visit7 = TRUE;
3850 for (call = fun->call_list; call != NULL; call = call->next)
3851 if (!call->is_pasted && !call->broken_cycle)
3852 {
3853 if (!collect_overlays (call->fun, info, ovly_sections))
3854 return FALSE;
3855 break;
3856 }
3857
3858 added_fun = FALSE;
3859 if (fun->sec->linker_mark && fun->sec->gc_mark)
3860 {
3861 fun->sec->gc_mark = 0;
3862 *(*ovly_sections)++ = fun->sec;
3863 if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
3864 {
3865 fun->rodata->gc_mark = 0;
3866 *(*ovly_sections)++ = fun->rodata;
3867 }
3868 else
3869 *(*ovly_sections)++ = NULL;
3870 added_fun = TRUE;
3871
3872 /* Pasted sections must stay with the first section. We don't
3873 put pasted sections in the array, just the first section.
3874 Mark subsequent sections as already considered. */
3875 if (fun->sec->segment_mark)
3876 {
3877 struct function_info *call_fun = fun;
3878 do
3879 {
3880 for (call = call_fun->call_list; call != NULL; call = call->next)
3881 if (call->is_pasted)
3882 {
3883 call_fun = call->fun;
3884 call_fun->sec->gc_mark = 0;
3885 if (call_fun->rodata)
3886 call_fun->rodata->gc_mark = 0;
3887 break;
3888 }
3889 if (call == NULL)
3890 abort ();
3891 }
3892 while (call_fun->sec->segment_mark);
3893 }
3894 }
3895
3896 for (call = fun->call_list; call != NULL; call = call->next)
3897 if (!call->broken_cycle
3898 && !collect_overlays (call->fun, info, ovly_sections))
3899 return FALSE;
3900
3901 if (added_fun)
3902 {
3903 struct _spu_elf_section_data *sec_data;
3904 struct spu_elf_stack_info *sinfo;
3905
3906 if ((sec_data = spu_elf_section_data (fun->sec)) != NULL
3907 && (sinfo = sec_data->u.i.stack_info) != NULL)
3908 {
3909 int i;
3910 for (i = 0; i < sinfo->num_fun; ++i)
3911 if (!collect_overlays (&sinfo->fun[i], info, ovly_sections))
3912 return FALSE;
3913 }
3914 }
3915
3916 return TRUE;
3917 }
3918
3919 struct _sum_stack_param {
3920 size_t cum_stack;
3921 size_t overall_stack;
3922 bfd_boolean emit_stack_syms;
3923 };
3924
3925 /* Descend the call graph for FUN, accumulating total stack required. */
3926
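/* For example (an illustration, not from the original source): if F
   uses 48 bytes of local stack and calls G, whose cumulative need is
   96 bytes, a normal call gives F a cumulative 48 + 96 = 144, while
   a tail call reuses F's frame and gives max (48, 96) = 96.  */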
3927 static bfd_boolean
3928 sum_stack (struct function_info *fun,
3929 struct bfd_link_info *info,
3930 void *param)
3931 {
3932 struct call_info *call;
3933 struct function_info *max;
3934 size_t stack, cum_stack;
3935 const char *f1;
3936 bfd_boolean has_call;
3937 struct _sum_stack_param *sum_stack_param = param;
3938 struct spu_link_hash_table *htab;
3939
3940 cum_stack = fun->stack;
3941 sum_stack_param->cum_stack = cum_stack;
3942 if (fun->visit3)
3943 return TRUE;
3944
3945 has_call = FALSE;
3946 max = NULL;
3947 for (call = fun->call_list; call; call = call->next)
3948 {
3949 if (call->broken_cycle)
3950 continue;
3951 if (!call->is_pasted)
3952 has_call = TRUE;
3953 if (!sum_stack (call->fun, info, sum_stack_param))
3954 return FALSE;
3955 stack = sum_stack_param->cum_stack;
3956 /* Include caller stack for normal calls, don't do so for
3957 tail calls. fun->stack here is local stack usage for
3958 this function. */
3959 if (!call->is_tail || call->is_pasted || call->fun->start != NULL)
3960 stack += fun->stack;
3961 if (cum_stack < stack)
3962 {
3963 cum_stack = stack;
3964 max = call->fun;
3965 }
3966 }
3967
3968 sum_stack_param->cum_stack = cum_stack;
3969 stack = fun->stack;
3970 /* Now fun->stack holds cumulative stack. */
3971 fun->stack = cum_stack;
3972 fun->visit3 = TRUE;
3973
3974 if (!fun->non_root
3975 && sum_stack_param->overall_stack < cum_stack)
3976 sum_stack_param->overall_stack = cum_stack;
3977
3978 htab = spu_hash_table (info);
3979 if (htab->params->auto_overlay)
3980 return TRUE;
3981
3982 f1 = func_name (fun);
3983 if (htab->params->stack_analysis)
3984 {
3985 if (!fun->non_root)
3986 info->callbacks->info (_(" %s: 0x%v\n"), f1, (bfd_vma) cum_stack);
3987 info->callbacks->minfo (_("%s: 0x%v 0x%v\n"),
3988 f1, (bfd_vma) stack, (bfd_vma) cum_stack);
3989
3990 if (has_call)
3991 {
3992 info->callbacks->minfo (_(" calls:\n"));
3993 for (call = fun->call_list; call; call = call->next)
3994 if (!call->is_pasted && !call->broken_cycle)
3995 {
3996 const char *f2 = func_name (call->fun);
3997 const char *ann1 = call->fun == max ? "*" : " ";
3998 const char *ann2 = call->is_tail ? "t" : " ";
3999
4000 info->callbacks->minfo (_(" %s%s %s\n"), ann1, ann2, f2);
4001 }
4002 }
4003 }
4004
4005 if (sum_stack_param->emit_stack_syms)
4006 {
4007 char *name = bfd_malloc (18 + strlen (f1));
4008 struct elf_link_hash_entry *h;
4009
4010 if (name == NULL)
4011 return FALSE;
4012
4013 if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
4014 sprintf (name, "__stack_%s", f1);
4015 else
4016 sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);
4017
4018 h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
4019 free (name);
4020 if (h != NULL
4021 && (h->root.type == bfd_link_hash_new
4022 || h->root.type == bfd_link_hash_undefined
4023 || h->root.type == bfd_link_hash_undefweak))
4024 {
4025 h->root.type = bfd_link_hash_defined;
4026 h->root.u.def.section = bfd_abs_section_ptr;
4027 h->root.u.def.value = cum_stack;
4028 h->size = 0;
4029 h->type = 0;
4030 h->ref_regular = 1;
4031 h->def_regular = 1;
4032 h->ref_regular_nonweak = 1;
4033 h->forced_local = 1;
4034 h->non_elf = 0;
4035 }
4036 }
4037
4038 return TRUE;
4039 }
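/* A note on the __stack_* symbols defined above: each is an absolute
   symbol whose *value* is the cumulative stack estimate, so it carries
   no storage.  A minimal sketch of reading one from C, assuming a
   global function "main" was analysed (the name is illustrative):

     extern char __stack_main;                  // absolute linker symbol
     size_t stack_est = (size_t) &__stack_main; // the address is the value

   Taking the symbol's address, not its contents, yields the estimate.  */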
4040
4041 /* SEC is part of a pasted function. Return the call_info for the
4042 next section of this function. */
4043
4044 static struct call_info *
4045 find_pasted_call (asection *sec)
4046 {
4047 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
4048 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
4049 struct call_info *call;
4050 int k;
4051
4052 for (k = 0; k < sinfo->num_fun; ++k)
4053 for (call = sinfo->fun[k].call_list; call != NULL; call = call->next)
4054 if (call->is_pasted)
4055 return call;
4056 abort ();
4057 return 0;
4058 }
4059
4060 /* qsort predicate to sort bfds by file name. */
4061
4062 static int
4063 sort_bfds (const void *a, const void *b)
4064 {
4065 bfd *const *abfd1 = a;
4066 bfd *const *abfd2 = b;
4067
4068 return filename_cmp ((*abfd1)->filename, (*abfd2)->filename);
4069 }
4070
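/* Emit into SCRIPT the input-section lines for overlay OVLYNUM, taking
   entries from OVLY_SECTIONS starting at index BASE for as long as
   OVLY_MAP still maps to OVLYNUM: first the code sections together with
   any pasted continuations, then the corresponding rodata sections.
   Each emitted line has the shape "archive:object (section)", e.g.
   "   lib.a:obj.o (.text.fn)" with a ':' path separator.  Returns the
   index of the first entry belonging to the next overlay, or -1 on a
   file error.  */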
4071 static unsigned int
4072 print_one_overlay_section (FILE *script,
4073 unsigned int base,
4074 unsigned int count,
4075 unsigned int ovlynum,
4076 unsigned int *ovly_map,
4077 asection **ovly_sections,
4078 struct bfd_link_info *info)
4079 {
4080 unsigned int j;
4081
4082 for (j = base; j < count && ovly_map[j] == ovlynum; j++)
4083 {
4084 asection *sec = ovly_sections[2 * j];
4085
4086 if (fprintf (script, " %s%c%s (%s)\n",
4087 (sec->owner->my_archive != NULL
4088 ? sec->owner->my_archive->filename : ""),
4089 info->path_separator,
4090 sec->owner->filename,
4091 sec->name) <= 0)
4092 return -1;
4093 if (sec->segment_mark)
4094 {
4095 struct call_info *call = find_pasted_call (sec);
4096 while (call != NULL)
4097 {
4098 struct function_info *call_fun = call->fun;
4099 sec = call_fun->sec;
4100 if (fprintf (script, " %s%c%s (%s)\n",
4101 (sec->owner->my_archive != NULL
4102 ? sec->owner->my_archive->filename : ""),
4103 info->path_separator,
4104 sec->owner->filename,
4105 sec->name) <= 0)
4106 return -1;
4107 for (call = call_fun->call_list; call; call = call->next)
4108 if (call->is_pasted)
4109 break;
4110 }
4111 }
4112 }
4113
4114 for (j = base; j < count && ovly_map[j] == ovlynum; j++)
4115 {
4116 asection *sec = ovly_sections[2 * j + 1];
4117 if (sec != NULL
4118 && fprintf (script, " %s%c%s (%s)\n",
4119 (sec->owner->my_archive != NULL
4120 ? sec->owner->my_archive->filename : ""),
4121 info->path_separator,
4122 sec->owner->filename,
4123 sec->name) <= 0)
4124 return -1;
4125
4126 sec = ovly_sections[2 * j];
4127 if (sec->segment_mark)
4128 {
4129 struct call_info *call = find_pasted_call (sec);
4130 while (call != NULL)
4131 {
4132 struct function_info *call_fun = call->fun;
4133 sec = call_fun->rodata;
4134 if (sec != NULL
4135 && fprintf (script, " %s%c%s (%s)\n",
4136 (sec->owner->my_archive != NULL
4137 ? sec->owner->my_archive->filename : ""),
4138 info->path_separator,
4139 sec->owner->filename,
4140 sec->name) <= 0)
4141 return -1;
4142 for (call = call_fun->call_list; call; call = call->next)
4143 if (call->is_pasted)
4144 break;
4145 }
4146 }
4147 }
4148
4149 return j;
4150 }
4151
4152 /* Handle --auto-overlay. */
4153
4154 static void
4155 spu_elf_auto_overlay (struct bfd_link_info *info)
4156 {
4157 bfd *ibfd;
4158 bfd **bfd_arr;
4159 struct elf_segment_map *m;
4160 unsigned int fixed_size, lo, hi;
4161 unsigned int reserved;
4162 struct spu_link_hash_table *htab;
4163 unsigned int base, i, count, bfd_count;
4164 unsigned int region, ovlynum;
4165 asection **ovly_sections, **ovly_p;
4166 unsigned int *ovly_map;
4167 FILE *script;
4168 unsigned int total_overlay_size, overlay_size;
4169 const char *ovly_mgr_entry;
4170 struct elf_link_hash_entry *h;
4171 struct _mos_param mos_param;
4172 struct _uos_param uos_param;
4173 struct function_info dummy_caller;
4174
4175 /* Find the extents of our loadable image. */
4176 lo = (unsigned int) -1;
4177 hi = 0;
4178 for (m = elf_seg_map (info->output_bfd); m != NULL; m = m->next)
4179 if (m->p_type == PT_LOAD)
4180 for (i = 0; i < m->count; i++)
4181 if (m->sections[i]->size != 0)
4182 {
4183 if (m->sections[i]->vma < lo)
4184 lo = m->sections[i]->vma;
4185 if (m->sections[i]->vma + m->sections[i]->size - 1 > hi)
4186 hi = m->sections[i]->vma + m->sections[i]->size - 1;
4187 }
4188 fixed_size = hi + 1 - lo;
4189
4190 if (!discover_functions (info))
4191 goto err_exit;
4192
4193 if (!build_call_tree (info))
4194 goto err_exit;
4195
4196 htab = spu_hash_table (info);
4197 reserved = htab->params->auto_overlay_reserved;
4198 if (reserved == 0)
4199 {
4200 struct _sum_stack_param sum_stack_param;
4201
4202 sum_stack_param.emit_stack_syms = 0;
4203 sum_stack_param.overall_stack = 0;
4204 if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
4205 goto err_exit;
4206 reserved = (sum_stack_param.overall_stack
4207 + htab->params->extra_stack_space);
4208 }
4209
4210 /* No need for overlays if everything already fits. */
4211 if (fixed_size + reserved <= htab->local_store
4212 && htab->params->ovly_flavour != ovly_soft_icache)
4213 {
4214 htab->params->auto_overlay = 0;
4215 return;
4216 }
4217
4218 uos_param.exclude_input_section = 0;
4219 uos_param.exclude_output_section
4220 = bfd_get_section_by_name (info->output_bfd, ".interrupt");
4221
4222 ovly_mgr_entry = "__ovly_load";
4223 if (htab->params->ovly_flavour == ovly_soft_icache)
4224 ovly_mgr_entry = "__icache_br_handler";
4225 h = elf_link_hash_lookup (&htab->elf, ovly_mgr_entry,
4226 FALSE, FALSE, FALSE);
4227 if (h != NULL
4228 && (h->root.type == bfd_link_hash_defined
4229 || h->root.type == bfd_link_hash_defweak)
4230 && h->def_regular)
4231 {
4232 /* We have a user supplied overlay manager. */
4233 uos_param.exclude_input_section = h->root.u.def.section;
4234 }
4235 else
4236 {
4237 /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
4238 builtin version to .text, and will adjust .text size. */
4239 fixed_size += (*htab->params->spu_elf_load_ovl_mgr) ();
4240 }
4241
4242 /* Mark overlay sections, and find max overlay section size. */
4243 mos_param.max_overlay_size = 0;
4244 if (!for_each_node (mark_overlay_section, info, &mos_param, TRUE))
4245 goto err_exit;
4246
4247 /* We can't put the overlay manager or interrupt routines in
4248 overlays. */
4249 uos_param.clearing = 0;
4250 if ((uos_param.exclude_input_section
4251 || uos_param.exclude_output_section)
4252 && !for_each_node (unmark_overlay_section, info, &uos_param, TRUE))
4253 goto err_exit;
4254
4255 bfd_count = 0;
4256 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
4257 ++bfd_count;
4258 bfd_arr = bfd_malloc (bfd_count * sizeof (*bfd_arr));
4259 if (bfd_arr == NULL)
4260 goto err_exit;
4261
4262 /* Count overlay sections, and subtract their sizes from "fixed_size". */
4263 count = 0;
4264 bfd_count = 0;
4265 total_overlay_size = 0;
4266 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
4267 {
4268 extern const bfd_target bfd_elf32_spu_vec;
4269 asection *sec;
4270 unsigned int old_count;
4271
4272 if (ibfd->xvec != &bfd_elf32_spu_vec)
4273 continue;
4274
4275 old_count = count;
4276 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
4277 if (sec->linker_mark)
4278 {
4279 if ((sec->flags & SEC_CODE) != 0)
4280 count += 1;
4281 fixed_size -= sec->size;
4282 total_overlay_size += sec->size;
4283 }
4284 else if ((sec->flags & (SEC_ALLOC | SEC_LOAD)) == (SEC_ALLOC | SEC_LOAD)
4285 && sec->output_section->owner == info->output_bfd
4286 && strncmp (sec->output_section->name, ".ovl.init", 9) == 0)
4287 fixed_size -= sec->size;
4288 if (count != old_count)
4289 bfd_arr[bfd_count++] = ibfd;
4290 }
4291
4292 /* Since the overlay link script selects sections by file name and
4293 section name, ensure that file names are unique. */
4294 if (bfd_count > 1)
4295 {
4296 bfd_boolean ok = TRUE;
4297
4298 qsort (bfd_arr, bfd_count, sizeof (*bfd_arr), sort_bfds);
4299 for (i = 1; i < bfd_count; ++i)
4300 if (filename_cmp (bfd_arr[i - 1]->filename, bfd_arr[i]->filename) == 0)
4301 {
4302 if (bfd_arr[i - 1]->my_archive == bfd_arr[i]->my_archive)
4303 {
4304 if (bfd_arr[i - 1]->my_archive && bfd_arr[i]->my_archive)
4305 info->callbacks->einfo (_("%s duplicated in %s\n"),
4306 bfd_arr[i]->filename,
4307 bfd_arr[i]->my_archive->filename);
4308 else
4309 info->callbacks->einfo (_("%s duplicated\n"),
4310 bfd_arr[i]->filename);
4311 ok = FALSE;
4312 }
4313 }
4314 if (!ok)
4315 {
4316 info->callbacks->einfo (_("sorry, no support for duplicate "
4317 "object files in auto-overlay script\n"));
4318 bfd_set_error (bfd_error_bad_value);
4319 goto err_exit;
4320 }
4321 }
4322 free (bfd_arr);
4323
4324 fixed_size += reserved;
4325 fixed_size += htab->non_ovly_stub * ovl_stub_size (htab->params);
4326 if (fixed_size + mos_param.max_overlay_size <= htab->local_store)
4327 {
4328 if (htab->params->ovly_flavour == ovly_soft_icache)
4329 {
4330 /* Stubs in the non-icache area are bigger. */
4331 fixed_size += htab->non_ovly_stub * 16;
4332 /* Space for icache manager tables.
4333 a) Tag array, one quadword per cache line.
4334 - word 0: ia address of present line, init to zero. */
4335 fixed_size += 16 << htab->num_lines_log2;
4336 /* b) Rewrite "to" list, one quadword per cache line. */
4337 fixed_size += 16 << htab->num_lines_log2;
4338 /* c) Rewrite "from" list, one byte per outgoing branch (rounded up
4339 to a power-of-two number of full quadwords) per cache line. */
4340 fixed_size += 16 << (htab->fromelem_size_log2
4341 + htab->num_lines_log2);
4342 /* d) Pointer to __ea backing store (toe), 1 quadword. */
4343 fixed_size += 16;
4344 }
4345 else
4346 {
4347 /* Guess the number of overlays.  Assuming the overlay buffer is on
4348 average only half full should be conservative. */
4349 ovlynum = (total_overlay_size * 2 * htab->params->num_lines
4350 / (htab->local_store - fixed_size));
4351 /* Space for _ovly_table[], _ovly_buf_table[] and toe. */
4352 fixed_size += ovlynum * 16 + 16 + 4 + 16;
4353 }
4354 }
4355
4356 if (fixed_size + mos_param.max_overlay_size > htab->local_store)
4357 info->callbacks->einfo (_("non-overlay size of 0x%v plus maximum overlay "
4358 "size of 0x%v exceeds local store\n"),
4359 (bfd_vma) fixed_size,
4360 (bfd_vma) mos_param.max_overlay_size);
4361
4362 /* Now see if we should put some functions in the non-overlay area. */
4363 else if (fixed_size < htab->params->auto_overlay_fixed)
4364 {
4365 unsigned int max_fixed, lib_size;
4366
4367 max_fixed = htab->local_store - mos_param.max_overlay_size;
4368 if (max_fixed > htab->params->auto_overlay_fixed)
4369 max_fixed = htab->params->auto_overlay_fixed;
4370 lib_size = max_fixed - fixed_size;
4371 lib_size = auto_ovl_lib_functions (info, lib_size);
4372 if (lib_size == (unsigned int) -1)
4373 goto err_exit;
4374 fixed_size = max_fixed - lib_size;
4375 }
4376
4377 /* Build an array of sections, suitably sorted to place into
4378 overlays. */
4379 ovly_sections = bfd_malloc (2 * count * sizeof (*ovly_sections));
4380 if (ovly_sections == NULL)
4381 goto err_exit;
4382 ovly_p = ovly_sections;
4383 if (!for_each_node (collect_overlays, info, &ovly_p, TRUE))
4384 goto err_exit;
4385 count = (size_t) (ovly_p - ovly_sections) / 2;
4386 ovly_map = bfd_malloc (count * sizeof (*ovly_map));
4387 if (ovly_map == NULL)
4388 goto err_exit;
4389
4390 memset (&dummy_caller, 0, sizeof (dummy_caller));
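/* Pick the size of one overlay buffer: an explicitly requested line
   size wins, otherwise share out the local store remaining after the
   fixed code and data evenly between the overlay regions.  */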
4391 overlay_size = (htab->local_store - fixed_size) / htab->params->num_lines;
4392 if (htab->params->line_size != 0)
4393 overlay_size = htab->params->line_size;
4394 base = 0;
4395 ovlynum = 0;
4396 while (base < count)
4397 {
4398 unsigned int size = 0, rosize = 0, roalign = 0;
4399
4400 for (i = base; i < count; i++)
4401 {
4402 asection *sec, *rosec;
4403 unsigned int tmp, rotmp;
4404 unsigned int num_stubs;
4405 struct call_info *call, *pasty;
4406 struct _spu_elf_section_data *sec_data;
4407 struct spu_elf_stack_info *sinfo;
4408 unsigned int k;
4409
4410 /* See whether we can add this section to the current
4411 overlay without overflowing our overlay buffer. */
4412 sec = ovly_sections[2 * i];
4413 tmp = align_power (size, sec->alignment_power) + sec->size;
4414 rotmp = rosize;
4415 rosec = ovly_sections[2 * i + 1];
4416 if (rosec != NULL)
4417 {
4418 rotmp = align_power (rotmp, rosec->alignment_power) + rosec->size;
4419 if (roalign < rosec->alignment_power)
4420 roalign = rosec->alignment_power;
4421 }
4422 if (align_power (tmp, roalign) + rotmp > overlay_size)
4423 break;
4424 if (sec->segment_mark)
4425 {
4426 /* Pasted sections must stay together, so add their
4427 sizes too. */
4428 pasty = find_pasted_call (sec);
4429 while (pasty != NULL)
4430 {
4431 struct function_info *call_fun = pasty->fun;
4432 tmp = (align_power (tmp, call_fun->sec->alignment_power)
4433 + call_fun->sec->size);
4434 if (call_fun->rodata)
4435 {
4436 rotmp = (align_power (rotmp,
4437 call_fun->rodata->alignment_power)
4438 + call_fun->rodata->size);
4439 if (roalign < call_fun->rodata->alignment_power)
4440 roalign = call_fun->rodata->alignment_power;
4441 }
4442 for (pasty = call_fun->call_list; pasty; pasty = pasty->next)
4443 if (pasty->is_pasted)
4444 break;
4445 }
4446 }
4447 if (align_power (tmp, roalign) + rotmp > overlay_size)
4448 break;
4449
4450 /* If we add this section, we might need new overlay call
4451 stubs.  Add any overlay section calls to dummy_caller. */
4452 pasty = NULL;
4453 sec_data = spu_elf_section_data (sec);
4454 sinfo = sec_data->u.i.stack_info;
4455 for (k = 0; k < (unsigned) sinfo->num_fun; ++k)
4456 for (call = sinfo->fun[k].call_list; call; call = call->next)
4457 if (call->is_pasted)
4458 {
4459 BFD_ASSERT (pasty == NULL);
4460 pasty = call;
4461 }
4462 else if (call->fun->sec->linker_mark)
4463 {
4464 if (!copy_callee (&dummy_caller, call))
4465 goto err_exit;
4466 }
4467 while (pasty != NULL)
4468 {
4469 struct function_info *call_fun = pasty->fun;
4470 pasty = NULL;
4471 for (call = call_fun->call_list; call; call = call->next)
4472 if (call->is_pasted)
4473 {
4474 BFD_ASSERT (pasty == NULL);
4475 pasty = call;
4476 }
4477 else if (!copy_callee (&dummy_caller, call))
4478 goto err_exit;
4479 }
4480
4481 /* Calculate call stub size. */
4482 num_stubs = 0;
4483 for (call = dummy_caller.call_list; call; call = call->next)
4484 {
4485 unsigned int stub_delta = 1;
4486
4487 if (htab->params->ovly_flavour == ovly_soft_icache)
4488 stub_delta = call->count;
4489 num_stubs += stub_delta;
4490
4491 /* If the call is within this overlay, we won't need a
4492 stub. */
4493 for (k = base; k < i + 1; k++)
4494 if (call->fun->sec == ovly_sections[2 * k])
4495 {
4496 num_stubs -= stub_delta;
4497 break;
4498 }
4499 }
4500 if (htab->params->ovly_flavour == ovly_soft_icache
4501 && num_stubs > htab->params->max_branch)
4502 break;
4503 if (align_power (tmp, roalign) + rotmp
4504 + num_stubs * ovl_stub_size (htab->params) > overlay_size)
4505 break;
4506 size = tmp;
4507 rosize = rotmp;
4508 }
4509
4510 if (i == base)
4511 {
4512 info->callbacks->einfo (_("%B:%A%s exceeds overlay size\n"),
4513 ovly_sections[2 * i]->owner,
4514 ovly_sections[2 * i],
4515 ovly_sections[2 * i + 1] ? " + rodata" : "");
4516 bfd_set_error (bfd_error_bad_value);
4517 goto err_exit;
4518 }
4519
4520 while (dummy_caller.call_list != NULL)
4521 {
4522 struct call_info *call = dummy_caller.call_list;
4523 dummy_caller.call_list = call->next;
4524 free (call);
4525 }
4526
4527 ++ovlynum;
4528 while (base < i)
4529 ovly_map[base++] = ovlynum;
4530 }
4531
4532 script = htab->params->spu_elf_open_overlay_script ();
4533
4534 if (htab->params->ovly_flavour == ovly_soft_icache)
4535 {
4536 if (fprintf (script, "SECTIONS\n{\n") <= 0)
4537 goto file_err;
4538
4539 if (fprintf (script,
4540 " . = ALIGN (%u);\n"
4541 " .ovl.init : { *(.ovl.init) }\n"
4542 " . = ABSOLUTE (ADDR (.ovl.init));\n",
4543 htab->params->line_size) <= 0)
4544 goto file_err;
4545
4546 base = 0;
4547 ovlynum = 1;
4548 while (base < count)
4549 {
4550 unsigned int indx = ovlynum - 1;
4551 unsigned int vma, lma;
4552
4553 vma = (indx & (htab->params->num_lines - 1)) << htab->line_size_log2;
4554 lma = vma + (((indx >> htab->num_lines_log2) + 1) << 18);
4555
4556 if (fprintf (script, " .ovly%u ABSOLUTE (ADDR (.ovl.init)) + %u "
4557 ": AT (LOADADDR (.ovl.init) + %u) {\n",
4558 ovlynum, vma, lma) <= 0)
4559 goto file_err;
4560
4561 base = print_one_overlay_section (script, base, count, ovlynum,
4562 ovly_map, ovly_sections, info);
4563 if (base == (unsigned) -1)
4564 goto file_err;
4565
4566 if (fprintf (script, " }\n") <= 0)
4567 goto file_err;
4568
4569 ovlynum++;
4570 }
4571
4572 if (fprintf (script, " . = ABSOLUTE (ADDR (.ovl.init)) + %u;\n",
4573 1 << (htab->num_lines_log2 + htab->line_size_log2)) <= 0)
4574 goto file_err;
4575
4576 if (fprintf (script, "}\nINSERT AFTER .toe;\n") <= 0)
4577 goto file_err;
4578 }
4579 else
4580 {
4581 if (fprintf (script, "SECTIONS\n{\n") <= 0)
4582 goto file_err;
4583
4584 if (fprintf (script,
4585 " . = ALIGN (16);\n"
4586 " .ovl.init : { *(.ovl.init) }\n"
4587 " . = ABSOLUTE (ADDR (.ovl.init));\n") <= 0)
4588 goto file_err;
4589
4590 for (region = 1; region <= htab->params->num_lines; region++)
4591 {
4592 ovlynum = region;
4593 base = 0;
4594 while (base < count && ovly_map[base] < ovlynum)
4595 base++;
4596
4597 if (base == count)
4598 break;
4599
4600 if (region == 1)
4601 {
4602 /* We need to set lma since we are overlaying .ovl.init. */
4603 if (fprintf (script,
4604 " OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))\n {\n") <= 0)
4605 goto file_err;
4606 }
4607 else
4608 {
4609 if (fprintf (script, " OVERLAY :\n {\n") <= 0)
4610 goto file_err;
4611 }
4612
4613 while (base < count)
4614 {
4615 if (fprintf (script, " .ovly%u {\n", ovlynum) <= 0)
4616 goto file_err;
4617
4618 base = print_one_overlay_section (script, base, count, ovlynum,
4619 ovly_map, ovly_sections, info);
4620 if (base == (unsigned) -1)
4621 goto file_err;
4622
4623 if (fprintf (script, " }\n") <= 0)
4624 goto file_err;
4625
4626 ovlynum += htab->params->num_lines;
4627 while (base < count && ovly_map[base] < ovlynum)
4628 base++;
4629 }
4630
4631 if (fprintf (script, " }\n") <= 0)
4632 goto file_err;
4633 }
4634
4635 if (fprintf (script, "}\nINSERT BEFORE .text;\n") <= 0)
4636 goto file_err;
4637 }
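/* For reference, the non-icache branch above produces a script shaped
   like the following sketch; the object file and section names here
   are purely illustrative and depend on the input:

     SECTIONS
     {
      . = ALIGN (16);
      .ovl.init : { *(.ovl.init) }
      . = ABSOLUTE (ADDR (.ovl.init));
      OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))
       {
        .ovly1 {
         obj.o (.text.f1)
        }
        .ovly3 {
         obj.o (.text.f2)
        }
       }
     }
     INSERT BEFORE .text;

   With num_lines regions, region N holds overlays N, N + num_lines,
   N + 2 * num_lines, ...  */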
4638
4639 free (ovly_map);
4640 free (ovly_sections);
4641
4642 if (fclose (script) != 0)
4643 goto file_err;
4644
4645 if (htab->params->auto_overlay & AUTO_RELINK)
4646 (*htab->params->spu_elf_relink) ();
4647
4648 xexit (0);
4649
4650 file_err:
4651 bfd_set_error (bfd_error_system_call);
4652 err_exit:
4653 info->callbacks->einfo ("%F%P: auto overlay error: %E\n");
4654 xexit (1);
4655 }
4656
4657 /* Provide an estimate of total stack required. */
4658
4659 static bfd_boolean
4660 spu_elf_stack_analysis (struct bfd_link_info *info)
4661 {
4662 struct spu_link_hash_table *htab;
4663 struct _sum_stack_param sum_stack_param;
4664
4665 if (!discover_functions (info))
4666 return FALSE;
4667
4668 if (!build_call_tree (info))
4669 return FALSE;
4670
4671 htab = spu_hash_table (info);
4672 if (htab->params->stack_analysis)
4673 {
4674 info->callbacks->info (_("Stack size for call graph root nodes.\n"));
4675 info->callbacks->minfo (_("\nStack size for functions. "
4676 "Annotations: '*' max stack, 't' tail call\n"));
4677 }
4678
4679 sum_stack_param.emit_stack_syms = htab->params->emit_stack_syms;
4680 sum_stack_param.overall_stack = 0;
4681 if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
4682 return FALSE;
4683
4684 if (htab->params->stack_analysis)
4685 info->callbacks->info (_("Maximum stack required is 0x%v\n"),
4686 (bfd_vma) sum_stack_param.overall_stack);
4687 return TRUE;
4688 }
4689
4690 /* Perform a final link. */
4691
4692 static bfd_boolean
4693 spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
4694 {
4695 struct spu_link_hash_table *htab = spu_hash_table (info);
4696
4697 if (htab->params->auto_overlay)
4698 spu_elf_auto_overlay (info);
4699
4700 if ((htab->params->stack_analysis
4701 || (htab->params->ovly_flavour == ovly_soft_icache
4702 && htab->params->lrlive_analysis))
4703 && !spu_elf_stack_analysis (info))
4704 info->callbacks->einfo ("%X%P: stack/lrlive analysis error: %E\n");
4705
4706 if (!spu_elf_build_stubs (info))
4707 info->callbacks->einfo ("%F%P: can not build overlay stubs: %E\n");
4708
4709 return bfd_elf_final_link (output_bfd, info);
4710 }
4711
4712 /* Called when not normally emitting relocs, i.e. !info->relocatable
4713 and !info->emitrelocations. Returns a count of special relocs
4714 that need to be emitted. */
4715
4716 static unsigned int
4717 spu_elf_count_relocs (struct bfd_link_info *info, asection *sec)
4718 {
4719 Elf_Internal_Rela *relocs;
4720 unsigned int count = 0;
4721
4722 relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
4723 info->keep_memory);
4724 if (relocs != NULL)
4725 {
4726 Elf_Internal_Rela *rel;
4727 Elf_Internal_Rela *relend = relocs + sec->reloc_count;
4728
4729 for (rel = relocs; rel < relend; rel++)
4730 {
4731 int r_type = ELF32_R_TYPE (rel->r_info);
4732 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
4733 ++count;
4734 }
4735
4736 if (elf_section_data (sec)->relocs != relocs)
4737 free (relocs);
4738 }
4739
4740 return count;
4741 }
4742
4743 /* Functions for adding fixup records to .fixup.  */
4744
4745 #define FIXUP_RECORD_SIZE 4
4746
4747 #define FIXUP_PUT(output_bfd,htab,index,addr) \
4748 bfd_put_32 (output_bfd, addr, \
4749 htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
4750 #define FIXUP_GET(output_bfd,htab,index) \
4751 bfd_get_32 (output_bfd, \
4752 htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
4753
4754 /* Store OFFSET in .fixup.  This assumes it will be called with an
4755 increasing OFFSET.  When OFFSET falls in the same quadword as the
4756 last base offset, just set a bit there; otherwise add a new record. */
4757 static void
4758 spu_elf_emit_fixup (bfd * output_bfd, struct bfd_link_info *info,
4759 bfd_vma offset)
4760 {
4761 struct spu_link_hash_table *htab = spu_hash_table (info);
4762 asection *sfixup = htab->sfixup;
4763 bfd_vma qaddr = offset & ~(bfd_vma) 15;
4764 bfd_vma bit = ((bfd_vma) 8) >> ((offset & 15) >> 2);
4765 if (sfixup->reloc_count == 0)
4766 {
4767 FIXUP_PUT (output_bfd, htab, 0, qaddr | bit);
4768 sfixup->reloc_count++;
4769 }
4770 else
4771 {
4772 bfd_vma base = FIXUP_GET (output_bfd, htab, sfixup->reloc_count - 1);
4773 if (qaddr != (base & ~(bfd_vma) 15))
4774 {
4775 if ((sfixup->reloc_count + 1) * FIXUP_RECORD_SIZE > sfixup->size)
4776 (*_bfd_error_handler) (_("fatal error while creating .fixup"));
4777 FIXUP_PUT (output_bfd, htab, sfixup->reloc_count, qaddr | bit);
4778 sfixup->reloc_count++;
4779 }
4780 else
4781 FIXUP_PUT (output_bfd, htab, sfixup->reloc_count - 1, base | bit);
4782 }
4783 }
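/* A worked example of the encoding: offsets 0x100 and 0x108 both lie
   in the quadword at 0x100.  The first call stores 0x100 | (8 >> 0),
   i.e. 0x108; the second ORs in 8 >> 2, leaving 0x10a: base address
   0x100 with bits 3 and 1 set, marking words 0 and 2 of that quadword
   as containing addresses to fix up.  */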
4784
4785 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
4786
4787 static int
4788 spu_elf_relocate_section (bfd *output_bfd,
4789 struct bfd_link_info *info,
4790 bfd *input_bfd,
4791 asection *input_section,
4792 bfd_byte *contents,
4793 Elf_Internal_Rela *relocs,
4794 Elf_Internal_Sym *local_syms,
4795 asection **local_sections)
4796 {
4797 Elf_Internal_Shdr *symtab_hdr;
4798 struct elf_link_hash_entry **sym_hashes;
4799 Elf_Internal_Rela *rel, *relend;
4800 struct spu_link_hash_table *htab;
4801 asection *ea;
4802 int ret = TRUE;
4803 bfd_boolean emit_these_relocs = FALSE;
4804 bfd_boolean is_ea_sym;
4805 bfd_boolean stubs;
4806 unsigned int iovl = 0;
4807
4808 htab = spu_hash_table (info);
4809 stubs = (htab->stub_sec != NULL
4810 && maybe_needs_stubs (input_section));
4811 iovl = overlay_index (input_section);
4812 ea = bfd_get_section_by_name (output_bfd, "._ea");
4813 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4814 sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));
4815
4816 rel = relocs;
4817 relend = relocs + input_section->reloc_count;
4818 for (; rel < relend; rel++)
4819 {
4820 int r_type;
4821 reloc_howto_type *howto;
4822 unsigned int r_symndx;
4823 Elf_Internal_Sym *sym;
4824 asection *sec;
4825 struct elf_link_hash_entry *h;
4826 const char *sym_name;
4827 bfd_vma relocation;
4828 bfd_vma addend;
4829 bfd_reloc_status_type r;
4830 bfd_boolean unresolved_reloc;
4831 enum _stub_type stub_type;
4832
4833 r_symndx = ELF32_R_SYM (rel->r_info);
4834 r_type = ELF32_R_TYPE (rel->r_info);
4835 howto = elf_howto_table + r_type;
4836 unresolved_reloc = FALSE;
4837 h = NULL;
4838 sym = NULL;
4839 sec = NULL;
4840 if (r_symndx < symtab_hdr->sh_info)
4841 {
4842 sym = local_syms + r_symndx;
4843 sec = local_sections[r_symndx];
4844 sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
4845 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
4846 }
4847 else
4848 {
4849 if (sym_hashes == NULL)
4850 return FALSE;
4851
4852 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
4853
4854 while (h->root.type == bfd_link_hash_indirect
4855 || h->root.type == bfd_link_hash_warning)
4856 h = (struct elf_link_hash_entry *) h->root.u.i.link;
4857
4858 relocation = 0;
4859 if (h->root.type == bfd_link_hash_defined
4860 || h->root.type == bfd_link_hash_defweak)
4861 {
4862 sec = h->root.u.def.section;
4863 if (sec == NULL
4864 || sec->output_section == NULL)
4865 /* Set a flag that will be cleared later if we find a
4866 relocation value for this symbol. output_section
4867 is typically NULL for symbols satisfied by a shared
4868 library. */
4869 unresolved_reloc = TRUE;
4870 else
4871 relocation = (h->root.u.def.value
4872 + sec->output_section->vma
4873 + sec->output_offset);
4874 }
4875 else if (h->root.type == bfd_link_hash_undefweak)
4876 ;
4877 else if (info->unresolved_syms_in_objects == RM_IGNORE
4878 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
4879 ;
4880 else if (!info->relocatable
4881 && !(r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64))
4882 {
4883 bfd_boolean err;
4884 err = (info->unresolved_syms_in_objects == RM_GENERATE_ERROR
4885 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT);
4886 if (!info->callbacks->undefined_symbol (info,
4887 h->root.root.string,
4888 input_bfd,
4889 input_section,
4890 rel->r_offset, err))
4891 return FALSE;
4892 }
4893 sym_name = h->root.root.string;
4894 }
4895
4896 if (sec != NULL && discarded_section (sec))
4897 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
4898 rel, 1, relend, howto, 0, contents);
4899
4900 if (info->relocatable)
4901 continue;
4902
4903 /* Change "a rt,ra,rb" to "ai rt,ra,0". */
4904 if (r_type == R_SPU_ADD_PIC
4905 && h != NULL
4906 && !(h->def_regular || ELF_COMMON_DEF_P (h)))
4907 {
4908 bfd_byte *loc = contents + rel->r_offset;
4909 loc[0] = 0x1c;
4910 loc[1] = 0x00;
4911 loc[2] &= 0x3f;
4912 }
4913
4914 is_ea_sym = (ea != NULL
4915 && sec != NULL
4916 && sec->output_section == ea);
4917
4918 /* If this symbol is in an overlay area, we may need to relocate
4919 to the overlay stub. */
4920 addend = rel->r_addend;
4921 if (stubs
4922 && !is_ea_sym
4923 && (stub_type = needs_ovl_stub (h, sym, sec, input_section, rel,
4924 contents, info)) != no_stub)
4925 {
4926 unsigned int ovl = 0;
4927 struct got_entry *g, **head;
4928
4929 if (stub_type != nonovl_stub)
4930 ovl = iovl;
4931
4932 if (h != NULL)
4933 head = &h->got.glist;
4934 else
4935 head = elf_local_got_ents (input_bfd) + r_symndx;
4936
4937 for (g = *head; g != NULL; g = g->next)
4938 if (htab->params->ovly_flavour == ovly_soft_icache
4939 ? (g->ovl == ovl
4940 && g->br_addr == (rel->r_offset
4941 + input_section->output_offset
4942 + input_section->output_section->vma))
4943 : g->addend == addend && (g->ovl == ovl || g->ovl == 0))
4944 break;
4945 if (g == NULL)
4946 abort ();
4947
4948 relocation = g->stub_addr;
4949 addend = 0;
4950 }
4951 else
4952 {
4953 /* For soft icache, encode the overlay index into addresses. */
4954 if (htab->params->ovly_flavour == ovly_soft_icache
4955 && (r_type == R_SPU_ADDR16_HI
4956 || r_type == R_SPU_ADDR32 || r_type == R_SPU_REL32)
4957 && !is_ea_sym)
4958 {
4959 unsigned int ovl = overlay_index (sec);
4960 if (ovl != 0)
4961 {
4962 unsigned int set_id = ((ovl - 1) >> htab->num_lines_log2) + 1;
4963 relocation += set_id << 18;
4964 }
4965 }
4966 }
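/* A sketch of the arithmetic above: with num_lines_log2 == 5 (that is,
   32 cache lines), a symbol in overlay 37 gets set_id
   ((37 - 1) >> 5) + 1 == 2, so 2 << 18 is added to its address.  This
   mirrors the LMA layout set up by spu_elf_auto_overlay, where each
   successive group of num_lines overlays is loaded 1 << 18 bytes
   higher.  */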
4967
4968 if (htab->params->emit_fixups && !info->relocatable
4969 && (input_section->flags & SEC_ALLOC) != 0
4970 && r_type == R_SPU_ADDR32)
4971 {
4972 bfd_vma offset;
4973 offset = rel->r_offset + input_section->output_section->vma
4974 + input_section->output_offset;
4975 spu_elf_emit_fixup (output_bfd, info, offset);
4976 }
4977
4978 if (unresolved_reloc)
4979 ;
4980 else if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
4981 {
4982 if (is_ea_sym)
4983 {
4984 /* ._ea is a special section that isn't allocated in SPU
4985 memory, but rather occupies space in PPU memory as
4986 part of an embedded ELF image. If this reloc is
4987 against a symbol defined in ._ea, then transform the
4988 reloc into an equivalent symbol-less reloc computed
4989 relative to the start of the ELF image. */
4990 rel->r_addend += (relocation
4991 - ea->vma
4992 + elf_section_data (ea)->this_hdr.sh_offset);
4993 rel->r_info = ELF32_R_INFO (0, r_type);
4994 }
4995 emit_these_relocs = TRUE;
4996 continue;
4997 }
4998 else if (is_ea_sym)
4999 unresolved_reloc = TRUE;
5000
5001 if (unresolved_reloc
5002 && _bfd_elf_section_offset (output_bfd, info, input_section,
5003 rel->r_offset) != (bfd_vma) -1)
5004 {
5005 (*_bfd_error_handler)
5006 (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
5007 input_bfd,
5008 bfd_get_section_name (input_bfd, input_section),
5009 (long) rel->r_offset,
5010 howto->name,
5011 sym_name);
5012 ret = FALSE;
5013 }
5014
5015 r = _bfd_final_link_relocate (howto,
5016 input_bfd,
5017 input_section,
5018 contents,
5019 rel->r_offset, relocation, addend);
5020
5021 if (r != bfd_reloc_ok)
5022 {
5023 const char *msg = (const char *) 0;
5024
5025 switch (r)
5026 {
5027 case bfd_reloc_overflow:
5028 if (!((*info->callbacks->reloc_overflow)
5029 (info, (h ? &h->root : NULL), sym_name, howto->name,
5030 (bfd_vma) 0, input_bfd, input_section, rel->r_offset)))
5031 return FALSE;
5032 break;
5033
5034 case bfd_reloc_undefined:
5035 if (!((*info->callbacks->undefined_symbol)
5036 (info, sym_name, input_bfd, input_section,
5037 rel->r_offset, TRUE)))
5038 return FALSE;
5039 break;
5040
5041 case bfd_reloc_outofrange:
5042 msg = _("internal error: out of range error");
5043 goto common_error;
5044
5045 case bfd_reloc_notsupported:
5046 msg = _("internal error: unsupported relocation error");
5047 goto common_error;
5048
5049 case bfd_reloc_dangerous:
5050 msg = _("internal error: dangerous error");
5051 goto common_error;
5052
5053 default:
5054 msg = _("internal error: unknown error");
5055 /* fall through */
5056
5057 common_error:
5058 ret = FALSE;
5059 if (!((*info->callbacks->warning)
5060 (info, msg, sym_name, input_bfd, input_section,
5061 rel->r_offset)))
5062 return FALSE;
5063 break;
5064 }
5065 }
5066 }
5067
5068 if (ret
5069 && emit_these_relocs
5070 && !info->emitrelocations)
5071 {
5072 Elf_Internal_Rela *wrel;
5073 Elf_Internal_Shdr *rel_hdr;
5074
5075 wrel = rel = relocs;
5076 relend = relocs + input_section->reloc_count;
5077 for (; rel < relend; rel++)
5078 {
5079 int r_type;
5080
5081 r_type = ELF32_R_TYPE (rel->r_info);
5082 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
5083 *wrel++ = *rel;
5084 }
5085 input_section->reloc_count = wrel - relocs;
5086 /* Backflips for _bfd_elf_link_output_relocs. */
5087 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
5088 rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
5089 ret = 2;
5090 }
5091
5092 return ret;
5093 }
5094
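/* SPU has no dynamic sections needing extra work; this empty hook just
   satisfies the elf_backend_finish_dynamic_sections interface.  */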
5095 static bfd_boolean
5096 spu_elf_finish_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
5097 struct bfd_link_info *info ATTRIBUTE_UNUSED)
5098 {
5099 return TRUE;
5100 }
5101
5102 /* Adjust _SPUEAR_ syms to point at their overlay stubs. */
5103
5104 static int
5105 spu_elf_output_symbol_hook (struct bfd_link_info *info,
5106 const char *sym_name ATTRIBUTE_UNUSED,
5107 Elf_Internal_Sym *sym,
5108 asection *sym_sec ATTRIBUTE_UNUSED,
5109 struct elf_link_hash_entry *h)
5110 {
5111 struct spu_link_hash_table *htab = spu_hash_table (info);
5112
5113 if (!info->relocatable
5114 && htab->stub_sec != NULL
5115 && h != NULL
5116 && (h->root.type == bfd_link_hash_defined
5117 || h->root.type == bfd_link_hash_defweak)
5118 && h->def_regular
5119 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
5120 {
5121 struct got_entry *g;
5122
5123 for (g = h->got.glist; g != NULL; g = g->next)
5124 if (htab->params->ovly_flavour == ovly_soft_icache
5125 ? g->br_addr == g->stub_addr
5126 : g->addend == 0 && g->ovl == 0)
5127 {
5128 sym->st_shndx = (_bfd_elf_section_from_bfd_section
5129 (htab->stub_sec[0]->output_section->owner,
5130 htab->stub_sec[0]->output_section));
5131 sym->st_value = g->stub_addr;
5132 break;
5133 }
5134 }
5135
5136 return 1;
5137 }
5138
5139 static int spu_plugin = 0;
5140
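/* Record whether the output is a plugin; used by
   spu_elf_post_process_headers below to set e_type.  */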
5141 void
5142 spu_elf_plugin (int val)
5143 {
5144 spu_plugin = val;
5145 }
5146
5147 /* Set ELF header e_type for plugins. */
5148
5149 static void
5150 spu_elf_post_process_headers (bfd *abfd,
5151 struct bfd_link_info *info ATTRIBUTE_UNUSED)
5152 {
5153 if (spu_plugin)
5154 {
5155 Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);
5156
5157 i_ehdrp->e_type = ET_DYN;
5158 }
5159 }
5160
5161 /* We may add an extra PT_LOAD segment for .toe. We also need extra
5162 segments for overlays. */
5163
5164 static int
5165 spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
5166 {
5167 int extra = 0;
5168 asection *sec;
5169
5170 if (info != NULL)
5171 {
5172 struct spu_link_hash_table *htab = spu_hash_table (info);
5173 extra = htab->num_overlays;
5174 }
5175
5176 if (extra)
5177 ++extra;
5178
5179 sec = bfd_get_section_by_name (abfd, ".toe");
5180 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
5181 ++extra;
5182
5183 return extra;
5184 }
5185
5186 /* Remove .toe section from other PT_LOAD segments and put it in
5187 a segment of its own. Put overlays in separate segments too. */
5188
5189 static bfd_boolean
5190 spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
5191 {
5192 asection *toe, *s;
5193 struct elf_segment_map *m, *m_overlay;
5194 struct elf_segment_map **p, **p_overlay;
5195 unsigned int i;
5196
5197 if (info == NULL)
5198 return TRUE;
5199
5200 toe = bfd_get_section_by_name (abfd, ".toe");
5201 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
5202 if (m->p_type == PT_LOAD && m->count > 1)
5203 for (i = 0; i < m->count; i++)
5204 if ((s = m->sections[i]) == toe
5205 || spu_elf_section_data (s)->u.o.ovl_index != 0)
5206 {
5207 struct elf_segment_map *m2;
5208 bfd_vma amt;
5209
5210 if (i + 1 < m->count)
5211 {
5212 amt = sizeof (struct elf_segment_map);
5213 amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
5214 m2 = bfd_zalloc (abfd, amt);
5215 if (m2 == NULL)
5216 return FALSE;
5217 m2->count = m->count - (i + 1);
5218 memcpy (m2->sections, m->sections + i + 1,
5219 m2->count * sizeof (m->sections[0]));
5220 m2->p_type = PT_LOAD;
5221 m2->next = m->next;
5222 m->next = m2;
5223 }
5224 m->count = 1;
5225 if (i != 0)
5226 {
5227 m->count = i;
5228 amt = sizeof (struct elf_segment_map);
5229 m2 = bfd_zalloc (abfd, amt);
5230 if (m2 == NULL)
5231 return FALSE;
5232 m2->p_type = PT_LOAD;
5233 m2->count = 1;
5234 m2->sections[0] = s;
5235 m2->next = m->next;
5236 m->next = m2;
5237 }
5238 break;
5239 }
5240
5241
5242 /* Some SPU ELF loaders ignore the PF_OVERLAY flag and just load all
5243 PT_LOAD segments. This can cause the .ovl.init section to be
5244 overwritten with the contents of some overlay segment. To work
5245 around this issue, we ensure that all PF_OVERLAY segments are
5246 sorted first amongst the program headers; this ensures that even
5247 with a broken loader, the .ovl.init section (which is not marked
5248 as PF_OVERLAY) will be placed into SPU local store on startup. */
5249
5250 /* Move all overlay segments onto a separate list. */
5251 p = &elf_seg_map (abfd);
5252 p_overlay = &m_overlay;
5253 while (*p != NULL)
5254 {
5255 if ((*p)->p_type == PT_LOAD && (*p)->count == 1
5256 && spu_elf_section_data ((*p)->sections[0])->u.o.ovl_index != 0)
5257 {
5258 m = *p;
5259 *p = m->next;
5260 *p_overlay = m;
5261 p_overlay = &m->next;
5262 continue;
5263 }
5264
5265 p = &((*p)->next);
5266 }
5267
5268 /* Re-insert overlay segments at the head of the segment map. */
5269 *p_overlay = elf_seg_map (abfd);
5270 elf_seg_map (abfd) = m_overlay;
5271
5272 return TRUE;
5273 }
5274
5275 /* Tweak the section type of .note.spu_name. */
5276
5277 static bfd_boolean
5278 spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
5279 Elf_Internal_Shdr *hdr,
5280 asection *sec)
5281 {
5282 if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
5283 hdr->sh_type = SHT_NOTE;
5284 return TRUE;
5285 }
5286
5287 /* Tweak phdrs before writing them out. */
5288
5289 static int
5290 spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
5291 {
5292 const struct elf_backend_data *bed;
5293 struct elf_obj_tdata *tdata;
5294 Elf_Internal_Phdr *phdr, *last;
5295 struct spu_link_hash_table *htab;
5296 unsigned int count;
5297 unsigned int i;
5298
5299 if (info == NULL)
5300 return TRUE;
5301
5302 bed = get_elf_backend_data (abfd);
5303 tdata = elf_tdata (abfd);
5304 phdr = tdata->phdr;
5305 count = elf_program_header_size (abfd) / bed->s->sizeof_phdr;
5306 htab = spu_hash_table (info);
5307 if (htab->num_overlays != 0)
5308 {
5309 struct elf_segment_map *m;
5310 unsigned int o;
5311
5312 for (i = 0, m = elf_seg_map (abfd); m; ++i, m = m->next)
5313 if (m->count != 0
5314 && (o = spu_elf_section_data (m->sections[0])->u.o.ovl_index) != 0)
5315 {
5316 /* Mark this as an overlay header. */
5317 phdr[i].p_flags |= PF_OVERLAY;
5318
5319 if (htab->ovtab != NULL && htab->ovtab->size != 0
5320 && htab->params->ovly_flavour != ovly_soft_icache)
5321 {
5322 bfd_byte *p = htab->ovtab->contents;
5323 unsigned int off = o * 16 + 8;
5324
5325 /* Write file_off into _ovly_table. */
5326 bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
5327 }
5328 }
5329 /* Soft-icache has its file offset put in .ovl.init. */
5330 if (htab->init != NULL && htab->init->size != 0)
5331 {
5332 bfd_vma val = elf_section_data (htab->ovl_sec[0])->this_hdr.sh_offset;
5333
5334 bfd_put_32 (htab->init->owner, val, htab->init->contents + 4);
5335 }
5336 }
5337
5338 /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
5339 of 16. This should always be possible when using the standard
5340 linker scripts, but don't create overlapping segments if
5341 someone is playing games with linker scripts. */
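/* Note that "-x & 15" is the number of bytes needed to round x up to
   a multiple of 16; e.g. a p_filesz of 0x2a gives an adjust of 6.  */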
5342 last = NULL;
5343 for (i = count; i-- != 0; )
5344 if (phdr[i].p_type == PT_LOAD)
5345 {
5346 unsigned adjust;
5347
5348 adjust = -phdr[i].p_filesz & 15;
5349 if (adjust != 0
5350 && last != NULL
5351 && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
5352 break;
5353
5354 adjust = -phdr[i].p_memsz & 15;
5355 if (adjust != 0
5356 && last != NULL
5357 && phdr[i].p_filesz != 0
5358 && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
5359 && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
5360 break;
5361
5362 if (phdr[i].p_filesz != 0)
5363 last = &phdr[i];
5364 }
5365
5366 if (i == (unsigned int) -1)
5367 for (i = count; i-- != 0; )
5368 if (phdr[i].p_type == PT_LOAD)
5369 {
5370 unsigned adjust;
5371
5372 adjust = -phdr[i].p_filesz & 15;
5373 phdr[i].p_filesz += adjust;
5374
5375 adjust = -phdr[i].p_memsz & 15;
5376 phdr[i].p_memsz += adjust;
5377 }
5378
5379 return TRUE;
5380 }
5381
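/* Size the .fixup section: count one record for each quadword that
   contains at least one R_SPU_ADDR32 reloc in an allocated section,
   add one for the terminating zero record, then allocate the section
   contents.  */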
5382 bfd_boolean
5383 spu_elf_size_sections (bfd * output_bfd, struct bfd_link_info *info)
5384 {
5385 struct spu_link_hash_table *htab = spu_hash_table (info);
5386 if (htab->params->emit_fixups)
5387 {
5388 asection *sfixup = htab->sfixup;
5389 int fixup_count = 0;
5390 bfd *ibfd;
5391 size_t size;
5392
5393 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
5394 {
5395 asection *isec;
5396
5397 if (bfd_get_flavour (ibfd) != bfd_target_elf_flavour)
5398 continue;
5399
5400 /* Walk over each section attached to the input bfd. */
5401 for (isec = ibfd->sections; isec != NULL; isec = isec->next)
5402 {
5403 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
5404 bfd_vma base_end;
5405
5406 /* If there aren't any relocs, then there's nothing more
5407 to do. */
5408 if ((isec->flags & SEC_ALLOC) == 0
5409 || (isec->flags & SEC_RELOC) == 0
5410 || isec->reloc_count == 0)
5411 continue;
5412
5413 /* Get the relocs. */
5414 internal_relocs =
5415 _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
5416 info->keep_memory);
5417 if (internal_relocs == NULL)
5418 return FALSE;
5419
5420 /* 1 quadword can contain up to 4 R_SPU_ADDR32
5421 relocations. They are stored in a single word by
5422 saving the upper 28 bits of the address and setting the
5423 lower 4 bits to a bit mask of the words that have the
5424 relocation. BASE_END keeps track of the next quadword. */
5425 irela = internal_relocs;
5426 irelaend = irela + isec->reloc_count;
5427 base_end = 0;
5428 for (; irela < irelaend; irela++)
5429 if (ELF32_R_TYPE (irela->r_info) == R_SPU_ADDR32
5430 && irela->r_offset >= base_end)
5431 {
5432 base_end = (irela->r_offset & ~(bfd_vma) 15) + 16;
5433 fixup_count++;
5434 }
5435 }
5436 }
5437
5438 /* We always have a NULL fixup as a sentinel.  */
5439 size = (fixup_count + 1) * FIXUP_RECORD_SIZE;
5440 if (!bfd_set_section_size (output_bfd, sfixup, size))
5441 return FALSE;
5442 sfixup->contents = (bfd_byte *) bfd_zalloc (info->input_bfds, size);
5443 if (sfixup->contents == NULL)
5444 return FALSE;
5445 }
5446 return TRUE;
5447 }
5448
5449 #define TARGET_BIG_SYM bfd_elf32_spu_vec
5450 #define TARGET_BIG_NAME "elf32-spu"
5451 #define ELF_ARCH bfd_arch_spu
5452 #define ELF_TARGET_ID SPU_ELF_DATA
5453 #define ELF_MACHINE_CODE EM_SPU
5454 /* This matches the alignment needed for DMA.  */
5455 #define ELF_MAXPAGESIZE 0x80
5456 #define elf_backend_rela_normal 1
5457 #define elf_backend_can_gc_sections 1
5458
5459 #define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
5460 #define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
5461 #define elf_info_to_howto spu_elf_info_to_howto
5462 #define elf_backend_count_relocs spu_elf_count_relocs
5463 #define elf_backend_relocate_section spu_elf_relocate_section
5464 #define elf_backend_finish_dynamic_sections spu_elf_finish_dynamic_sections
5465 #define elf_backend_symbol_processing spu_elf_backend_symbol_processing
5466 #define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
5467 #define elf_backend_object_p spu_elf_object_p
5468 #define bfd_elf32_new_section_hook spu_elf_new_section_hook
5469 #define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create
5470
5471 #define elf_backend_additional_program_headers spu_elf_additional_program_headers
5472 #define elf_backend_modify_segment_map spu_elf_modify_segment_map
5473 #define elf_backend_modify_program_headers spu_elf_modify_program_headers
5474 #define elf_backend_post_process_headers spu_elf_post_process_headers
5475 #define elf_backend_fake_sections spu_elf_fake_sections
5476 #define elf_backend_special_sections spu_elf_special_sections
5477 #define bfd_elf32_bfd_final_link spu_elf_final_link
5478
5479 #include "elf32-target.h"