/* SPU specific support for 32-bit ELF

   Copyright 2006, 2007, 2008, 2009 Free Software Foundation, Inc.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License along
   with this program; if not, write to the Free Software Foundation, Inc.,
   51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */

#include "sysdep.h"
#include "libiberty.h"
#include "bfd.h"
#include "bfdlink.h"
#include "libbfd.h"
#include "elf-bfd.h"
#include "elf/spu.h"
#include "elf32-spu.h"

/* We use RELA style relocs.  Don't define USE_REL.  */

static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
                                           void *, asection *,
                                           bfd *, char **);

/* Values of type 'enum elf_spu_reloc_type' are used to index this
   array, so it must be declared in the order of that type.  */

static reloc_howto_type elf_howto_table[] = {
  HOWTO (R_SPU_NONE, 0, 0, 0, FALSE, 0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_NONE",
         FALSE, 0, 0x00000000, FALSE),
  HOWTO (R_SPU_ADDR10, 4, 2, 10, FALSE, 14, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR10",
         FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16, 2, 2, 16, FALSE, 7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR16",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE, 7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR16_HI",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_LO, 0, 2, 16, FALSE, 7, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_ADDR16_LO",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR18, 0, 2, 18, FALSE, 7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR18",
         FALSE, 0, 0x01ffff80, FALSE),
  HOWTO (R_SPU_ADDR32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_ADDR32",
         FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_REL16, 2, 2, 16, TRUE, 7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_REL16",
         FALSE, 0, 0x007fff80, TRUE),
  HOWTO (R_SPU_ADDR7, 0, 2, 7, FALSE, 14, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_ADDR7",
         FALSE, 0, 0x001fc000, FALSE),
  HOWTO (R_SPU_REL9, 2, 2, 9, TRUE, 0, complain_overflow_signed,
         spu_elf_rel9, "SPU_REL9",
         FALSE, 0, 0x0180007f, TRUE),
  HOWTO (R_SPU_REL9I, 2, 2, 9, TRUE, 0, complain_overflow_signed,
         spu_elf_rel9, "SPU_REL9I",
         FALSE, 0, 0x0000c07f, TRUE),
  HOWTO (R_SPU_ADDR10I, 0, 2, 10, FALSE, 14, complain_overflow_signed,
         bfd_elf_generic_reloc, "SPU_ADDR10I",
         FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16I, 0, 2, 16, FALSE, 7, complain_overflow_signed,
         bfd_elf_generic_reloc, "SPU_ADDR16I",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_REL32, 0, 2, 32, TRUE, 0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_REL32",
         FALSE, 0, 0xffffffff, TRUE),
  HOWTO (R_SPU_ADDR16X, 0, 2, 16, FALSE, 7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR16X",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_PPU32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_PPU32",
         FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_PPU64, 0, 4, 64, FALSE, 0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_PPU64",
         FALSE, 0, -1, FALSE),
  HOWTO (R_SPU_ADD_PIC, 0, 0, 0, FALSE, 0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_ADD_PIC",
         FALSE, 0, 0x00000000, FALSE),
};

static struct bfd_elf_special_section const spu_elf_special_sections[] = {
  { "._ea", 4, 0, SHT_PROGBITS, SHF_WRITE },
  { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
  { NULL, 0, 0, 0, 0 }
};

static enum elf_spu_reloc_type
spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
{
  switch (code)
    {
    default:
      return R_SPU_NONE;
    case BFD_RELOC_SPU_IMM10W:
      return R_SPU_ADDR10;
    case BFD_RELOC_SPU_IMM16W:
      return R_SPU_ADDR16;
    case BFD_RELOC_SPU_LO16:
      return R_SPU_ADDR16_LO;
    case BFD_RELOC_SPU_HI16:
      return R_SPU_ADDR16_HI;
    case BFD_RELOC_SPU_IMM18:
      return R_SPU_ADDR18;
    case BFD_RELOC_SPU_PCREL16:
      return R_SPU_REL16;
    case BFD_RELOC_SPU_IMM7:
      return R_SPU_ADDR7;
    case BFD_RELOC_SPU_IMM8:
      return R_SPU_NONE;
    case BFD_RELOC_SPU_PCREL9a:
      return R_SPU_REL9;
    case BFD_RELOC_SPU_PCREL9b:
      return R_SPU_REL9I;
    case BFD_RELOC_SPU_IMM10:
      return R_SPU_ADDR10I;
    case BFD_RELOC_SPU_IMM16:
      return R_SPU_ADDR16I;
    case BFD_RELOC_32:
      return R_SPU_ADDR32;
    case BFD_RELOC_32_PCREL:
      return R_SPU_REL32;
    case BFD_RELOC_SPU_PPU32:
      return R_SPU_PPU32;
    case BFD_RELOC_SPU_PPU64:
      return R_SPU_PPU64;
    case BFD_RELOC_SPU_ADD_PIC:
      return R_SPU_ADD_PIC;
    }
}

static void
spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
                       arelent *cache_ptr,
                       Elf_Internal_Rela *dst)
{
  enum elf_spu_reloc_type r_type;

  r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
  BFD_ASSERT (r_type < R_SPU_max);
  cache_ptr->howto = &elf_howto_table[(int) r_type];
}

static reloc_howto_type *
spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
                           bfd_reloc_code_real_type code)
{
  enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);

  if (r_type == R_SPU_NONE)
    return NULL;

  return elf_howto_table + r_type;
}

static reloc_howto_type *
spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
                           const char *r_name)
{
  unsigned int i;

  for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
    if (elf_howto_table[i].name != NULL
        && strcasecmp (elf_howto_table[i].name, r_name) == 0)
      return &elf_howto_table[i];

  return NULL;
}

/* Apply R_SPU_REL9 and R_SPU_REL9I relocs.  */

static bfd_reloc_status_type
spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
              void *data, asection *input_section,
              bfd *output_bfd, char **error_message)
{
  bfd_size_type octets;
  bfd_vma val;
  long insn;

  /* If this is a relocatable link (output_bfd test tells us), just
     call the generic function.  Any adjustment will be done at final
     link time.  */
  if (output_bfd != NULL)
    return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
                                  input_section, output_bfd, error_message);

  if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
    return bfd_reloc_outofrange;
  octets = reloc_entry->address * bfd_octets_per_byte (abfd);

  /* Get symbol value.  */
  val = 0;
  if (!bfd_is_com_section (symbol->section))
    val = symbol->value;
  if (symbol->section->output_section)
    val += symbol->section->output_section->vma;

  val += reloc_entry->addend;

  /* Make it pc-relative.  */
  val -= input_section->output_section->vma + input_section->output_offset;

  val >>= 2;
  if (val + 256 >= 512)
    return bfd_reloc_overflow;

  insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);

  /* Move two high bits of value to REL9I and REL9 position.
     The mask will take care of selecting the right field.  */
  val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
  insn &= ~reloc_entry->howto->dst_mask;
  insn |= val & reloc_entry->howto->dst_mask;
  bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
  return bfd_reloc_ok;
}
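
/* For illustration: after the shift above, bits 0-6 of the 9-bit value
   sit in insn bits 0-6 for both relocs; the (val & 0x180) << 16 term
   puts the two high bits at insn bits 23-24, selected by the SPU_REL9
   dst_mask 0x0180007f, while the (val & 0x180) << 7 term puts them at
   bits 14-15, selected by the SPU_REL9I dst_mask 0x0000c07f.  Whichever
   copy falls outside the reloc's dst_mask is discarded.  */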

static bfd_boolean
spu_elf_new_section_hook (bfd *abfd, asection *sec)
{
  if (!sec->used_by_bfd)
    {
      struct _spu_elf_section_data *sdata;

      sdata = bfd_zalloc (abfd, sizeof (*sdata));
      if (sdata == NULL)
        return FALSE;
      sec->used_by_bfd = sdata;
    }

  return _bfd_elf_new_section_hook (abfd, sec);
}

/* Set up overlay info for executables.  */

static bfd_boolean
spu_elf_object_p (bfd *abfd)
{
  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
    {
      unsigned int i, num_ovl, num_buf;
      Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
      Elf_Internal_Ehdr *ehdr = elf_elfheader (abfd);
      Elf_Internal_Phdr *last_phdr = NULL;

      for (num_buf = 0, num_ovl = 0, i = 0; i < ehdr->e_phnum; i++, phdr++)
        if (phdr->p_type == PT_LOAD && (phdr->p_flags & PF_OVERLAY) != 0)
          {
            unsigned int j;

            ++num_ovl;
            if (last_phdr == NULL
                || ((last_phdr->p_vaddr ^ phdr->p_vaddr) & 0x3ffff) != 0)
              ++num_buf;
            last_phdr = phdr;
            for (j = 1; j < elf_numsections (abfd); j++)
              {
                Elf_Internal_Shdr *shdr = elf_elfsections (abfd)[j];

                if (ELF_IS_SECTION_IN_SEGMENT_MEMORY (shdr, phdr))
                  {
                    asection *sec = shdr->bfd_section;
                    spu_elf_section_data (sec)->u.o.ovl_index = num_ovl;
                    spu_elf_section_data (sec)->u.o.ovl_buf = num_buf;
                  }
              }
          }
    }
  return TRUE;
}

/* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
   strip --strip-unneeded will not remove them.  */

static void
spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
{
  if (sym->name != NULL
      && sym->section != bfd_abs_section_ptr
      && strncmp (sym->name, "_EAR_", 5) == 0)
    sym->flags |= BSF_KEEP;
}

/* SPU ELF linker hash table.  */

struct spu_link_hash_table
{
  struct elf_link_hash_table elf;

  struct spu_elf_params *params;

  /* Shortcuts to overlay sections.  */
  asection *ovtab;
  asection *init;
  asection *toe;
  asection **ovl_sec;

  /* Count of stubs in each overlay section.  */
  unsigned int *stub_count;

  /* The stub section for each overlay section.  */
  asection **stub_sec;

  struct elf_link_hash_entry *ovly_entry[2];

  /* Number of overlay buffers.  */
  unsigned int num_buf;

  /* Total number of overlays.  */
  unsigned int num_overlays;

  /* For soft icache.  */
  unsigned int line_size_log2;
  unsigned int num_lines_log2;
  unsigned int fromelem_size_log2;

  /* How much memory we have.  */
  unsigned int local_store;

  /* Count of overlay stubs needed in non-overlay area.  */
  unsigned int non_ovly_stub;

  /* Pointer to the fixup section.  */
  asection *sfixup;

  /* Set on error.  */
  unsigned int stub_err : 1;
};

/* Hijack the generic got fields for overlay stub accounting.  */

struct got_entry
{
  struct got_entry *next;
  unsigned int ovl;
  union {
    bfd_vma addend;
    bfd_vma br_addr;
  };
  bfd_vma stub_addr;
};

#define spu_hash_table(p) \
  ((struct spu_link_hash_table *) ((p)->hash))

struct call_info
{
  struct function_info *fun;
  struct call_info *next;
  unsigned int count;
  unsigned int max_depth;
  unsigned int is_tail : 1;
  unsigned int is_pasted : 1;
  unsigned int broken_cycle : 1;
  unsigned int priority : 13;
};

struct function_info
{
  /* List of functions called.  Also branches to hot/cold part of
     function.  */
  struct call_info *call_list;
  /* For hot/cold part of function, point to owner.  */
  struct function_info *start;
  /* Symbol at start of function.  */
  union {
    Elf_Internal_Sym *sym;
    struct elf_link_hash_entry *h;
  } u;
  /* Function section.  */
  asection *sec;
  asection *rodata;
  /* Where last called from, and number of sections called from.  */
  asection *last_caller;
  unsigned int call_count;
  /* Address range of (this part of) function.  */
  bfd_vma lo, hi;
  /* Offset where we found a store of lr, or -1 if none found.  */
  bfd_vma lr_store;
  /* Offset where we found the stack adjustment insn.  */
  bfd_vma sp_adjust;
  /* Stack usage.  */
  int stack;
  /* Distance from root of call tree.  Tail and hot/cold branches
     count as one deeper.  We aren't counting stack frames here.  */
  unsigned int depth;
  /* Set if global symbol.  */
  unsigned int global : 1;
  /* Set if known to be start of function (as distinct from a hunk
     in hot/cold section).  */
  unsigned int is_func : 1;
  /* Set if not a root node.  */
  unsigned int non_root : 1;
  /* Flags used during call tree traversal.  It's cheaper to replicate
     the visit flags than have one which needs clearing after a traversal.  */
  unsigned int visit1 : 1;
  unsigned int visit2 : 1;
  unsigned int marking : 1;
  unsigned int visit3 : 1;
  unsigned int visit4 : 1;
  unsigned int visit5 : 1;
  unsigned int visit6 : 1;
  unsigned int visit7 : 1;
};

struct spu_elf_stack_info
{
  int num_fun;
  int max_fun;
  /* Variable size array describing functions, one per contiguous
     address range belonging to a function.  */
  struct function_info fun[1];
};

static struct function_info *find_function (asection *, bfd_vma,
                                            struct bfd_link_info *);

/* Create a spu ELF linker hash table.  */

static struct bfd_link_hash_table *
spu_elf_link_hash_table_create (bfd *abfd)
{
  struct spu_link_hash_table *htab;

  htab = bfd_malloc (sizeof (*htab));
  if (htab == NULL)
    return NULL;

  if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
                                      _bfd_elf_link_hash_newfunc,
                                      sizeof (struct elf_link_hash_entry)))
    {
      free (htab);
      return NULL;
    }

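  /* Zero all fields from ovtab to the end of the struct, leaving the
     generic ELF table (and params, which spu_elf_setup sets) untouched.  */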
  memset (&htab->ovtab, 0,
          sizeof (*htab) - offsetof (struct spu_link_hash_table, ovtab));

  htab->elf.init_got_refcount.refcount = 0;
  htab->elf.init_got_refcount.glist = NULL;
  htab->elf.init_got_offset.offset = 0;
  htab->elf.init_got_offset.glist = NULL;
  return &htab->elf.root;
}

void
spu_elf_setup (struct bfd_link_info *info, struct spu_elf_params *params)
{
  bfd_vma max_branch_log2;

  struct spu_link_hash_table *htab = spu_hash_table (info);
  htab->params = params;
  htab->line_size_log2 = bfd_log2 (htab->params->line_size);
  htab->num_lines_log2 = bfd_log2 (htab->params->num_lines);

  /* For the software i-cache, we provide a "from" list whose size
     is a power-of-two number of quadwords, big enough to hold one
     byte per outgoing branch.  Compute this number here.  */
  max_branch_log2 = bfd_log2 (htab->params->max_branch);
  htab->fromelem_size_log2 = max_branch_log2 > 4 ? max_branch_log2 - 4 : 0;
}
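
/* For illustration (example values, not from the original source): with
   max_branch == 256 outgoing branches per cache line, one byte each
   needs 256 bytes == 16 quadwords, so max_branch_log2 == 8 and
   fromelem_size_log2 == 8 - 4 == 4 (a quadword is 16 bytes, hence the
   "- 4").  Any max_branch up to 16 still occupies one full quadword,
   giving fromelem_size_log2 == 0.  */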

/* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
   to (hash, NULL) for global symbols, and (NULL, sym) for locals.  Set
   *SYMSECP to the symbol's section.  *LOCSYMSP caches local syms.  */

static bfd_boolean
get_sym_h (struct elf_link_hash_entry **hp,
           Elf_Internal_Sym **symp,
           asection **symsecp,
           Elf_Internal_Sym **locsymsp,
           unsigned long r_symndx,
           bfd *ibfd)
{
  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;

  if (r_symndx >= symtab_hdr->sh_info)
    {
      struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
      struct elf_link_hash_entry *h;

      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
      while (h->root.type == bfd_link_hash_indirect
             || h->root.type == bfd_link_hash_warning)
        h = (struct elf_link_hash_entry *) h->root.u.i.link;

      if (hp != NULL)
        *hp = h;

      if (symp != NULL)
        *symp = NULL;

      if (symsecp != NULL)
        {
          asection *symsec = NULL;
          if (h->root.type == bfd_link_hash_defined
              || h->root.type == bfd_link_hash_defweak)
            symsec = h->root.u.def.section;
          *symsecp = symsec;
        }
    }
  else
    {
      Elf_Internal_Sym *sym;
      Elf_Internal_Sym *locsyms = *locsymsp;

      if (locsyms == NULL)
        {
          locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
          if (locsyms == NULL)
            locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr,
                                            symtab_hdr->sh_info,
                                            0, NULL, NULL, NULL);
          if (locsyms == NULL)
            return FALSE;
          *locsymsp = locsyms;
        }
      sym = locsyms + r_symndx;

      if (hp != NULL)
        *hp = NULL;

      if (symp != NULL)
        *symp = sym;

      if (symsecp != NULL)
        *symsecp = bfd_section_from_elf_index (ibfd, sym->st_shndx);
    }

  return TRUE;
}
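
/* Typical use, as in process_stubs below -- resolve the symbol behind a
   reloc, then test H to see whether it was global or local:

     if (!get_sym_h (&h, &sym, &sym_sec, &local_syms,
                     ELF32_R_SYM (irela->r_info), ibfd))
       return FALSE;

   The caller owns the cached LOCAL_SYMS array and must either free it
   or stash it in symtab_hdr->contents when done, as process_stubs
   does.  */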

/* Create the note section if not already present.  This is done early so
   that the linker maps the sections to the right place in the output.  */

bfd_boolean
spu_elf_create_sections (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
      break;

  if (ibfd == NULL)
    {
      /* Make SPU_PTNOTE_SPUNAME section.  */
      asection *s;
      size_t name_len;
      size_t size;
      bfd_byte *data;
      flagword flags;

      ibfd = info->input_bfds;
      flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
      if (s == NULL
          || !bfd_set_section_alignment (ibfd, s, 4))
        return FALSE;

      name_len = strlen (bfd_get_filename (info->output_bfd)) + 1;
      size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
      size += (name_len + 3) & -4;

      if (!bfd_set_section_size (ibfd, s, size))
        return FALSE;

      data = bfd_zalloc (ibfd, size);
      if (data == NULL)
        return FALSE;

      bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
      bfd_put_32 (ibfd, name_len, data + 4);
      bfd_put_32 (ibfd, 1, data + 8);
      memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
      memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
              bfd_get_filename (info->output_bfd), name_len);
      s->contents = data;
    }

  if (htab->params->emit_fixups)
    {
      asection *s;
      flagword flags;
      ibfd = info->input_bfds;
      flags = SEC_LOAD | SEC_ALLOC | SEC_READONLY | SEC_HAS_CONTENTS
              | SEC_IN_MEMORY;
      s = bfd_make_section_anyway_with_flags (ibfd, ".fixup", flags);
      if (s == NULL || !bfd_set_section_alignment (ibfd, s, 2))
        return FALSE;
      htab->sfixup = s;
    }

  return TRUE;
}
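
/* The note built above follows the standard ELF note layout; byte
   offsets from the start of the section:

     +0   namesz = sizeof (SPU_PLUGIN_NAME)
     +4   descsz = strlen (output filename) + 1
     +8   type   = 1
     +12  name   = SPU_PLUGIN_NAME, padded to a multiple of 4
     then desc   = output filename, padded to a multiple of 4

   which is exactly the "12 + rounded name + rounded desc" size computed
   before the bfd_put_32 calls.  */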

/* qsort predicate to sort sections by vma.  */

static int
sort_sections (const void *a, const void *b)
{
  const asection *const *s1 = a;
  const asection *const *s2 = b;
  bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;

  if (delta != 0)
    return delta < 0 ? -1 : 1;

  return (*s1)->index - (*s2)->index;
}

/* Identify overlays in the output bfd, and number them.
   Returns 0 on error, 1 if no overlays, 2 if overlays.  */

int
spu_elf_find_overlays (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection **alloc_sec;
  unsigned int i, n, ovl_index, num_buf;
  asection *s;
  bfd_vma ovl_end;
  static const char *const entry_names[2][2] = {
    { "__ovly_load", "__icache_br_handler" },
    { "__ovly_return", "__icache_call_handler" }
  };

  if (info->output_bfd->section_count < 2)
    return 1;

  alloc_sec
    = bfd_malloc (info->output_bfd->section_count * sizeof (*alloc_sec));
  if (alloc_sec == NULL)
    return 0;

  /* Pick out all the alloced sections.  */
  for (n = 0, s = info->output_bfd->sections; s != NULL; s = s->next)
    if ((s->flags & SEC_ALLOC) != 0
        && (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
        && s->size != 0)
      alloc_sec[n++] = s;

  if (n == 0)
    {
      free (alloc_sec);
      return 1;
    }

  /* Sort them by vma.  */
  qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);

  ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      unsigned int prev_buf = 0, set_id = 0;

      /* Look for an overlapping vma to find the first overlay section.  */
      bfd_vma vma_start = 0;

      for (i = 1; i < n; i++)
        {
          s = alloc_sec[i];
          if (s->vma < ovl_end)
            {
              asection *s0 = alloc_sec[i - 1];
              vma_start = s0->vma;
              ovl_end = (s0->vma
                         + ((bfd_vma) 1
                            << (htab->num_lines_log2 + htab->line_size_log2)));
              --i;
              break;
            }
          else
            ovl_end = s->vma + s->size;
        }

      /* Now find any sections within the cache area.  */
      for (ovl_index = 0, num_buf = 0; i < n; i++)
        {
          s = alloc_sec[i];
          if (s->vma >= ovl_end)
            break;

          /* A section in an overlay area called .ovl.init is not an
             overlay in the sense of something loaded in by the overlay
             manager; rather, it provides the initial contents of the
             overlay buffer.  */
          if (strncmp (s->name, ".ovl.init", 9) != 0)
            {
              num_buf = ((s->vma - vma_start) >> htab->line_size_log2) + 1;
              set_id = (num_buf == prev_buf) ? set_id + 1 : 0;
              prev_buf = num_buf;

              if ((s->vma - vma_start) & (htab->params->line_size - 1))
                {
                  info->callbacks->einfo (_("%X%P: overlay section %A "
                                            "does not start on a cache line.\n"),
                                          s);
                  bfd_set_error (bfd_error_bad_value);
                  return 0;
                }
              else if (s->size > htab->params->line_size)
                {
                  info->callbacks->einfo (_("%X%P: overlay section %A "
                                            "is larger than a cache line.\n"),
                                          s);
                  bfd_set_error (bfd_error_bad_value);
                  return 0;
                }

              alloc_sec[ovl_index++] = s;
              spu_elf_section_data (s)->u.o.ovl_index
                = (set_id << htab->num_lines_log2) + num_buf;
              spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
            }
        }

      /* Ensure there are no more overlay sections.  */
      for ( ; i < n; i++)
        {
          s = alloc_sec[i];
          if (s->vma < ovl_end)
            {
              info->callbacks->einfo (_("%X%P: overlay section %A "
                                        "is not in cache area.\n"),
                                      alloc_sec[i-1]);
              bfd_set_error (bfd_error_bad_value);
              return 0;
            }
          else
            ovl_end = s->vma + s->size;
        }
    }
  else
    {
      /* Look for overlapping vmas.  Any with overlap must be overlays.
         Count them.  Also count the number of overlay regions.  */
      for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
        {
          s = alloc_sec[i];
          if (s->vma < ovl_end)
            {
              asection *s0 = alloc_sec[i - 1];

              if (spu_elf_section_data (s0)->u.o.ovl_index == 0)
                {
                  ++num_buf;
                  if (strncmp (s0->name, ".ovl.init", 9) != 0)
                    {
                      alloc_sec[ovl_index] = s0;
                      spu_elf_section_data (s0)->u.o.ovl_index = ++ovl_index;
                      spu_elf_section_data (s0)->u.o.ovl_buf = num_buf;
                    }
                  else
                    ovl_end = s->vma + s->size;
                }
              if (strncmp (s->name, ".ovl.init", 9) != 0)
                {
                  alloc_sec[ovl_index] = s;
                  spu_elf_section_data (s)->u.o.ovl_index = ++ovl_index;
                  spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
                  if (s0->vma != s->vma)
                    {
                      info->callbacks->einfo (_("%X%P: overlay sections %A "
                                                "and %A do not start at the "
                                                "same address.\n"),
                                              s0, s);
                      bfd_set_error (bfd_error_bad_value);
                      return 0;
                    }
                  if (ovl_end < s->vma + s->size)
                    ovl_end = s->vma + s->size;
                }
            }
          else
            ovl_end = s->vma + s->size;
        }
    }

  htab->num_overlays = ovl_index;
  htab->num_buf = num_buf;
  htab->ovl_sec = alloc_sec;

  if (ovl_index == 0)
    return 1;

  for (i = 0; i < 2; i++)
    {
      const char *name;
      struct elf_link_hash_entry *h;

      name = entry_names[i][htab->params->ovly_flavour];
      h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
      if (h == NULL)
        return 0;

      if (h->root.type == bfd_link_hash_new)
        {
          h->root.type = bfd_link_hash_undefined;
          h->ref_regular = 1;
          h->ref_regular_nonweak = 1;
          h->non_elf = 0;
        }
      htab->ovly_entry[i] = h;
    }

  return 2;
}
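
/* Worked example of the soft-icache numbering above (illustrative
   values, not from the original source): with line_size == 1024
   (log2 10) and num_lines == 32 (log2 5), a section at
   vma_start + 3 * 1024 lands in cache line 3, so num_buf == 4.  The
   first section in that line gets set_id == 0 and ovl_index == 4; a
   second section sharing the line gets set_id == 1 and
   ovl_index == (1 << 5) + 4 == 36.  */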

/* Non-zero to use bra in overlay stubs rather than br.  */
#define BRA_STUBS 0

#define BRA   0x30000000
#define BRASL 0x31000000
#define BR    0x32000000
#define BRSL  0x33000000
#define NOP   0x40200000
#define LNOP  0x00200000
#define ILA   0x42000000

/* Return true for all relative and absolute branch instructions.
   bra   00110000 0..
   brasl 00110001 0..
   br    00110010 0..
   brsl  00110011 0..
   brz   00100000 0..
   brnz  00100001 0..
   brhz  00100010 0..
   brhnz 00100011 0..  */

static bfd_boolean
is_branch (const unsigned char *insn)
{
  return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
}

/* Return true for all indirect branch instructions.
   bi     00110101 000
   bisl   00110101 001
   iret   00110101 010
   bisled 00110101 011
   biz    00100101 000
   binz   00100101 001
   bihz   00100101 010
   bihnz  00100101 011  */

static bfd_boolean
is_indirect_branch (const unsigned char *insn)
{
  return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
}

/* Return true for branch hint instructions.
   hbra 0001000..
   hbrr 0001001..  */

static bfd_boolean
is_hint (const unsigned char *insn)
{
  return (insn[0] & 0xfc) == 0x10;
}
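
/* Mask check, for illustration: brsl has first byte 0x33 == 00110011,
   and 0x33 & 0xec == 0x20, so is_branch accepts it; lnop (0x00200000)
   has first byte 0x00, which fails the test.  The 0xec mask simply
   ignores the three bits that vary across the eight branch forms listed
   above (first bytes 0x20..0x23 and 0x30..0x33).  */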

/* True if INPUT_SECTION might need overlay stubs.  */

static bfd_boolean
maybe_needs_stubs (asection *input_section)
{
  /* No stubs for debug sections and suchlike.  */
  if ((input_section->flags & SEC_ALLOC) == 0)
    return FALSE;

  /* No stubs for link-once sections that will be discarded.  */
  if (input_section->output_section == bfd_abs_section_ptr)
    return FALSE;

  /* Don't create stubs for .eh_frame references.  */
  if (strcmp (input_section->name, ".eh_frame") == 0)
    return FALSE;

  return TRUE;
}

enum _stub_type
{
  no_stub,
  call_ovl_stub,
  br000_ovl_stub,
  br001_ovl_stub,
  br010_ovl_stub,
  br011_ovl_stub,
  br100_ovl_stub,
  br101_ovl_stub,
  br110_ovl_stub,
  br111_ovl_stub,
  nonovl_stub,
  stub_error
};
/* Return the type of overlay stub needed for a reloc against this
   symbol, or no_stub if none is needed.  nonovl_stub means the stub
   must be in the non-overlay area.  */

static enum _stub_type
needs_ovl_stub (struct elf_link_hash_entry *h,
                Elf_Internal_Sym *sym,
                asection *sym_sec,
                asection *input_section,
                Elf_Internal_Rela *irela,
                bfd_byte *contents,
                struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  enum elf_spu_reloc_type r_type;
  unsigned int sym_type;
  bfd_boolean branch, hint, call;
  enum _stub_type ret = no_stub;
  bfd_byte insn[4];

  if (sym_sec == NULL
      || sym_sec->output_section == bfd_abs_section_ptr
      || spu_elf_section_data (sym_sec->output_section) == NULL)
    return ret;

  if (h != NULL)
    {
      /* Ensure no stubs for user supplied overlay manager syms.  */
      if (h == htab->ovly_entry[0] || h == htab->ovly_entry[1])
        return ret;

      /* setjmp always goes via an overlay stub, because then the return
         and hence the longjmp goes via __ovly_return.  That magically
         makes setjmp/longjmp between overlays work.  */
      if (strncmp (h->root.root.string, "setjmp", 6) == 0
          && (h->root.root.string[6] == '\0' || h->root.root.string[6] == '@'))
        ret = call_ovl_stub;
    }

  if (h != NULL)
    sym_type = h->type;
  else
    sym_type = ELF_ST_TYPE (sym->st_info);

  r_type = ELF32_R_TYPE (irela->r_info);
  branch = FALSE;
  hint = FALSE;
  call = FALSE;
  if (r_type == R_SPU_REL16 || r_type == R_SPU_ADDR16)
    {
      if (contents == NULL)
        {
          contents = insn;
          if (!bfd_get_section_contents (input_section->owner,
                                         input_section,
                                         contents,
                                         irela->r_offset, 4))
            return stub_error;
        }
      else
        contents += irela->r_offset;

      branch = is_branch (contents);
      hint = is_hint (contents);
      if (branch || hint)
        {
          call = (contents[0] & 0xfd) == 0x31;
          if (call
              && sym_type != STT_FUNC
              && contents != insn)
            {
              /* It's common for people to write assembly and forget
                 to give function symbols the right type.  Handle
                 calls to such symbols, but warn so that (hopefully)
                 people will fix their code.  We need the symbol
                 type to be correct to distinguish function pointer
                 initialisation from other pointer initialisations.  */
              const char *sym_name;

              if (h != NULL)
                sym_name = h->root.root.string;
              else
                {
                  Elf_Internal_Shdr *symtab_hdr;
                  symtab_hdr = &elf_tdata (input_section->owner)->symtab_hdr;
                  sym_name = bfd_elf_sym_name (input_section->owner,
                                               symtab_hdr,
                                               sym,
                                               sym_sec);
                }
              (*_bfd_error_handler) (_("warning: call to non-function"
                                       " symbol %s defined in %B"),
                                     sym_sec->owner, sym_name);

            }
        }
    }

  if ((!branch && htab->params->ovly_flavour == ovly_soft_icache)
      || (sym_type != STT_FUNC
          && !(branch || hint)
          && (sym_sec->flags & SEC_CODE) == 0))
    return no_stub;

  /* Usually, symbols in non-overlay sections don't need stubs.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index == 0
      && !htab->params->non_overlay_stubs)
    return ret;

  /* A reference from some other section to a symbol in an overlay
     section needs a stub.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index
      != spu_elf_section_data (input_section->output_section)->u.o.ovl_index)
    {
      unsigned int lrlive = 0;
      if (branch)
        lrlive = (contents[1] & 0x70) >> 4;

      if (!lrlive && (call || sym_type == STT_FUNC))
        ret = call_ovl_stub;
      else
        ret = br000_ovl_stub + lrlive;
    }

  /* If this insn isn't a branch then we are possibly taking the
     address of a function and passing it out somehow.  Soft-icache code
     always generates inline code to do indirect branches.  */
  if (!(branch || hint)
      && sym_type == STT_FUNC
      && htab->params->ovly_flavour != ovly_soft_icache)
    ret = nonovl_stub;

  return ret;
}

static bfd_boolean
count_stub (struct spu_link_hash_table *htab,
            bfd *ibfd,
            asection *isec,
            enum _stub_type stub_type,
            struct elf_link_hash_entry *h,
            const Elf_Internal_Rela *irela)
{
  unsigned int ovl = 0;
  struct got_entry *g, **head;
  bfd_vma addend;

  /* If this instruction is a branch or call, we need a stub
     for it.  One stub per function per overlay.
     If it isn't a branch, then we are taking the address of
     this function so need a stub in the non-overlay area
     for it.  One stub per function.  */
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

  if (h != NULL)
    head = &h->got.glist;
  else
    {
      if (elf_local_got_ents (ibfd) == NULL)
        {
          bfd_size_type amt = (elf_tdata (ibfd)->symtab_hdr.sh_info
                               * sizeof (*elf_local_got_ents (ibfd)));
          elf_local_got_ents (ibfd) = bfd_zmalloc (amt);
          if (elf_local_got_ents (ibfd) == NULL)
            return FALSE;
        }
      head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
    }

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      htab->stub_count[ovl] += 1;
      return TRUE;
    }

  addend = 0;
  if (irela != NULL)
    addend = irela->r_addend;

  if (ovl == 0)
    {
      struct got_entry *gnext;

      for (g = *head; g != NULL; g = g->next)
        if (g->addend == addend && g->ovl == 0)
          break;

      if (g == NULL)
        {
          /* Need a new non-overlay area stub.  Zap other stubs.  */
          for (g = *head; g != NULL; g = gnext)
            {
              gnext = g->next;
              if (g->addend == addend)
                {
                  htab->stub_count[g->ovl] -= 1;
                  free (g);
                }
            }
        }
    }
  else
    {
      for (g = *head; g != NULL; g = g->next)
        if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
          break;
    }

  if (g == NULL)
    {
      g = bfd_malloc (sizeof *g);
      if (g == NULL)
        return FALSE;
      g->ovl = ovl;
      g->addend = addend;
      g->stub_addr = (bfd_vma) -1;
      g->next = *head;
      *head = g;

      htab->stub_count[ovl] += 1;
    }

  return TRUE;
}

/* Support two sizes of overlay stubs, a slower more compact stub of two
   instructions, and a faster stub of four instructions.
   Soft-icache stubs are four or eight words.  */

static unsigned int
ovl_stub_size (struct spu_elf_params *params)
{
  return 16 << params->ovly_flavour >> params->compact_stub;
}

static unsigned int
ovl_stub_size_log2 (struct spu_elf_params *params)
{
  return 4 + params->ovly_flavour - params->compact_stub;
}
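
/* For illustration, assuming ovly_flavour is 0 for ovly_normal and 1
   for ovly_soft_icache (as in elf32-spu.h) and compact_stub is 0 or 1,
   the expression 16 << flavour >> compact gives:

     normal, full size:  16 bytes (four instructions)
     normal, compact:     8 bytes (two instructions)
     icache, full size:  32 bytes (eight words)
     icache, compact:    16 bytes (four words)

   matching the comment above, with ovl_stub_size_log2 the corresponding
   power of two.  */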

/* Two instruction overlay stubs look like:

   brsl $75,__ovly_load
   .word target_ovl_and_address

   ovl_and_address is a word with the overlay number in the top 14 bits
   and local store address in the bottom 18 bits.

   Four instruction overlay stubs look like:

   ila $78,ovl_number
   lnop
   ila $79,target_address
   br __ovly_load

   Software icache stubs are:

   .word target_index
   .word target_ia;
   .word lrlive_branchlocalstoreaddr;
   brasl $75,__icache_br_handler
   .quad xor_pattern
*/
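
/* Worked encoding of ovl_and_address, for illustration: build_stub below
   emits (dest & 0x3ffff) | (dest_ovl << 18), so a target at local store
   address 0x12345 in overlay 3 becomes (3 << 18) | 0x12345 == 0xd2345.  */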

static bfd_boolean
build_stub (struct bfd_link_info *info,
            bfd *ibfd,
            asection *isec,
            enum _stub_type stub_type,
            struct elf_link_hash_entry *h,
            const Elf_Internal_Rela *irela,
            bfd_vma dest,
            asection *dest_sec)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  unsigned int ovl, dest_ovl, set_id;
  struct got_entry *g, **head;
  asection *sec;
  bfd_vma addend, from, to, br_dest, patt;
  unsigned int lrlive;

  ovl = 0;
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

  if (h != NULL)
    head = &h->got.glist;
  else
    head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);

  addend = 0;
  if (irela != NULL)
    addend = irela->r_addend;

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      g = bfd_malloc (sizeof *g);
      if (g == NULL)
        return FALSE;
      g->ovl = ovl;
      g->br_addr = 0;
      if (irela != NULL)
        g->br_addr = (irela->r_offset
                      + isec->output_offset
                      + isec->output_section->vma);
      g->next = *head;
      *head = g;
    }
  else
    {
      for (g = *head; g != NULL; g = g->next)
        if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
          break;
      if (g == NULL)
        abort ();

      if (g->ovl == 0 && ovl != 0)
        return TRUE;

      if (g->stub_addr != (bfd_vma) -1)
        return TRUE;
    }

  sec = htab->stub_sec[ovl];
  dest += dest_sec->output_offset + dest_sec->output_section->vma;
  from = sec->size + sec->output_offset + sec->output_section->vma;
  g->stub_addr = from;
  to = (htab->ovly_entry[0]->root.u.def.value
        + htab->ovly_entry[0]->root.u.def.section->output_offset
        + htab->ovly_entry[0]->root.u.def.section->output_section->vma);

  if (((dest | to | from) & 3) != 0)
    {
      htab->stub_err = 1;
      return FALSE;
    }
  dest_ovl = spu_elf_section_data (dest_sec->output_section)->u.o.ovl_index;

  if (htab->params->ovly_flavour == ovly_normal
      && !htab->params->compact_stub)
    {
      bfd_put_32 (sec->owner, ILA + ((dest_ovl << 7) & 0x01ffff80) + 78,
                  sec->contents + sec->size);
      bfd_put_32 (sec->owner, LNOP,
                  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, ILA + ((dest << 7) & 0x01ffff80) + 79,
                  sec->contents + sec->size + 8);
      if (!BRA_STUBS)
        bfd_put_32 (sec->owner, BR + (((to - (from + 12)) << 5) & 0x007fff80),
                    sec->contents + sec->size + 12);
      else
        bfd_put_32 (sec->owner, BRA + ((to << 5) & 0x007fff80),
                    sec->contents + sec->size + 12);
    }
  else if (htab->params->ovly_flavour == ovly_normal
           && htab->params->compact_stub)
    {
      if (!BRA_STUBS)
        bfd_put_32 (sec->owner, BRSL + (((to - from) << 5) & 0x007fff80) + 75,
                    sec->contents + sec->size);
      else
        bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
                    sec->contents + sec->size);
      bfd_put_32 (sec->owner, (dest & 0x3ffff) | (dest_ovl << 18),
                  sec->contents + sec->size + 4);
    }
  else if (htab->params->ovly_flavour == ovly_soft_icache
           && htab->params->compact_stub)
    {
      lrlive = 0;
      if (stub_type == nonovl_stub)
        ;
      else if (stub_type == call_ovl_stub)
        /* A brsl makes lr live and *(*sp+16) is live.
           Tail calls have the same liveness.  */
        lrlive = 5;
      else if (!htab->params->lrlive_analysis)
        /* Assume stack frame and lr save.  */
        lrlive = 1;
      else if (irela != NULL)
        {
          /* Analyse branch instructions.  */
          struct function_info *caller;
          bfd_vma off;

          caller = find_function (isec, irela->r_offset, info);
          if (caller->start == NULL)
            off = irela->r_offset;
          else
            {
              struct function_info *found = NULL;

              /* Find the earliest piece of this function that
                 has frame adjusting instructions.  We might
                 see dynamic frame adjustment (eg. for alloca)
                 in some later piece, but functions using
                 alloca always set up a frame earlier.  Frame
                 setup instructions are always in one piece.  */
              if (caller->lr_store != (bfd_vma) -1
                  || caller->sp_adjust != (bfd_vma) -1)
                found = caller;
              while (caller->start != NULL)
                {
                  caller = caller->start;
                  if (caller->lr_store != (bfd_vma) -1
                      || caller->sp_adjust != (bfd_vma) -1)
                    found = caller;
                }
              if (found != NULL)
                caller = found;
              off = (bfd_vma) -1;
            }

          if (off > caller->sp_adjust)
            {
              if (off > caller->lr_store)
                /* Only *(*sp+16) is live.  */
                lrlive = 1;
              else
                /* If no lr save, then we must be in a
                   leaf function with a frame.
                   lr is still live.  */
                lrlive = 4;
            }
          else if (off > caller->lr_store)
            {
              /* Between lr save and stack adjust.  */
              lrlive = 3;
              /* This should never happen since prologues won't
                 be split here.  */
              BFD_ASSERT (0);
            }
          else
            /* On entry to function.  */
            lrlive = 5;

          if (stub_type != br000_ovl_stub
              && lrlive != stub_type - br000_ovl_stub)
            info->callbacks->einfo (_("%A:0x%v lrlive .brinfo (%u) differs "
                                      "from analysis (%u)\n"),
                                    isec, irela->r_offset, lrlive,
                                    stub_type - br000_ovl_stub);
        }

      /* If given lrlive info via .brinfo, use it.  */
      if (stub_type > br000_ovl_stub)
        lrlive = stub_type - br000_ovl_stub;

      if (ovl == 0)
        to = (htab->ovly_entry[1]->root.u.def.value
              + htab->ovly_entry[1]->root.u.def.section->output_offset
              + htab->ovly_entry[1]->root.u.def.section->output_section->vma);

      /* The branch that uses this stub goes to stub_addr + 4.  We'll
         set up an xor pattern that can be used by the icache manager
         to modify this branch to go directly to its destination.  */
      g->stub_addr += 4;
      br_dest = g->stub_addr;
      if (irela == NULL)
        {
          /* Except in the case of _SPUEAR_ stubs, the branch in
             question is the one in the stub itself.  */
          BFD_ASSERT (stub_type == nonovl_stub);
          g->br_addr = g->stub_addr;
          br_dest = to;
        }

      set_id = ((dest_ovl - 1) >> htab->num_lines_log2) + 1;
      bfd_put_32 (sec->owner, (set_id << 18) | (dest & 0x3ffff),
                  sec->contents + sec->size);
      bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
                  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, (lrlive << 29) | (g->br_addr & 0x3ffff),
                  sec->contents + sec->size + 8);
      patt = dest ^ br_dest;
      if (irela != NULL && ELF32_R_TYPE (irela->r_info) == R_SPU_REL16)
        patt = (dest - g->br_addr) ^ (br_dest - g->br_addr);
      bfd_put_32 (sec->owner, (patt << 5) & 0x007fff80,
                  sec->contents + sec->size + 12);

      if (ovl == 0)
        /* Extra space for linked list entries.  */
        sec->size += 16;
    }
  else
    abort ();

  sec->size += ovl_stub_size (htab->params);

  if (htab->params->emit_stub_syms)
    {
      size_t len;
      char *name;
      int add;

      len = 8 + sizeof (".ovl_call.") - 1;
      if (h != NULL)
        len += strlen (h->root.root.string);
      else
        len += 8 + 1 + 8;
      add = 0;
      if (irela != NULL)
        add = (int) irela->r_addend & 0xffffffff;
      if (add != 0)
        len += 1 + 8;
      name = bfd_malloc (len);
      if (name == NULL)
        return FALSE;

      sprintf (name, "%08x.ovl_call.", g->ovl);
      if (h != NULL)
        strcpy (name + 8 + sizeof (".ovl_call.") - 1, h->root.root.string);
      else
        sprintf (name + 8 + sizeof (".ovl_call.") - 1, "%x:%x",
                 dest_sec->id & 0xffffffff,
                 (int) ELF32_R_SYM (irela->r_info) & 0xffffffff);
      if (add != 0)
        sprintf (name + len - 9, "+%x", add);

      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
      free (name);
      if (h == NULL)
        return FALSE;
      if (h->root.type == bfd_link_hash_new)
        {
          h->root.type = bfd_link_hash_defined;
          h->root.u.def.section = sec;
          h->size = ovl_stub_size (htab->params);
          h->root.u.def.value = sec->size - h->size;
          h->type = STT_FUNC;
          h->ref_regular = 1;
          h->def_regular = 1;
          h->ref_regular_nonweak = 1;
          h->forced_local = 1;
          h->non_elf = 0;
        }
    }

  return TRUE;
}

/* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
   symbols.  */

static bfd_boolean
allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
{
  /* Symbols starting with _SPUEAR_ need a stub because they may be
     invoked by the PPU.  */
  struct bfd_link_info *info = inf;
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection *sym_sec;

  if ((h->root.type == bfd_link_hash_defined
       || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
      && (sym_sec = h->root.u.def.section) != NULL
      && sym_sec->output_section != bfd_abs_section_ptr
      && spu_elf_section_data (sym_sec->output_section) != NULL
      && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
          || htab->params->non_overlay_stubs))
    {
      return count_stub (htab, NULL, NULL, nonovl_stub, h, NULL);
    }

  return TRUE;
}

static bfd_boolean
build_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
{
  /* Symbols starting with _SPUEAR_ need a stub because they may be
     invoked by the PPU.  */
  struct bfd_link_info *info = inf;
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection *sym_sec;

  if ((h->root.type == bfd_link_hash_defined
       || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
      && (sym_sec = h->root.u.def.section) != NULL
      && sym_sec->output_section != bfd_abs_section_ptr
      && spu_elf_section_data (sym_sec->output_section) != NULL
      && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
          || htab->params->non_overlay_stubs))
    {
      return build_stub (info, NULL, NULL, nonovl_stub, h, NULL,
                         h->root.u.def.value, sym_sec);
    }

  return TRUE;
}

/* Size or build stubs.  */

static bfd_boolean
process_stubs (struct bfd_link_info *info, bfd_boolean build)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      Elf_Internal_Shdr *symtab_hdr;
      asection *isec;
      Elf_Internal_Sym *local_syms = NULL;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
        continue;

      /* We'll need the symbol table in a second.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      if (symtab_hdr->sh_info == 0)
        continue;

      /* Walk over each section attached to the input bfd.  */
      for (isec = ibfd->sections; isec != NULL; isec = isec->next)
        {
          Elf_Internal_Rela *internal_relocs, *irelaend, *irela;

          /* If there aren't any relocs, then there's nothing more to do.  */
          if ((isec->flags & SEC_RELOC) == 0
              || isec->reloc_count == 0)
            continue;

          if (!maybe_needs_stubs (isec))
            continue;

          /* Get the relocs.  */
          internal_relocs = _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
                                                       info->keep_memory);
          if (internal_relocs == NULL)
            goto error_ret_free_local;

          /* Now examine each relocation.  */
          irela = internal_relocs;
          irelaend = irela + isec->reloc_count;
          for (; irela < irelaend; irela++)
            {
              enum elf_spu_reloc_type r_type;
              unsigned int r_indx;
              asection *sym_sec;
              Elf_Internal_Sym *sym;
              struct elf_link_hash_entry *h;
              enum _stub_type stub_type;

              r_type = ELF32_R_TYPE (irela->r_info);
              r_indx = ELF32_R_SYM (irela->r_info);

              if (r_type >= R_SPU_max)
                {
                  bfd_set_error (bfd_error_bad_value);
                error_ret_free_internal:
                  if (elf_section_data (isec)->relocs != internal_relocs)
                    free (internal_relocs);
                error_ret_free_local:
                  if (local_syms != NULL
                      && (symtab_hdr->contents
                          != (unsigned char *) local_syms))
                    free (local_syms);
                  return FALSE;
                }

              /* Determine the reloc target section.  */
              if (!get_sym_h (&h, &sym, &sym_sec, &local_syms, r_indx, ibfd))
                goto error_ret_free_internal;

              stub_type = needs_ovl_stub (h, sym, sym_sec, isec, irela,
                                          NULL, info);
              if (stub_type == no_stub)
                continue;
              else if (stub_type == stub_error)
                goto error_ret_free_internal;

              if (htab->stub_count == NULL)
                {
                  bfd_size_type amt;
                  amt = (htab->num_overlays + 1) * sizeof (*htab->stub_count);
                  htab->stub_count = bfd_zmalloc (amt);
                  if (htab->stub_count == NULL)
                    goto error_ret_free_internal;
                }

              if (!build)
                {
                  if (!count_stub (htab, ibfd, isec, stub_type, h, irela))
                    goto error_ret_free_internal;
                }
              else
                {
                  bfd_vma dest;

                  if (h != NULL)
                    dest = h->root.u.def.value;
                  else
                    dest = sym->st_value;
                  dest += irela->r_addend;
                  if (!build_stub (info, ibfd, isec, stub_type, h, irela,
                                   dest, sym_sec))
                    goto error_ret_free_internal;
                }
            }

          /* We're done with the internal relocs, free them.  */
          if (elf_section_data (isec)->relocs != internal_relocs)
            free (internal_relocs);
        }

      if (local_syms != NULL
          && symtab_hdr->contents != (unsigned char *) local_syms)
        {
          if (!info->keep_memory)
            free (local_syms);
          else
            symtab_hdr->contents = (unsigned char *) local_syms;
        }
    }

  return TRUE;
}

/* Allocate space for overlay call and return stubs.
   Return 0 on error, 1 if no overlays, 2 otherwise.  */

int
spu_elf_size_stubs (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab;
  bfd *ibfd;
  bfd_size_type amt;
  flagword flags;
  unsigned int i;
  asection *stub;

  if (!process_stubs (info, FALSE))
    return 0;

  htab = spu_hash_table (info);
  elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, info);
  if (htab->stub_err)
    return 0;

  ibfd = info->input_bfds;
  if (htab->stub_count != NULL)
    {
      amt = (htab->num_overlays + 1) * sizeof (*htab->stub_sec);
      htab->stub_sec = bfd_zmalloc (amt);
      if (htab->stub_sec == NULL)
        return 0;

      flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
               | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
      stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
      htab->stub_sec[0] = stub;
      if (stub == NULL
          || !bfd_set_section_alignment (ibfd, stub,
                                         ovl_stub_size_log2 (htab->params)))
        return 0;
      stub->size = htab->stub_count[0] * ovl_stub_size (htab->params);
      if (htab->params->ovly_flavour == ovly_soft_icache)
        /* Extra space for linked list entries.  */
        stub->size += htab->stub_count[0] * 16;

      for (i = 0; i < htab->num_overlays; ++i)
        {
          asection *osec = htab->ovl_sec[i];
          unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
          stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
          htab->stub_sec[ovl] = stub;
          if (stub == NULL
              || !bfd_set_section_alignment (ibfd, stub,
                                             ovl_stub_size_log2 (htab->params)))
            return 0;
          stub->size = htab->stub_count[ovl] * ovl_stub_size (htab->params);
        }
    }

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      /* Space for icache manager tables.
         a) Tag array, one quadword per cache line.
         b) Rewrite "to" list, one quadword per cache line.
         c) Rewrite "from" list, one byte per outgoing branch (rounded up to
            a power-of-two number of full quadwords) per cache line.  */

      flags = SEC_ALLOC;
      htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
      if (htab->ovtab == NULL
          || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
        return 0;

      htab->ovtab->size = (16 + 16 + (16 << htab->fromelem_size_log2))
                          << htab->num_lines_log2;
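
      /* For illustration: with 32 cache lines (num_lines_log2 == 5) and
         fromelem_size_log2 == 0, this is (16 + 16 + 16) << 5 == 1536
         bytes: per cache line, one quadword of tags, one quadword of
         "to" entries, and one quadword of "from" entries.  */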

      flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      htab->init = bfd_make_section_anyway_with_flags (ibfd, ".ovini", flags);
      if (htab->init == NULL
          || !bfd_set_section_alignment (ibfd, htab->init, 4))
        return 0;

      htab->init->size = 16;
    }
  else if (htab->stub_count == NULL)
    return 1;
  else
    {
      /* htab->ovtab consists of two arrays.
         .      struct {
         .        u32 vma;
         .        u32 size;
         .        u32 file_off;
         .        u32 buf;
         .      } _ovly_table[];
         .
         .      struct {
         .        u32 mapped;
         .      } _ovly_buf_table[];
         .      */
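
      /* So each _ovly_table entry is 16 bytes and each _ovly_buf_table
         entry is 4 bytes, giving the num_overlays * 16 + num_buf * 4
         terms in the size below.  The extra 16 bytes are assumed here
         to be padding ahead of the table so that the 1-origin ovl_index
         values can index _ovly_table directly.  */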
1738
1739 flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
1740 htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
1741 if (htab->ovtab == NULL
1742 || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
1743 return 0;
1744
1745 htab->ovtab->size = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
1746 }
1747
1748 htab->toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
1749 if (htab->toe == NULL
1750 || !bfd_set_section_alignment (ibfd, htab->toe, 4))
1751 return 0;
1752 htab->toe->size = 16;
1753
1754 return 2;
1755 }
1756
1757 /* Called from ld to place overlay manager data sections. This is done
1758 after the overlay manager itself is loaded, mainly so that the
1759 linker's htab->init section is placed after any other .ovl.init
1760 sections. */
1761
1762 void
1763 spu_elf_place_overlay_data (struct bfd_link_info *info)
1764 {
1765 struct spu_link_hash_table *htab = spu_hash_table (info);
1766 unsigned int i;
1767
1768 if (htab->stub_sec != NULL)
1769 {
1770 (*htab->params->place_spu_section) (htab->stub_sec[0], NULL, ".text");
1771
1772 for (i = 0; i < htab->num_overlays; ++i)
1773 {
1774 asection *osec = htab->ovl_sec[i];
1775 unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
1776 (*htab->params->place_spu_section) (htab->stub_sec[ovl], osec, NULL);
1777 }
1778 }
1779
1780 if (htab->params->ovly_flavour == ovly_soft_icache)
1781 (*htab->params->place_spu_section) (htab->init, NULL, ".ovl.init");
1782
1783 if (htab->ovtab != NULL)
1784 {
1785 const char *ovout = ".data";
1786 if (htab->params->ovly_flavour == ovly_soft_icache)
1787 ovout = ".bss";
1788 (*htab->params->place_spu_section) (htab->ovtab, NULL, ovout);
1789 }
1790
1791 if (htab->toe != NULL)
1792 (*htab->params->place_spu_section) (htab->toe, NULL, ".toe");
1793 }
1794
1795 /* Functions to handle embedded spu_ovl.o object. */
1796
1797 static void *
1798 ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
1799 {
1800 return stream;
1801 }
1802
1803 static file_ptr
1804 ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
1805 void *stream,
1806 void *buf,
1807 file_ptr nbytes,
1808 file_ptr offset)
1809 {
1810 struct _ovl_stream *os;
1811 size_t count;
1812 size_t max;
1813
1814 os = (struct _ovl_stream *) stream;
1815 max = (const char *) os->end - (const char *) os->start;
1816
1817 if ((ufile_ptr) offset >= max)
1818 return 0;
1819
1820 count = nbytes;
1821 if (count > max - offset)
1822 count = max - offset;
1823
1824 memcpy (buf, (const char *) os->start + offset, count);
1825 return count;
1826 }
1827
1828 bfd_boolean
1829 spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
1830 {
1831 *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
1832 "elf32-spu",
1833 ovl_mgr_open,
1834 (void *) stream,
1835 ovl_mgr_pread,
1836 NULL,
1837 NULL);
1838 return *ovl_bfd != NULL;
1839 }
1840
1841 static unsigned int
1842 overlay_index (asection *sec)
1843 {
1844 if (sec == NULL
1845 || sec->output_section == bfd_abs_section_ptr)
1846 return 0;
1847 return spu_elf_section_data (sec->output_section)->u.o.ovl_index;
1848 }
1849
1850 /* Define an STT_OBJECT symbol. */
1851
1852 static struct elf_link_hash_entry *
1853 define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
1854 {
1855 struct elf_link_hash_entry *h;
1856
1857 h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
1858 if (h == NULL)
1859 return NULL;
1860
1861 if (h->root.type != bfd_link_hash_defined
1862 || !h->def_regular)
1863 {
1864 h->root.type = bfd_link_hash_defined;
1865 h->root.u.def.section = htab->ovtab;
1866 h->type = STT_OBJECT;
1867 h->ref_regular = 1;
1868 h->def_regular = 1;
1869 h->ref_regular_nonweak = 1;
1870 h->non_elf = 0;
1871 }
1872 else if (h->root.u.def.section->owner != NULL)
1873 {
1874 (*_bfd_error_handler) (_("%B is not allowed to define %s"),
1875 h->root.u.def.section->owner,
1876 h->root.root.string);
1877 bfd_set_error (bfd_error_bad_value);
1878 return NULL;
1879 }
1880 else
1881 {
1882 (*_bfd_error_handler) (_("you are not allowed to define %s in a script"),
1883 h->root.root.string);
1884 bfd_set_error (bfd_error_bad_value);
1885 return NULL;
1886 }
1887
1888 return h;
1889 }
1890
1891 /* Fill in all stubs and the overlay tables. */
1892
1893 static bfd_boolean
1894 spu_elf_build_stubs (struct bfd_link_info *info)
1895 {
1896 struct spu_link_hash_table *htab = spu_hash_table (info);
1897 struct elf_link_hash_entry *h;
1898 bfd_byte *p;
1899 asection *s;
1900 bfd *obfd;
1901 unsigned int i;
1902
1903 if (htab->num_overlays != 0)
1904 {
1905 for (i = 0; i < 2; i++)
1906 {
1907 h = htab->ovly_entry[i];
1908 if (h != NULL
1909 && (h->root.type == bfd_link_hash_defined
1910 || h->root.type == bfd_link_hash_defweak)
1911 && h->def_regular)
1912 {
1913 s = h->root.u.def.section->output_section;
1914 if (spu_elf_section_data (s)->u.o.ovl_index)
1915 {
1916 (*_bfd_error_handler) (_("%s in overlay section"),
1917 h->root.root.string);
1918 bfd_set_error (bfd_error_bad_value);
1919 return FALSE;
1920 }
1921 }
1922 }
1923 }
1924
1925 if (htab->stub_sec != NULL)
1926 {
1927 for (i = 0; i <= htab->num_overlays; i++)
1928 if (htab->stub_sec[i]->size != 0)
1929 {
1930 htab->stub_sec[i]->contents = bfd_zalloc (htab->stub_sec[i]->owner,
1931 htab->stub_sec[i]->size);
1932 if (htab->stub_sec[i]->contents == NULL)
1933 return FALSE;
1934 htab->stub_sec[i]->rawsize = htab->stub_sec[i]->size;
1935 htab->stub_sec[i]->size = 0;
1936 }
1937
1938 /* Fill in all the stubs. */
1939 process_stubs (info, TRUE);
1940 if (!htab->stub_err)
1941 elf_link_hash_traverse (&htab->elf, build_spuear_stubs, info);
1942
1943 if (htab->stub_err)
1944 {
1945 (*_bfd_error_handler) (_("overlay stub relocation overflow"));
1946 bfd_set_error (bfd_error_bad_value);
1947 return FALSE;
1948 }
1949
1950 for (i = 0; i <= htab->num_overlays; i++)
1951 {
1952 if (htab->stub_sec[i]->size != htab->stub_sec[i]->rawsize)
1953 {
1954 (*_bfd_error_handler) (_("stubs don't match calculated size"));
1955 bfd_set_error (bfd_error_bad_value);
1956 return FALSE;
1957 }
1958 htab->stub_sec[i]->rawsize = 0;
1959 }
1960 }
1961
1962 if (htab->ovtab == NULL || htab->ovtab->size == 0)
1963 return TRUE;
1964
1965 htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
1966 if (htab->ovtab->contents == NULL)
1967 return FALSE;
1968
1969 p = htab->ovtab->contents;
1970 if (htab->params->ovly_flavour == ovly_soft_icache)
1971 {
1972 bfd_vma off;
1973
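/* A sketch of the soft-icache table layout built below, as implied
   by the symbol definitions: a 16-byte tag per cache line, then 16
   bytes of "rewrite to" state per line, then fromelem "rewrite from"
   elements per line. The sizes and log2 parameters are also
   published as absolute symbols for use by the icache manager. */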
1974 h = define_ovtab_symbol (htab, "__icache_tag_array");
1975 if (h == NULL)
1976 return FALSE;
1977 h->root.u.def.value = 0;
1978 h->size = 16 << htab->num_lines_log2;
1979 off = h->size;
1980
1981 h = define_ovtab_symbol (htab, "__icache_tag_array_size");
1982 if (h == NULL)
1983 return FALSE;
1984 h->root.u.def.value = 16 << htab->num_lines_log2;
1985 h->root.u.def.section = bfd_abs_section_ptr;
1986
1987 h = define_ovtab_symbol (htab, "__icache_rewrite_to");
1988 if (h == NULL)
1989 return FALSE;
1990 h->root.u.def.value = off;
1991 h->size = 16 << htab->num_lines_log2;
1992 off += h->size;
1993
1994 h = define_ovtab_symbol (htab, "__icache_rewrite_to_size");
1995 if (h == NULL)
1996 return FALSE;
1997 h->root.u.def.value = 16 << htab->num_lines_log2;
1998 h->root.u.def.section = bfd_abs_section_ptr;
1999
2000 h = define_ovtab_symbol (htab, "__icache_rewrite_from");
2001 if (h == NULL)
2002 return FALSE;
2003 h->root.u.def.value = off;
2004 h->size = 16 << (htab->fromelem_size_log2 + htab->num_lines_log2);
2005 off += h->size;
2006
2007 h = define_ovtab_symbol (htab, "__icache_rewrite_from_size");
2008 if (h == NULL)
2009 return FALSE;
2010 h->root.u.def.value = 16 << (htab->fromelem_size_log2
2011 + htab->num_lines_log2);
2012 h->root.u.def.section = bfd_abs_section_ptr;
2013
2014 h = define_ovtab_symbol (htab, "__icache_log2_fromelemsize");
2015 if (h == NULL)
2016 return FALSE;
2017 h->root.u.def.value = htab->fromelem_size_log2;
2018 h->root.u.def.section = bfd_abs_section_ptr;
2019
2020 h = define_ovtab_symbol (htab, "__icache_base");
2021 if (h == NULL)
2022 return FALSE;
2023 h->root.u.def.value = htab->ovl_sec[0]->vma;
2024 h->root.u.def.section = bfd_abs_section_ptr;
2025 h->size = htab->num_buf << htab->line_size_log2;
2026
2027 h = define_ovtab_symbol (htab, "__icache_linesize");
2028 if (h == NULL)
2029 return FALSE;
2030 h->root.u.def.value = 1 << htab->line_size_log2;
2031 h->root.u.def.section = bfd_abs_section_ptr;
2032
2033 h = define_ovtab_symbol (htab, "__icache_log2_linesize");
2034 if (h == NULL)
2035 return FALSE;
2036 h->root.u.def.value = htab->line_size_log2;
2037 h->root.u.def.section = bfd_abs_section_ptr;
2038
2039 h = define_ovtab_symbol (htab, "__icache_neg_log2_linesize");
2040 if (h == NULL)
2041 return FALSE;
2042 h->root.u.def.value = -htab->line_size_log2;
2043 h->root.u.def.section = bfd_abs_section_ptr;
2044
2045 h = define_ovtab_symbol (htab, "__icache_cachesize");
2046 if (h == NULL)
2047 return FALSE;
2048 h->root.u.def.value = 1 << (htab->num_lines_log2 + htab->line_size_log2);
2049 h->root.u.def.section = bfd_abs_section_ptr;
2050
2051 h = define_ovtab_symbol (htab, "__icache_log2_cachesize");
2052 if (h == NULL)
2053 return FALSE;
2054 h->root.u.def.value = htab->num_lines_log2 + htab->line_size_log2;
2055 h->root.u.def.section = bfd_abs_section_ptr;
2056
2057 h = define_ovtab_symbol (htab, "__icache_neg_log2_cachesize");
2058 if (h == NULL)
2059 return FALSE;
2060 h->root.u.def.value = -(htab->num_lines_log2 + htab->line_size_log2);
2061 h->root.u.def.section = bfd_abs_section_ptr;
2062
2063 if (htab->init != NULL && htab->init->size != 0)
2064 {
2065 htab->init->contents = bfd_zalloc (htab->init->owner,
2066 htab->init->size);
2067 if (htab->init->contents == NULL)
2068 return FALSE;
2069
2070 h = define_ovtab_symbol (htab, "__icache_fileoff");
2071 if (h == NULL)
2072 return FALSE;
2073 h->root.u.def.value = 0;
2074 h->root.u.def.section = htab->init;
2075 h->size = 8;
2076 }
2077 }
2078 else
2079 {
2080 /* Write out _ovly_table. */
2081 /* Set low bit of .size to mark non-overlay area as present. */
2082 p[7] = 1;
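/* Each 16-byte table entry holds four words: vma, size (low bit
   used as a "present" flag), file_off (filled in later) and the
   overlay buffer number. The entry at offset 0 covers the
   non-overlay area. */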
2083 obfd = htab->ovtab->output_section->owner;
2084 for (s = obfd->sections; s != NULL; s = s->next)
2085 {
2086 unsigned int ovl_index = spu_elf_section_data (s)->u.o.ovl_index;
2087
2088 if (ovl_index != 0)
2089 {
2090 unsigned long off = ovl_index * 16;
2091 unsigned int ovl_buf = spu_elf_section_data (s)->u.o.ovl_buf;
2092
2093 bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
2094 bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16,
2095 p + off + 4);
2096 /* file_off written later in spu_elf_modify_program_headers. */
2097 bfd_put_32 (htab->ovtab->owner, ovl_buf, p + off + 12);
2098 }
2099 }
2100
2101 h = define_ovtab_symbol (htab, "_ovly_table");
2102 if (h == NULL)
2103 return FALSE;
2104 h->root.u.def.value = 16;
2105 h->size = htab->num_overlays * 16;
2106
2107 h = define_ovtab_symbol (htab, "_ovly_table_end");
2108 if (h == NULL)
2109 return FALSE;
2110 h->root.u.def.value = htab->num_overlays * 16 + 16;
2111 h->size = 0;
2112
2113 h = define_ovtab_symbol (htab, "_ovly_buf_table");
2114 if (h == NULL)
2115 return FALSE;
2116 h->root.u.def.value = htab->num_overlays * 16 + 16;
2117 h->size = htab->num_buf * 4;
2118
2119 h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
2120 if (h == NULL)
2121 return FALSE;
2122 h->root.u.def.value = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
2123 h->size = 0;
2124 }
2125
2126 h = define_ovtab_symbol (htab, "_EAR_");
2127 if (h == NULL)
2128 return FALSE;
2129 h->root.u.def.section = htab->toe;
2130 h->root.u.def.value = 0;
2131 h->size = 16;
2132
2133 return TRUE;
2134 }
2135
2136 /* Check that all loadable section VMAs lie in the range LO .. HI
2137 inclusive, and stash some parameters for --auto-overlay. Return the first section found outside the range, or NULL. */
2138
2139 asection *
2140 spu_elf_check_vma (struct bfd_link_info *info)
2141 {
2142 struct elf_segment_map *m;
2143 unsigned int i;
2144 struct spu_link_hash_table *htab = spu_hash_table (info);
2145 bfd *abfd = info->output_bfd;
2146 bfd_vma hi = htab->params->local_store_hi;
2147 bfd_vma lo = htab->params->local_store_lo;
2148
2149 htab->local_store = hi + 1 - lo;
2150
2151 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
2152 if (m->p_type == PT_LOAD)
2153 for (i = 0; i < m->count; i++)
2154 if (m->sections[i]->size != 0
2155 && (m->sections[i]->vma < lo
2156 || m->sections[i]->vma > hi
2157 || m->sections[i]->vma + m->sections[i]->size - 1 > hi))
2158 return m->sections[i];
2159
2160 return NULL;
2161 }
2162
2163 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
2164 Search for stack adjusting insns, and return the sp delta.
2165 If a store of lr is found, save the instruction offset to *LR_STORE.
2166 If a stack adjusting instruction is found, save that offset to
2167 *SP_ADJUST. */
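/* For example (illustrative only), a common prologue such as
	stqd	$lr,16($sp)
	stqd	$sp,-48($sp)
	ai	$sp,$sp,-48
   yields -48, with *LR_STORE set to the offset of the first insn
   and *SP_ADJUST to that of the "ai". */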
2168
2169 static int
2170 find_function_stack_adjust (asection *sec,
2171 bfd_vma offset,
2172 bfd_vma *lr_store,
2173 bfd_vma *sp_adjust)
2174 {
2175 int reg[128];
2176
2177 memset (reg, 0, sizeof (reg));
2178 for ( ; offset + 4 <= sec->size; offset += 4)
2179 {
2180 unsigned char buf[4];
2181 int rt, ra;
2182 int imm;
2183
2184 /* Assume no relocs on stack adjusting insns. */
2185 if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
2186 break;
2187
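/* Decode the register fields common to most insns: RT is in
   bits 25-31 and RA in bits 18-24, numbering the big-endian
   word from the most significant bit. */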
2188 rt = buf[3] & 0x7f;
2189 ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);
2190
2191 if (buf[0] == 0x24 /* stqd */)
2192 {
2193 if (rt == 0 /* lr */ && ra == 1 /* sp */)
2194 *lr_store = offset;
2195 continue;
2196 }
2197
2198 /* Partly decoded immediate field. */
2199 imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);
2200
2201 if (buf[0] == 0x1c /* ai */)
2202 {
2203 imm >>= 7;
2204 imm = (imm ^ 0x200) - 0x200;
2205 reg[rt] = reg[ra] + imm;
2206
2207 if (rt == 1 /* sp */)
2208 {
2209 if (reg[rt] > 0)
2210 break;
2211 *sp_adjust = offset;
2212 return reg[rt];
2213 }
2214 }
2215 else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
2216 {
2217 int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);
2218
2219 reg[rt] = reg[ra] + reg[rb];
2220 if (rt == 1)
2221 {
2222 if (reg[rt] > 0)
2223 break;
2224 *sp_adjust = offset;
2225 return reg[rt];
2226 }
2227 }
2228 else if (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */)
2229 {
2230 int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);
2231
2232 reg[rt] = reg[rb] - reg[ra];
2233 if (rt == 1)
2234 {
2235 if (reg[rt] > 0)
2236 break;
2237 *sp_adjust = offset;
2238 return reg[rt];
2239 }
2240 }
2241 else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
2242 {
2243 if (buf[0] >= 0x42 /* ila */)
2244 imm |= (buf[0] & 1) << 17;
2245 else
2246 {
2247 imm &= 0xffff;
2248
2249 if (buf[0] == 0x40 /* il */)
2250 {
2251 if ((buf[1] & 0x80) == 0)
2252 continue;
2253 imm = (imm ^ 0x8000) - 0x8000;
2254 }
2255 else if ((buf[1] & 0x80) == 0 /* ilhu */)
2256 imm <<= 16;
2257 }
2258 reg[rt] = imm;
2259 continue;
2260 }
2261 else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
2262 {
2263 reg[rt] |= imm & 0xffff;
2264 continue;
2265 }
2266 else if (buf[0] == 0x04 /* ori */)
2267 {
2268 imm >>= 7;
2269 imm = (imm ^ 0x200) - 0x200;
2270 reg[rt] = reg[ra] | imm;
2271 continue;
2272 }
2273 else if (buf[0] == 0x32 && (buf[1] & 0x80) != 0 /* fsmbi */)
2274 {
2275 reg[rt] = ( ((imm & 0x8000) ? 0xff000000 : 0)
2276 | ((imm & 0x4000) ? 0x00ff0000 : 0)
2277 | ((imm & 0x2000) ? 0x0000ff00 : 0)
2278 | ((imm & 0x1000) ? 0x000000ff : 0));
2279 continue;
2280 }
2281 else if (buf[0] == 0x16 /* andbi */)
2282 {
2283 imm >>= 7;
2284 imm &= 0xff;
2285 imm |= imm << 8;
2286 imm |= imm << 16;
2287 reg[rt] = reg[ra] & imm;
2288 continue;
2289 }
2290 else if (buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
2291 {
2292 /* Used in pic reg load. Say rt is trashed. Won't be used
2293 in stack adjust, but we need to continue past this branch. */
2294 reg[rt] = 0;
2295 continue;
2296 }
2297 else if (is_branch (buf) || is_indirect_branch (buf))
2298 /* If we hit a branch then we must be out of the prologue. */
2299 break;
2300 }
2301
2302 return 0;
2303 }
2304
2305 /* qsort predicate to sort symbols by section and value. */
2306
2307 static Elf_Internal_Sym *sort_syms_syms;
2308 static asection **sort_syms_psecs;
2309
2310 static int
2311 sort_syms (const void *a, const void *b)
2312 {
2313 Elf_Internal_Sym *const *s1 = a;
2314 Elf_Internal_Sym *const *s2 = b;
2315 asection *sec1, *sec2;
2316 bfd_signed_vma delta;
2317
2318 sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
2319 sec2 = sort_syms_psecs[*s2 - sort_syms_syms];
2320
2321 if (sec1 != sec2)
2322 return sec1->index - sec2->index;
2323
2324 delta = (*s1)->st_value - (*s2)->st_value;
2325 if (delta != 0)
2326 return delta < 0 ? -1 : 1;
2327
2328 delta = (*s2)->st_size - (*s1)->st_size;
2329 if (delta != 0)
2330 return delta < 0 ? -1 : 1;
2331
2332 return *s1 < *s2 ? -1 : 1;
2333 }
2334
2335 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
2336 entries for section SEC. */
2337
2338 static struct spu_elf_stack_info *
2339 alloc_stack_info (asection *sec, int max_fun)
2340 {
2341 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2342 bfd_size_type amt;
2343
2344 amt = sizeof (struct spu_elf_stack_info);
2345 amt += (max_fun - 1) * sizeof (struct function_info);
2346 sec_data->u.i.stack_info = bfd_zmalloc (amt);
2347 if (sec_data->u.i.stack_info != NULL)
2348 sec_data->u.i.stack_info->max_fun = max_fun;
2349 return sec_data->u.i.stack_info;
2350 }
2351
2352 /* Add a new struct function_info describing a (part of a) function
2353 starting at SYM_H. Keep the array sorted by address. */
2354
2355 static struct function_info *
2356 maybe_insert_function (asection *sec,
2357 void *sym_h,
2358 bfd_boolean global,
2359 bfd_boolean is_func)
2360 {
2361 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2362 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2363 int i;
2364 bfd_vma off, size;
2365
2366 if (sinfo == NULL)
2367 {
2368 sinfo = alloc_stack_info (sec, 20);
2369 if (sinfo == NULL)
2370 return NULL;
2371 }
2372
2373 if (!global)
2374 {
2375 Elf_Internal_Sym *sym = sym_h;
2376 off = sym->st_value;
2377 size = sym->st_size;
2378 }
2379 else
2380 {
2381 struct elf_link_hash_entry *h = sym_h;
2382 off = h->root.u.def.value;
2383 size = h->size;
2384 }
2385
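/* Callers present symbols in roughly ascending address order,
   so scanning from the end of the array finds the insertion
   point quickly. */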
2386 for (i = sinfo->num_fun; --i >= 0; )
2387 if (sinfo->fun[i].lo <= off)
2388 break;
2389
2390 if (i >= 0)
2391 {
2392 /* Don't add another entry for an alias, but do update some
2393 info. */
2394 if (sinfo->fun[i].lo == off)
2395 {
2396 /* Prefer globals over local syms. */
2397 if (global && !sinfo->fun[i].global)
2398 {
2399 sinfo->fun[i].global = TRUE;
2400 sinfo->fun[i].u.h = sym_h;
2401 }
2402 if (is_func)
2403 sinfo->fun[i].is_func = TRUE;
2404 return &sinfo->fun[i];
2405 }
2406 /* Ignore a zero-size symbol inside an existing function. */
2407 else if (sinfo->fun[i].hi > off && size == 0)
2408 return &sinfo->fun[i];
2409 }
2410
2411 if (sinfo->num_fun >= sinfo->max_fun)
2412 {
2413 bfd_size_type amt = sizeof (struct spu_elf_stack_info);
2414 bfd_size_type old = amt;
2415
2416 old += (sinfo->max_fun - 1) * sizeof (struct function_info);
2417 sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
2418 amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
2419 sinfo = bfd_realloc (sinfo, amt);
2420 if (sinfo == NULL)
2421 return NULL;
2422 memset ((char *) sinfo + old, 0, amt - old);
2423 sec_data->u.i.stack_info = sinfo;
2424 }
2425
2426 if (++i < sinfo->num_fun)
2427 memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
2428 (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
2429 sinfo->fun[i].is_func = is_func;
2430 sinfo->fun[i].global = global;
2431 sinfo->fun[i].sec = sec;
2432 if (global)
2433 sinfo->fun[i].u.h = sym_h;
2434 else
2435 sinfo->fun[i].u.sym = sym_h;
2436 sinfo->fun[i].lo = off;
2437 sinfo->fun[i].hi = off + size;
2438 sinfo->fun[i].lr_store = -1;
2439 sinfo->fun[i].sp_adjust = -1;
2440 sinfo->fun[i].stack = -find_function_stack_adjust (sec, off,
2441 &sinfo->fun[i].lr_store,
2442 &sinfo->fun[i].sp_adjust);
2443 sinfo->num_fun += 1;
2444 return &sinfo->fun[i];
2445 }
2446
2447 /* Return the name of FUN. */
2448
2449 static const char *
2450 func_name (struct function_info *fun)
2451 {
2452 asection *sec;
2453 bfd *ibfd;
2454 Elf_Internal_Shdr *symtab_hdr;
2455
2456 while (fun->start != NULL)
2457 fun = fun->start;
2458
2459 if (fun->global)
2460 return fun->u.h->root.root.string;
2461
2462 sec = fun->sec;
2463 if (fun->u.sym->st_name == 0)
2464 {
2465 size_t len = strlen (sec->name);
2466 char *name = bfd_malloc (len + 10);
2467 if (name == NULL)
2468 return "(null)";
2469 sprintf (name, "%s+%lx", sec->name,
2470 (unsigned long) fun->u.sym->st_value & 0xffffffff);
2471 return name;
2472 }
2473 ibfd = sec->owner;
2474 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2475 return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
2476 }
2477
2478 /* Read the instruction at OFF in SEC. Return true iff the instruction
2479 is a nop, lnop, or stop 0 (all zero insn). */
2480
2481 static bfd_boolean
2482 is_nop (asection *sec, bfd_vma off)
2483 {
2484 unsigned char insn[4];
2485
2486 if (off + 4 > sec->size
2487 || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
2488 return FALSE;
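/* nop is 0x40200000 and lnop 0x00200000; the masks below accept
   either, and an all-zero word is "stop 0". */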
2489 if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
2490 return TRUE;
2491 if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
2492 return TRUE;
2493 return FALSE;
2494 }
2495
2496 /* Extend the range of FUN to cover nop padding up to LIMIT.
2497 Return TRUE iff some instruction other than a NOP was found. */
2498
2499 static bfd_boolean
2500 insns_at_end (struct function_info *fun, bfd_vma limit)
2501 {
2502 bfd_vma off = (fun->hi + 3) & -4;
2503
2504 while (off < limit && is_nop (fun->sec, off))
2505 off += 4;
2506 if (off < limit)
2507 {
2508 fun->hi = off;
2509 return TRUE;
2510 }
2511 fun->hi = limit;
2512 return FALSE;
2513 }
2514
2515 /* Check and fix overlapping function ranges. Return TRUE iff there
2516 are gaps in the current info we have about functions in SEC. */
2517
2518 static bfd_boolean
2519 check_function_ranges (asection *sec, struct bfd_link_info *info)
2520 {
2521 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2522 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2523 int i;
2524 bfd_boolean gaps = FALSE;
2525
2526 if (sinfo == NULL)
2527 return FALSE;
2528
2529 for (i = 1; i < sinfo->num_fun; i++)
2530 if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
2531 {
2532 /* Fix overlapping symbols. */
2533 const char *f1 = func_name (&sinfo->fun[i - 1]);
2534 const char *f2 = func_name (&sinfo->fun[i]);
2535
2536 info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
2537 sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
2538 }
2539 else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
2540 gaps = TRUE;
2541
2542 if (sinfo->num_fun == 0)
2543 gaps = TRUE;
2544 else
2545 {
2546 if (sinfo->fun[0].lo != 0)
2547 gaps = TRUE;
2548 if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
2549 {
2550 const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);
2551
2552 info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
2553 sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
2554 }
2555 else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
2556 gaps = TRUE;
2557 }
2558 return gaps;
2559 }
2560
2561 /* Search current function info for a function that contains address
2562 OFFSET in section SEC. */
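/* The fun array is sorted by address and entries cover disjoint
   [lo, hi) ranges, so a binary search suffices. */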
2563
2564 static struct function_info *
2565 find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
2566 {
2567 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2568 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2569 int lo, hi, mid;
2570
2571 lo = 0;
2572 hi = sinfo->num_fun;
2573 while (lo < hi)
2574 {
2575 mid = (lo + hi) / 2;
2576 if (offset < sinfo->fun[mid].lo)
2577 hi = mid;
2578 else if (offset >= sinfo->fun[mid].hi)
2579 lo = mid + 1;
2580 else
2581 return &sinfo->fun[mid];
2582 }
2583 info->callbacks->einfo (_("%A:0x%v not found in function table\n"),
2584 sec, offset);
2585 bfd_set_error (bfd_error_bad_value);
2586 return NULL;
2587 }
2588
2589 /* Add CALLEE to CALLER call list if not already present. Return TRUE
2590 if CALLEE was new. If this function returns FALSE, CALLEE should
2591 be freed. */
2592
2593 static bfd_boolean
2594 insert_callee (struct function_info *caller, struct call_info *callee)
2595 {
2596 struct call_info **pp, *p;
2597
2598 for (pp = &caller->call_list; (p = *pp) != NULL; pp = &p->next)
2599 if (p->fun == callee->fun)
2600 {
2601 /* Tail calls use less stack than normal calls. Retain entry
2602 for normal call over one for tail call. */
2603 p->is_tail &= callee->is_tail;
2604 if (!p->is_tail)
2605 {
2606 p->fun->start = NULL;
2607 p->fun->is_func = TRUE;
2608 }
2609 p->count += callee->count;
2610 /* Reorder list so most recent call is first. */
2611 *pp = p->next;
2612 p->next = caller->call_list;
2613 caller->call_list = p;
2614 return FALSE;
2615 }
2616 callee->next = caller->call_list;
2617 caller->call_list = callee;
2618 return TRUE;
2619 }
2620
2621 /* Copy CALL and insert the copy into CALLER. */
2622
2623 static bfd_boolean
2624 copy_callee (struct function_info *caller, const struct call_info *call)
2625 {
2626 struct call_info *callee;
2627 callee = bfd_malloc (sizeof (*callee));
2628 if (callee == NULL)
2629 return FALSE;
2630 *callee = *call;
2631 if (!insert_callee (caller, callee))
2632 free (callee);
2633 return TRUE;
2634 }
2635
2636 /* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2637 overlay stub sections. */
2638
2639 static bfd_boolean
2640 interesting_section (asection *s)
2641 {
2642 return (s->output_section != bfd_abs_section_ptr
2643 && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_IN_MEMORY))
2644 == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2645 && s->size != 0);
2646 }
2647
2648 /* Rummage through the relocs for SEC, looking for function calls.
2649 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
2650 mark destination symbols on calls as being functions. Also
2651 look at branches, which may be tail calls or go to hot/cold
2652 section part of same function. */
2653
2654 static bfd_boolean
2655 mark_functions_via_relocs (asection *sec,
2656 struct bfd_link_info *info,
2657 int call_tree)
2658 {
2659 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
2660 Elf_Internal_Shdr *symtab_hdr;
2661 void *psyms;
2662 unsigned int priority = 0;
2663 static bfd_boolean warned;
2664
2665 if (!interesting_section (sec)
2666 || sec->reloc_count == 0)
2667 return TRUE;
2668
2669 internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
2670 info->keep_memory);
2671 if (internal_relocs == NULL)
2672 return FALSE;
2673
2674 symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
2675 psyms = &symtab_hdr->contents;
2676 irela = internal_relocs;
2677 irelaend = irela + sec->reloc_count;
2678 for (; irela < irelaend; irela++)
2679 {
2680 enum elf_spu_reloc_type r_type;
2681 unsigned int r_indx;
2682 asection *sym_sec;
2683 Elf_Internal_Sym *sym;
2684 struct elf_link_hash_entry *h;
2685 bfd_vma val;
2686 bfd_boolean nonbranch, is_call;
2687 struct function_info *caller;
2688 struct call_info *callee;
2689
2690 r_type = ELF32_R_TYPE (irela->r_info);
2691 nonbranch = r_type != R_SPU_REL16 && r_type != R_SPU_ADDR16;
2692
2693 r_indx = ELF32_R_SYM (irela->r_info);
2694 if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
2695 return FALSE;
2696
2697 if (sym_sec == NULL
2698 || sym_sec->output_section == bfd_abs_section_ptr)
2699 continue;
2700
2701 is_call = FALSE;
2702 if (!nonbranch)
2703 {
2704 unsigned char insn[4];
2705
2706 if (!bfd_get_section_contents (sec->owner, sec, insn,
2707 irela->r_offset, 4))
2708 return FALSE;
2709 if (is_branch (insn))
2710 {
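/* Major opcode 0x31 is brasl and 0x33 brsl; the 0xfd mask
   accepts both "and link" (call) forms. The immediate field,
   which the reloc will overwrite, is read here as a priority
   hint used when sorting calls for --auto-overlay. */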
2711 is_call = (insn[0] & 0xfd) == 0x31;
2712 priority = insn[1] & 0x0f;
2713 priority <<= 8;
2714 priority |= insn[2];
2715 priority <<= 8;
2716 priority |= insn[3];
2717 priority >>= 7;
2718 if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2719 != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2720 {
2721 if (!warned)
2722 info->callbacks->einfo
2723 (_("%B(%A+0x%v): call to non-code section"
2724 " %B(%A), analysis incomplete\n"),
2725 sec->owner, sec, irela->r_offset,
2726 sym_sec->owner, sym_sec);
2727 warned = TRUE;
2728 continue;
2729 }
2730 }
2731 else
2732 {
2733 nonbranch = TRUE;
2734 if (is_hint (insn))
2735 continue;
2736 }
2737 }
2738
2739 if (nonbranch)
2740 {
2741 /* For --auto-overlay, count possible stubs we need for
2742 function pointer references. */
2743 unsigned int sym_type;
2744 if (h)
2745 sym_type = h->type;
2746 else
2747 sym_type = ELF_ST_TYPE (sym->st_info);
2748 if (sym_type == STT_FUNC)
2749 {
2750 if (call_tree && spu_hash_table (info)->params->auto_overlay)
2751 spu_hash_table (info)->non_ovly_stub += 1;
2752 /* If the symbol type is STT_FUNC then this must be a
2753 function pointer initialisation. */
2754 continue;
2755 }
2756 /* Ignore data references. */
2757 if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2758 != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2759 continue;
2760 /* Otherwise we probably have a jump table reloc for
2761 a switch statement or some other reference to a
2762 code label. */
2763 }
2764
2765 if (h)
2766 val = h->root.u.def.value;
2767 else
2768 val = sym->st_value;
2769 val += irela->r_addend;
2770
2771 if (!call_tree)
2772 {
2773 struct function_info *fun;
2774
2775 if (irela->r_addend != 0)
2776 {
2777 Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
2778 if (fake == NULL)
2779 return FALSE;
2780 fake->st_value = val;
2781 fake->st_shndx
2782 = _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
2783 sym = fake;
2784 }
2785 if (sym)
2786 fun = maybe_insert_function (sym_sec, sym, FALSE, is_call);
2787 else
2788 fun = maybe_insert_function (sym_sec, h, TRUE, is_call);
2789 if (fun == NULL)
2790 return FALSE;
2791 if (irela->r_addend != 0
2792 && fun->u.sym != sym)
2793 free (sym);
2794 continue;
2795 }
2796
2797 caller = find_function (sec, irela->r_offset, info);
2798 if (caller == NULL)
2799 return FALSE;
2800 callee = bfd_malloc (sizeof *callee);
2801 if (callee == NULL)
2802 return FALSE;
2803
2804 callee->fun = find_function (sym_sec, val, info);
2805 if (callee->fun == NULL)
2806 return FALSE;
2807 callee->is_tail = !is_call;
2808 callee->is_pasted = FALSE;
2809 callee->broken_cycle = FALSE;
2810 callee->priority = priority;
2811 callee->count = nonbranch ? 0 : 1;
2812 if (callee->fun->last_caller != sec)
2813 {
2814 callee->fun->last_caller = sec;
2815 callee->fun->call_count += 1;
2816 }
2817 if (!insert_callee (caller, callee))
2818 free (callee);
2819 else if (!is_call
2820 && !callee->fun->is_func
2821 && callee->fun->stack == 0)
2822 {
2823 /* This is either a tail call or a branch from one part of
2824 the function to another, ie. hot/cold section. If the
2825 destination has been called by some other function then
2826 it is a separate function. We also assume that functions
2827 are not split across input files. */
2828 if (sec->owner != sym_sec->owner)
2829 {
2830 callee->fun->start = NULL;
2831 callee->fun->is_func = TRUE;
2832 }
2833 else if (callee->fun->start == NULL)
2834 {
2835 struct function_info *caller_start = caller;
2836 while (caller_start->start)
2837 caller_start = caller_start->start;
2838
2839 if (caller_start != callee->fun)
2840 callee->fun->start = caller_start;
2841 }
2842 else
2843 {
2844 struct function_info *callee_start;
2845 struct function_info *caller_start;
2846 callee_start = callee->fun;
2847 while (callee_start->start)
2848 callee_start = callee_start->start;
2849 caller_start = caller;
2850 while (caller_start->start)
2851 caller_start = caller_start->start;
2852 if (caller_start != callee_start)
2853 {
2854 callee->fun->start = NULL;
2855 callee->fun->is_func = TRUE;
2856 }
2857 }
2858 }
2859 }
2860
2861 return TRUE;
2862 }
2863
2864 /* Handle something like .init or .fini, which has a piece of a function.
2865 These sections are pasted together to form a single function. */
2866
2867 static bfd_boolean
2868 pasted_function (asection *sec)
2869 {
2870 struct bfd_link_order *l;
2871 struct _spu_elf_section_data *sec_data;
2872 struct spu_elf_stack_info *sinfo;
2873 Elf_Internal_Sym *fake;
2874 struct function_info *fun, *fun_start;
2875
2876 fake = bfd_zmalloc (sizeof (*fake));
2877 if (fake == NULL)
2878 return FALSE;
2879 fake->st_value = 0;
2880 fake->st_size = sec->size;
2881 fake->st_shndx
2882 = _bfd_elf_section_from_bfd_section (sec->owner, sec);
2883 fun = maybe_insert_function (sec, fake, FALSE, FALSE);
2884 if (!fun)
2885 return FALSE;
2886
2887 /* Find a function immediately preceding this section. */
2888 fun_start = NULL;
2889 for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
2890 {
2891 if (l->u.indirect.section == sec)
2892 {
2893 if (fun_start != NULL)
2894 {
2895 struct call_info *callee = bfd_malloc (sizeof *callee);
2896 if (callee == NULL)
2897 return FALSE;
2898
2899 fun->start = fun_start;
2900 callee->fun = fun;
2901 callee->is_tail = TRUE;
2902 callee->is_pasted = TRUE;
2903 callee->broken_cycle = FALSE;
2904 callee->priority = 0;
2905 callee->count = 1;
2906 if (!insert_callee (fun_start, callee))
2907 free (callee);
2908 return TRUE;
2909 }
2910 break;
2911 }
2912 if (l->type == bfd_indirect_link_order
2913 && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
2914 && (sinfo = sec_data->u.i.stack_info) != NULL
2915 && sinfo->num_fun != 0)
2916 fun_start = &sinfo->fun[sinfo->num_fun - 1];
2917 }
2918
2919 /* Don't return an error if we did not find a function preceding this
2920 section. The section may have incorrect flags. */
2921 return TRUE;
2922 }
2923
2924 /* Map address ranges in code sections to functions. */
2925
2926 static bfd_boolean
2927 discover_functions (struct bfd_link_info *info)
2928 {
2929 bfd *ibfd;
2930 int bfd_idx;
2931 Elf_Internal_Sym ***psym_arr;
2932 asection ***sec_arr;
2933 bfd_boolean gaps = FALSE;
2934
2935 bfd_idx = 0;
2936 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2937 bfd_idx++;
2938
2939 psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
2940 if (psym_arr == NULL)
2941 return FALSE;
2942 sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
2943 if (sec_arr == NULL)
2944 return FALSE;
2945
2946 for (ibfd = info->input_bfds, bfd_idx = 0;
2947 ibfd != NULL;
2948 ibfd = ibfd->link_next, bfd_idx++)
2949 {
2950 extern const bfd_target bfd_elf32_spu_vec;
2951 Elf_Internal_Shdr *symtab_hdr;
2952 asection *sec;
2953 size_t symcount;
2954 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2955 asection **psecs, **p;
2956
2957 if (ibfd->xvec != &bfd_elf32_spu_vec)
2958 continue;
2959
2960 /* Read all the symbols. */
2961 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2962 symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
2963 if (symcount == 0)
2964 {
2965 if (!gaps)
2966 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2967 if (interesting_section (sec))
2968 {
2969 gaps = TRUE;
2970 break;
2971 }
2972 continue;
2973 }
2974
2975 if (symtab_hdr->contents != NULL)
2976 {
2977 /* Don't use cached symbols since the generic ELF linker
2978 code only reads local symbols, and we need globals too. */
2979 free (symtab_hdr->contents);
2980 symtab_hdr->contents = NULL;
2981 }
2982 syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
2983 NULL, NULL, NULL);
2984 symtab_hdr->contents = (void *) syms;
2985 if (syms == NULL)
2986 return FALSE;
2987
2988 /* Select defined function symbols that are going to be output. */
2989 psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
2990 if (psyms == NULL)
2991 return FALSE;
2992 psym_arr[bfd_idx] = psyms;
2993 psecs = bfd_malloc (symcount * sizeof (*psecs));
2994 if (psecs == NULL)
2995 return FALSE;
2996 sec_arr[bfd_idx] = psecs;
2997 for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
2998 if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
2999 || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
3000 {
3001 asection *s;
3002
3003 *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
3004 if (s != NULL && interesting_section (s))
3005 *psy++ = sy;
3006 }
3007 symcount = psy - psyms;
3008 *psy = NULL;
3009
3010 /* Sort them by section and offset within section. */
3011 sort_syms_syms = syms;
3012 sort_syms_psecs = psecs;
3013 qsort (psyms, symcount, sizeof (*psyms), sort_syms);
3014
3015 /* Now inspect the function symbols. */
3016 for (psy = psyms; psy < psyms + symcount; )
3017 {
3018 asection *s = psecs[*psy - syms];
3019 Elf_Internal_Sym **psy2;
3020
3021 for (psy2 = psy; ++psy2 < psyms + symcount; )
3022 if (psecs[*psy2 - syms] != s)
3023 break;
3024
3025 if (!alloc_stack_info (s, psy2 - psy))
3026 return FALSE;
3027 psy = psy2;
3028 }
3029
3030 /* First install info about properly typed and sized functions.
3031 In an ideal world this will cover all code sections, except
3032 when partitioning functions into hot and cold sections,
3033 and the horrible pasted together .init and .fini functions. */
3034 for (psy = psyms; psy < psyms + symcount; ++psy)
3035 {
3036 sy = *psy;
3037 if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
3038 {
3039 asection *s = psecs[sy - syms];
3040 if (!maybe_insert_function (s, sy, FALSE, TRUE))
3041 return FALSE;
3042 }
3043 }
3044
3045 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
3046 if (interesting_section (sec))
3047 gaps |= check_function_ranges (sec, info);
3048 }
3049
3050 if (gaps)
3051 {
3052 /* See if we can discover more function symbols by looking at
3053 relocations. */
3054 for (ibfd = info->input_bfds, bfd_idx = 0;
3055 ibfd != NULL;
3056 ibfd = ibfd->link_next, bfd_idx++)
3057 {
3058 asection *sec;
3059
3060 if (psym_arr[bfd_idx] == NULL)
3061 continue;
3062
3063 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3064 if (!mark_functions_via_relocs (sec, info, FALSE))
3065 return FALSE;
3066 }
3067
3068 for (ibfd = info->input_bfds, bfd_idx = 0;
3069 ibfd != NULL;
3070 ibfd = ibfd->link_next, bfd_idx++)
3071 {
3072 Elf_Internal_Shdr *symtab_hdr;
3073 asection *sec;
3074 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
3075 asection **psecs;
3076
3077 if ((psyms = psym_arr[bfd_idx]) == NULL)
3078 continue;
3079
3080 psecs = sec_arr[bfd_idx];
3081
3082 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
3083 syms = (Elf_Internal_Sym *) symtab_hdr->contents;
3084
3085 gaps = FALSE;
3086 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
3087 if (interesting_section (sec))
3088 gaps |= check_function_ranges (sec, info);
3089 if (!gaps)
3090 continue;
3091
3092 /* Finally, install all globals. */
3093 for (psy = psyms; (sy = *psy) != NULL; ++psy)
3094 {
3095 asection *s;
3096
3097 s = psecs[sy - syms];
3098
3099 /* Global syms might be improperly typed functions. */
3100 if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
3101 && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
3102 {
3103 if (!maybe_insert_function (s, sy, FALSE, FALSE))
3104 return FALSE;
3105 }
3106 }
3107 }
3108
3109 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3110 {
3111 extern const bfd_target bfd_elf32_spu_vec;
3112 asection *sec;
3113
3114 if (ibfd->xvec != &bfd_elf32_spu_vec)
3115 continue;
3116
3117 /* Some of the symbols we've installed as marking the
3118 beginning of functions may have a size of zero. Extend
3119 the range of such functions to the beginning of the
3120 next symbol of interest. */
3121 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3122 if (interesting_section (sec))
3123 {
3124 struct _spu_elf_section_data *sec_data;
3125 struct spu_elf_stack_info *sinfo;
3126
3127 sec_data = spu_elf_section_data (sec);
3128 sinfo = sec_data->u.i.stack_info;
3129 if (sinfo != NULL && sinfo->num_fun != 0)
3130 {
3131 int fun_idx;
3132 bfd_vma hi = sec->size;
3133
3134 for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
3135 {
3136 sinfo->fun[fun_idx].hi = hi;
3137 hi = sinfo->fun[fun_idx].lo;
3138 }
3139
3140 sinfo->fun[0].lo = 0;
3141 }
3142 /* No symbols in this section. Must be .init or .fini
3143 or something similar. */
3144 else if (!pasted_function (sec))
3145 return FALSE;
3146 }
3147 }
3148 }
3149
3150 for (ibfd = info->input_bfds, bfd_idx = 0;
3151 ibfd != NULL;
3152 ibfd = ibfd->link_next, bfd_idx++)
3153 {
3154 if (psym_arr[bfd_idx] == NULL)
3155 continue;
3156
3157 free (psym_arr[bfd_idx]);
3158 free (sec_arr[bfd_idx]);
3159 }
3160
3161 free (psym_arr);
3162 free (sec_arr);
3163
3164 return TRUE;
3165 }
3166
3167 /* Iterate over all function_info we have collected, calling DOIT on
3168 each node if ROOT_ONLY is false. Only call DOIT on root nodes
3169 if ROOT_ONLY. */
3170
3171 static bfd_boolean
3172 for_each_node (bfd_boolean (*doit) (struct function_info *,
3173 struct bfd_link_info *,
3174 void *),
3175 struct bfd_link_info *info,
3176 void *param,
3177 int root_only)
3178 {
3179 bfd *ibfd;
3180
3181 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3182 {
3183 extern const bfd_target bfd_elf32_spu_vec;
3184 asection *sec;
3185
3186 if (ibfd->xvec != &bfd_elf32_spu_vec)
3187 continue;
3188
3189 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3190 {
3191 struct _spu_elf_section_data *sec_data;
3192 struct spu_elf_stack_info *sinfo;
3193
3194 if ((sec_data = spu_elf_section_data (sec)) != NULL
3195 && (sinfo = sec_data->u.i.stack_info) != NULL)
3196 {
3197 int i;
3198 for (i = 0; i < sinfo->num_fun; ++i)
3199 if (!root_only || !sinfo->fun[i].non_root)
3200 if (!doit (&sinfo->fun[i], info, param))
3201 return FALSE;
3202 }
3203 }
3204 }
3205 return TRUE;
3206 }
3207
3208 /* Transfer call info attached to struct function_info entries for
3209 all of a given function's sections to the first entry. */
3210
3211 static bfd_boolean
3212 transfer_calls (struct function_info *fun,
3213 struct bfd_link_info *info ATTRIBUTE_UNUSED,
3214 void *param ATTRIBUTE_UNUSED)
3215 {
3216 struct function_info *start = fun->start;
3217
3218 if (start != NULL)
3219 {
3220 struct call_info *call, *call_next;
3221
3222 while (start->start != NULL)
3223 start = start->start;
3224 for (call = fun->call_list; call != NULL; call = call_next)
3225 {
3226 call_next = call->next;
3227 if (!insert_callee (start, call))
3228 free (call);
3229 }
3230 fun->call_list = NULL;
3231 }
3232 return TRUE;
3233 }
3234
3235 /* Mark nodes in the call graph that are called by some other node. */
3236
3237 static bfd_boolean
3238 mark_non_root (struct function_info *fun,
3239 struct bfd_link_info *info ATTRIBUTE_UNUSED,
3240 void *param ATTRIBUTE_UNUSED)
3241 {
3242 struct call_info *call;
3243
3244 if (fun->visit1)
3245 return TRUE;
3246 fun->visit1 = TRUE;
3247 for (call = fun->call_list; call; call = call->next)
3248 {
3249 call->fun->non_root = TRUE;
3250 mark_non_root (call->fun, 0, 0);
3251 }
3252 return TRUE;
3253 }
3254
3255 /* Remove cycles from the call graph. Set depth of nodes. */
3256
3257 static bfd_boolean
3258 remove_cycles (struct function_info *fun,
3259 struct bfd_link_info *info,
3260 void *param)
3261 {
3262 struct call_info **callp, *call;
3263 unsigned int depth = *(unsigned int *) param;
3264 unsigned int max_depth = depth;
3265
3266 fun->depth = depth;
3267 fun->visit2 = TRUE;
3268 fun->marking = TRUE;
3269
3270 callp = &fun->call_list;
3271 while ((call = *callp) != NULL)
3272 {
3273 call->max_depth = depth + !call->is_pasted;
3274 if (!call->fun->visit2)
3275 {
3276 if (!remove_cycles (call->fun, info, &call->max_depth))
3277 return FALSE;
3278 if (max_depth < call->max_depth)
3279 max_depth = call->max_depth;
3280 }
3281 else if (call->fun->marking)
3282 {
3283 struct spu_link_hash_table *htab = spu_hash_table (info);
3284
3285 if (!htab->params->auto_overlay
3286 && htab->params->stack_analysis)
3287 {
3288 const char *f1 = func_name (fun);
3289 const char *f2 = func_name (call->fun);
3290
3291 info->callbacks->info (_("Stack analysis will ignore the call "
3292 "from %s to %s\n"),
3293 f1, f2);
3294 }
3295
3296 call->broken_cycle = TRUE;
3297 }
3298 callp = &call->next;
3299 }
3300 fun->marking = FALSE;
3301 *(unsigned int *) param = max_depth;
3302 return TRUE;
3303 }
3304
3305 /* Check that we actually visited all nodes in remove_cycles. If we
3306 didn't, then there is some cycle in the call graph not attached to
3307 any root node. Arbitrarily choose a node in the cycle as a new
3308 root and break the cycle. */
3309
3310 static bfd_boolean
3311 mark_detached_root (struct function_info *fun,
3312 struct bfd_link_info *info,
3313 void *param)
3314 {
3315 if (fun->visit2)
3316 return TRUE;
3317 fun->non_root = FALSE;
3318 *(unsigned int *) param = 0;
3319 return remove_cycles (fun, info, param);
3320 }
3321
3322 /* Populate call_list for each function. */
3323
3324 static bfd_boolean
3325 build_call_tree (struct bfd_link_info *info)
3326 {
3327 bfd *ibfd;
3328 unsigned int depth;
3329
3330 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3331 {
3332 extern const bfd_target bfd_elf32_spu_vec;
3333 asection *sec;
3334
3335 if (ibfd->xvec != &bfd_elf32_spu_vec)
3336 continue;
3337
3338 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3339 if (!mark_functions_via_relocs (sec, info, TRUE))
3340 return FALSE;
3341 }
3342
3343 /* Transfer call info from hot/cold section part of function
3344 to main entry. */
3345 if (!spu_hash_table (info)->params->auto_overlay
3346 && !for_each_node (transfer_calls, info, 0, FALSE))
3347 return FALSE;
3348
3349 /* Find the call graph root(s). */
3350 if (!for_each_node (mark_non_root, info, 0, FALSE))
3351 return FALSE;
3352
3353 /* Remove cycles from the call graph. We start from the root node(s)
3354 so that we break cycles in a reasonable place. */
3355 depth = 0;
3356 if (!for_each_node (remove_cycles, info, &depth, TRUE))
3357 return FALSE;
3358
3359 return for_each_node (mark_detached_root, info, &depth, FALSE);
3360 }
3361
3362 /* qsort predicate to sort calls by priority, max_depth then count. */
3363
3364 static int
3365 sort_calls (const void *a, const void *b)
3366 {
3367 struct call_info *const *c1 = a;
3368 struct call_info *const *c2 = b;
3369 int delta;
3370
3371 delta = (*c2)->priority - (*c1)->priority;
3372 if (delta != 0)
3373 return delta;
3374
3375 delta = (*c2)->max_depth - (*c1)->max_depth;
3376 if (delta != 0)
3377 return delta;
3378
3379 delta = (*c2)->count - (*c1)->count;
3380 if (delta != 0)
3381 return delta;
3382
3383 return (char *) c1 - (char *) c2;
3384 }
3385
3386 struct _mos_param {
3387 unsigned int max_overlay_size;
3388 };
3389
3390 /* Set linker_mark and gc_mark on any sections that we will put in
3391 overlays. These flags are used by the generic ELF linker, but we
3392 won't be continuing on to bfd_elf_final_link so it is OK to use
3393 them. linker_mark is clear before we get here. Set segment_mark
3394 on sections that are part of a pasted function (excluding the last
3395 section).
3396
3397 Set up function rodata section if --overlay-rodata. We don't
3398 currently include merged string constant rodata sections, since these may be shared by functions placed in different overlays.
3399
3400 Sort the call graph so that the deepest nodes will be visited
3401 first. */
3402
3403 static bfd_boolean
3404 mark_overlay_section (struct function_info *fun,
3405 struct bfd_link_info *info,
3406 void *param)
3407 {
3408 struct call_info *call;
3409 unsigned int count;
3410 struct _mos_param *mos_param = param;
3411 struct spu_link_hash_table *htab = spu_hash_table (info);
3412
3413 if (fun->visit4)
3414 return TRUE;
3415
3416 fun->visit4 = TRUE;
3417 if (!fun->sec->linker_mark
3418 && (htab->params->ovly_flavour != ovly_soft_icache
3419 || htab->params->non_ia_text
3420 || strncmp (fun->sec->name, ".text.ia.", 9) == 0
3421 || strcmp (fun->sec->name, ".init") == 0
3422 || strcmp (fun->sec->name, ".fini") == 0))
3423 {
3424 unsigned int size;
3425
3426 fun->sec->linker_mark = 1;
3427 fun->sec->gc_mark = 1;
3428 fun->sec->segment_mark = 0;
3429 /* Ensure SEC_CODE is set on this text section (it ought to
3430 be!), and SEC_CODE is clear on rodata sections. We use
3431 this flag to differentiate the two overlay section types. */
3432 fun->sec->flags |= SEC_CODE;
3433
3434 size = fun->sec->size;
3435 if (htab->params->auto_overlay & OVERLAY_RODATA)
3436 {
3437 char *name = NULL;
3438
3439 /* Find the rodata section corresponding to this function's
3440 text section. */
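/* The mapping used: .text -> .rodata, .text.foo -> .rodata.foo,
   and .gnu.linkonce.t.foo -> .gnu.linkonce.r.foo. */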
3441 if (strcmp (fun->sec->name, ".text") == 0)
3442 {
3443 name = bfd_malloc (sizeof (".rodata"));
3444 if (name == NULL)
3445 return FALSE;
3446 memcpy (name, ".rodata", sizeof (".rodata"));
3447 }
3448 else if (strncmp (fun->sec->name, ".text.", 6) == 0)
3449 {
3450 size_t len = strlen (fun->sec->name);
3451 name = bfd_malloc (len + 3);
3452 if (name == NULL)
3453 return FALSE;
3454 memcpy (name, ".rodata", sizeof (".rodata"));
3455 memcpy (name + 7, fun->sec->name + 5, len - 4);
3456 }
3457 else if (strncmp (fun->sec->name, ".gnu.linkonce.t.", 16) == 0)
3458 {
3459 size_t len = strlen (fun->sec->name) + 1;
3460 name = bfd_malloc (len);
3461 if (name == NULL)
3462 return FALSE;
3463 memcpy (name, fun->sec->name, len);
3464 name[14] = 'r';
3465 }
3466
3467 if (name != NULL)
3468 {
3469 asection *rodata = NULL;
3470 asection *group_sec = elf_section_data (fun->sec)->next_in_group;
3471 if (group_sec == NULL)
3472 rodata = bfd_get_section_by_name (fun->sec->owner, name);
3473 else
3474 while (group_sec != NULL && group_sec != fun->sec)
3475 {
3476 if (strcmp (group_sec->name, name) == 0)
3477 {
3478 rodata = group_sec;
3479 break;
3480 }
3481 group_sec = elf_section_data (group_sec)->next_in_group;
3482 }
3483 fun->rodata = rodata;
3484 if (fun->rodata)
3485 {
3486 size += fun->rodata->size;
3487 if (htab->params->line_size != 0
3488 && size > htab->params->line_size)
3489 {
3490 size -= fun->rodata->size;
3491 fun->rodata = NULL;
3492 }
3493 else
3494 {
3495 fun->rodata->linker_mark = 1;
3496 fun->rodata->gc_mark = 1;
3497 fun->rodata->flags &= ~SEC_CODE;
3498 }
3499 }
3500 free (name);
3501 }
3502 }
3503 if (mos_param->max_overlay_size < size)
3504 mos_param->max_overlay_size = size;
3505 }
3506
3507 for (count = 0, call = fun->call_list; call != NULL; call = call->next)
3508 count += 1;
3509
3510 if (count > 1)
3511 {
3512 struct call_info **calls = bfd_malloc (count * sizeof (*calls));
3513 if (calls == NULL)
3514 return FALSE;
3515
3516 for (count = 0, call = fun->call_list; call != NULL; call = call->next)
3517 calls[count++] = call;
3518
3519 qsort (calls, count, sizeof (*calls), sort_calls);
3520
3521 fun->call_list = NULL;
3522 while (count != 0)
3523 {
3524 --count;
3525 calls[count]->next = fun->call_list;
3526 fun->call_list = calls[count];
3527 }
3528 free (calls);
3529 }
3530
3531 for (call = fun->call_list; call != NULL; call = call->next)
3532 {
3533 if (call->is_pasted)
3534 {
3535 /* There can only be one is_pasted call per function_info. */
3536 BFD_ASSERT (!fun->sec->segment_mark);
3537 fun->sec->segment_mark = 1;
3538 }
3539 if (!call->broken_cycle
3540 && !mark_overlay_section (call->fun, info, param))
3541 return FALSE;
3542 }
3543
3544 /* Don't put entry code into an overlay. The overlay manager needs
3545 a stack! Also, don't mark .ovl.init as an overlay. */
3546 if (fun->lo + fun->sec->output_offset + fun->sec->output_section->vma
3547 == info->output_bfd->start_address
3548 || strncmp (fun->sec->output_section->name, ".ovl.init", 9) == 0)
3549 {
3550 fun->sec->linker_mark = 0;
3551 if (fun->rodata != NULL)
3552 fun->rodata->linker_mark = 0;
3553 }
3554 return TRUE;
3555 }
3556
3557 /* If non-zero, also unmark functions called from within sections that
3558 we are unmarking. Unfortunately this isn't reliable since the
3559 call graph cannot know the destination of function pointer calls. */
3560 #define RECURSE_UNMARK 0
3561
3562 struct _uos_param {
3563 asection *exclude_input_section;
3564 asection *exclude_output_section;
3565 unsigned long clearing;
3566 };
3567
3568 /* Undo some of mark_overlay_section's work. */
3569
3570 static bfd_boolean
3571 unmark_overlay_section (struct function_info *fun,
3572 struct bfd_link_info *info,
3573 void *param)
3574 {
3575 struct call_info *call;
3576 struct _uos_param *uos_param = param;
3577 unsigned int excluded = 0;
3578
3579 if (fun->visit5)
3580 return TRUE;
3581
3582 fun->visit5 = TRUE;
3583
3584 excluded = 0;
3585 if (fun->sec == uos_param->exclude_input_section
3586 || fun->sec->output_section == uos_param->exclude_output_section)
3587 excluded = 1;
3588
3589 if (RECURSE_UNMARK)
3590 uos_param->clearing += excluded;
3591
3592 if (RECURSE_UNMARK ? uos_param->clearing : excluded)
3593 {
3594 fun->sec->linker_mark = 0;
3595 if (fun->rodata)
3596 fun->rodata->linker_mark = 0;
3597 }
3598
3599 for (call = fun->call_list; call != NULL; call = call->next)
3600 if (!call->broken_cycle
3601 && !unmark_overlay_section (call->fun, info, param))
3602 return FALSE;
3603
3604 if (RECURSE_UNMARK)
3605 uos_param->clearing -= excluded;
3606 return TRUE;
3607 }
3608
3609 struct _cl_param {
3610 unsigned int lib_size;
3611 asection **lib_sections;
3612 };
3613
3614 /* Add sections we have marked as belonging to overlays to an array
3615 for consideration as non-overlay sections. The array consists of
3616 pairs of sections, (text, rodata), for functions in the call graph. */
3617
3618 static bfd_boolean
3619 collect_lib_sections (struct function_info *fun,
3620 struct bfd_link_info *info,
3621 void *param)
3622 {
3623 struct _cl_param *lib_param = param;
3624 struct call_info *call;
3625 unsigned int size;
3626
3627 if (fun->visit6)
3628 return TRUE;
3629
3630 fun->visit6 = TRUE;
3631 if (!fun->sec->linker_mark || !fun->sec->gc_mark || fun->sec->segment_mark)
3632 return TRUE;
3633
3634 size = fun->sec->size;
3635 if (fun->rodata)
3636 size += fun->rodata->size;
3637
3638 if (size <= lib_param->lib_size)
3639 {
3640 *lib_param->lib_sections++ = fun->sec;
3641 fun->sec->gc_mark = 0;
3642 if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
3643 {
3644 *lib_param->lib_sections++ = fun->rodata;
3645 fun->rodata->gc_mark = 0;
3646 }
3647 else
3648 *lib_param->lib_sections++ = NULL;
3649 }
3650
3651 for (call = fun->call_list; call != NULL; call = call->next)
3652 if (!call->broken_cycle)
3653 collect_lib_sections (call->fun, info, param);
3654
3655 return TRUE;
3656 }
3657
3658 /* qsort predicate to sort sections by call count. */
3659
3660 static int
3661 sort_lib (const void *a, const void *b)
3662 {
3663 asection *const *s1 = a;
3664 asection *const *s2 = b;
3665 struct _spu_elf_section_data *sec_data;
3666 struct spu_elf_stack_info *sinfo;
3667 int delta;
3668
3669 delta = 0;
3670 if ((sec_data = spu_elf_section_data (*s1)) != NULL
3671 && (sinfo = sec_data->u.i.stack_info) != NULL)
3672 {
3673 int i;
3674 for (i = 0; i < sinfo->num_fun; ++i)
3675 delta -= sinfo->fun[i].call_count;
3676 }
3677
3678 if ((sec_data = spu_elf_section_data (*s2)) != NULL
3679 && (sinfo = sec_data->u.i.stack_info) != NULL)
3680 {
3681 int i;
3682 for (i = 0; i < sinfo->num_fun; ++i)
3683 delta += sinfo->fun[i].call_count;
3684 }
3685
3686 if (delta != 0)
3687 return delta;
3688
3689 return s1 - s2;
3690 }
3691
3692 /* Remove some sections from those marked to be in overlays. Choose
3693 those that are called from many places, likely library functions. */
3694
3695 static unsigned int
3696 auto_ovl_lib_functions (struct bfd_link_info *info, unsigned int lib_size)
3697 {
3698 bfd *ibfd;
3699 asection **lib_sections;
3700 unsigned int i, lib_count;
3701 struct _cl_param collect_lib_param;
3702 struct function_info dummy_caller;
3703 struct spu_link_hash_table *htab;
3704
3705 memset (&dummy_caller, 0, sizeof (dummy_caller));
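/* dummy_caller accumulates the call stubs still needed by the
   sections moved out of overlays, so that their size can be
   charged against the remaining lib_size budget. */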
3706 lib_count = 0;
3707 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3708 {
3709 extern const bfd_target bfd_elf32_spu_vec;
3710 asection *sec;
3711
3712 if (ibfd->xvec != &bfd_elf32_spu_vec)
3713 continue;
3714
3715 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3716 if (sec->linker_mark
3717 && sec->size < lib_size
3718 && (sec->flags & SEC_CODE) != 0)
3719 lib_count += 1;
3720 }
3721 lib_sections = bfd_malloc (lib_count * 2 * sizeof (*lib_sections));
3722 if (lib_sections == NULL)
3723 return (unsigned int) -1;
3724 collect_lib_param.lib_size = lib_size;
3725 collect_lib_param.lib_sections = lib_sections;
3726 if (!for_each_node (collect_lib_sections, info, &collect_lib_param,
3727 TRUE))
3728 return (unsigned int) -1;
3729 lib_count = (collect_lib_param.lib_sections - lib_sections) / 2;
3730
3731 /* Sort sections so that those with the most calls are first. */
3732 if (lib_count > 1)
3733 qsort (lib_sections, lib_count, 2 * sizeof (*lib_sections), sort_lib);
3734
3735 htab = spu_hash_table (info);
3736 for (i = 0; i < lib_count; i++)
3737 {
3738 unsigned int tmp, stub_size;
3739 asection *sec;
3740 struct _spu_elf_section_data *sec_data;
3741 struct spu_elf_stack_info *sinfo;
3742
3743 sec = lib_sections[2 * i];
3744 /* If this section is OK, its size must be less than lib_size. */
3745 tmp = sec->size;
3746 /* If it has a rodata section, then add that too. */
3747 if (lib_sections[2 * i + 1])
3748 tmp += lib_sections[2 * i + 1]->size;
3749 /* Add any new overlay call stubs needed by the section. */
3750 stub_size = 0;
3751 if (tmp < lib_size
3752 && (sec_data = spu_elf_section_data (sec)) != NULL
3753 && (sinfo = sec_data->u.i.stack_info) != NULL)
3754 {
3755 int k;
3756 struct call_info *call;
3757
3758 for (k = 0; k < sinfo->num_fun; ++k)
3759 for (call = sinfo->fun[k].call_list; call; call = call->next)
3760 if (call->fun->sec->linker_mark)
3761 {
3762 struct call_info *p;
3763 for (p = dummy_caller.call_list; p; p = p->next)
3764 if (p->fun == call->fun)
3765 break;
3766 if (!p)
3767 stub_size += ovl_stub_size (htab->params);
3768 }
3769 }
3770 if (tmp + stub_size < lib_size)
3771 {
3772 struct call_info **pp, *p;
3773
3774 /* This section fits. Mark it as non-overlay. */
3775 lib_sections[2 * i]->linker_mark = 0;
3776 if (lib_sections[2 * i + 1])
3777 lib_sections[2 * i + 1]->linker_mark = 0;
3778 lib_size -= tmp + stub_size;
3779 /* Call stubs to the section we just added are no longer
3780 needed. */
3781 pp = &dummy_caller.call_list;
3782 while ((p = *pp) != NULL)
3783 if (!p->fun->sec->linker_mark)
3784 {
3785 lib_size += ovl_stub_size (htab->params);
3786 *pp = p->next;
3787 free (p);
3788 }
3789 else
3790 pp = &p->next;
3791 /* Add new call stubs to dummy_caller. */
3792 if ((sec_data = spu_elf_section_data (sec)) != NULL
3793 && (sinfo = sec_data->u.i.stack_info) != NULL)
3794 {
3795 int k;
3796 struct call_info *call;
3797
3798 for (k = 0; k < sinfo->num_fun; ++k)
3799 for (call = sinfo->fun[k].call_list;
3800 call;
3801 call = call->next)
3802 if (call->fun->sec->linker_mark)
3803 {
3804 struct call_info *callee;
3805 callee = bfd_malloc (sizeof (*callee));
3806 if (callee == NULL)
3807 return (unsigned int) -1;
3808 *callee = *call;
3809 if (!insert_callee (&dummy_caller, callee))
3810 free (callee);
3811 }
3812 }
3813 }
3814 }
3815 while (dummy_caller.call_list != NULL)
3816 {
3817 struct call_info *call = dummy_caller.call_list;
3818 dummy_caller.call_list = call->next;
3819 free (call);
3820 }
3821 for (i = 0; i < 2 * lib_count; i++)
3822 if (lib_sections[i])
3823 lib_sections[i]->gc_mark = 1;
3824 free (lib_sections);
3825 return lib_size;
3826 }
3827
3828 /* Build an array of overlay sections. The deepest node's section is
3829 added first, then its parent node's section, then everything called
3830 from the parent section. The idea is to group sections so as to
3831 minimise calls between different overlays. */
3832
3833 static bfd_boolean
3834 collect_overlays (struct function_info *fun,
3835 struct bfd_link_info *info,
3836 void *param)
3837 {
3838 struct call_info *call;
3839 bfd_boolean added_fun;
3840 asection ***ovly_sections = param;
3841
3842 if (fun->visit7)
3843 return TRUE;
3844
3845 fun->visit7 = TRUE;
3846 for (call = fun->call_list; call != NULL; call = call->next)
3847 if (!call->is_pasted && !call->broken_cycle)
3848 {
3849 if (!collect_overlays (call->fun, info, ovly_sections))
3850 return FALSE;
3851 break;
3852 }
3853
3854 added_fun = FALSE;
3855 if (fun->sec->linker_mark && fun->sec->gc_mark)
3856 {
3857 fun->sec->gc_mark = 0;
3858 *(*ovly_sections)++ = fun->sec;
3859 if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
3860 {
3861 fun->rodata->gc_mark = 0;
3862 *(*ovly_sections)++ = fun->rodata;
3863 }
3864 else
3865 *(*ovly_sections)++ = NULL;
3866 added_fun = TRUE;
3867
3868 /* Pasted sections must stay with the first section. We don't
3869 put pasted sections in the array, just the first section.
3870 Mark subsequent sections as already considered. */
3871 if (fun->sec->segment_mark)
3872 {
3873 struct function_info *call_fun = fun;
3874 do
3875 {
3876 for (call = call_fun->call_list; call != NULL; call = call->next)
3877 if (call->is_pasted)
3878 {
3879 call_fun = call->fun;
3880 call_fun->sec->gc_mark = 0;
3881 if (call_fun->rodata)
3882 call_fun->rodata->gc_mark = 0;
3883 break;
3884 }
3885 if (call == NULL)
3886 abort ();
3887 }
3888 while (call_fun->sec->segment_mark);
3889 }
3890 }
3891
3892 for (call = fun->call_list; call != NULL; call = call->next)
3893 if (!call->broken_cycle
3894 && !collect_overlays (call->fun, info, ovly_sections))
3895 return FALSE;
3896
3897 if (added_fun)
3898 {
3899 struct _spu_elf_section_data *sec_data;
3900 struct spu_elf_stack_info *sinfo;
3901
3902 if ((sec_data = spu_elf_section_data (fun->sec)) != NULL
3903 && (sinfo = sec_data->u.i.stack_info) != NULL)
3904 {
3905 int i;
3906 for (i = 0; i < sinfo->num_fun; ++i)
3907 if (!collect_overlays (&sinfo->fun[i], info, ovly_sections))
3908 return FALSE;
3909 }
3910 }
3911
3912 return TRUE;
3913 }
3914
3915 struct _sum_stack_param {
3916 size_t cum_stack;
3917 size_t overall_stack;
3918 bfd_boolean emit_stack_syms;
3919 };
3920
3921 /* Descend the call graph for FUN, accumulating total stack required. */
3922
3923 static bfd_boolean
3924 sum_stack (struct function_info *fun,
3925 struct bfd_link_info *info,
3926 void *param)
3927 {
3928 struct call_info *call;
3929 struct function_info *max;
3930 size_t stack, cum_stack;
3931 const char *f1;
3932 bfd_boolean has_call;
3933 struct _sum_stack_param *sum_stack_param = param;
3934 struct spu_link_hash_table *htab;
3935
3936 cum_stack = fun->stack;
3937 sum_stack_param->cum_stack = cum_stack;
3938 if (fun->visit3)
3939 return TRUE;
3940
3941 has_call = FALSE;
3942 max = NULL;
3943 for (call = fun->call_list; call; call = call->next)
3944 {
3945 if (call->broken_cycle)
3946 continue;
3947 if (!call->is_pasted)
3948 has_call = TRUE;
3949 if (!sum_stack (call->fun, info, sum_stack_param))
3950 return FALSE;
3951 stack = sum_stack_param->cum_stack;
3952 /* Include caller stack for normal calls, don't do so for
3953 tail calls. fun->stack here is local stack usage for
3954 this function. */
3955 if (!call->is_tail || call->is_pasted || call->fun->start != NULL)
3956 stack += fun->stack;
3957 if (cum_stack < stack)
3958 {
3959 cum_stack = stack;
3960 max = call->fun;
3961 }
3962 }
3963
3964 sum_stack_param->cum_stack = cum_stack;
3965 stack = fun->stack;
3966 /* Now fun->stack holds cumulative stack. */
3967 fun->stack = cum_stack;
3968 fun->visit3 = TRUE;
3969
3970 if (!fun->non_root
3971 && sum_stack_param->overall_stack < cum_stack)
3972 sum_stack_param->overall_stack = cum_stack;
3973
3974 htab = spu_hash_table (info);
3975 if (htab->params->auto_overlay)
3976 return TRUE;
3977
3978 f1 = func_name (fun);
3979 if (htab->params->stack_analysis)
3980 {
3981 if (!fun->non_root)
3982 info->callbacks->info (_(" %s: 0x%v\n"), f1, (bfd_vma) cum_stack);
3983 info->callbacks->minfo (_("%s: 0x%v 0x%v\n"),
3984 f1, (bfd_vma) stack, (bfd_vma) cum_stack);
3985
3986 if (has_call)
3987 {
3988 info->callbacks->minfo (_(" calls:\n"));
3989 for (call = fun->call_list; call; call = call->next)
3990 if (!call->is_pasted && !call->broken_cycle)
3991 {
3992 const char *f2 = func_name (call->fun);
3993 const char *ann1 = call->fun == max ? "*" : " ";
3994 const char *ann2 = call->is_tail ? "t" : " ";
3995
3996 info->callbacks->minfo (_(" %s%s %s\n"), ann1, ann2, f2);
3997 }
3998 }
3999 }
4000
4001 if (sum_stack_param->emit_stack_syms)
4002 {
4003 char *name = bfd_malloc (18 + strlen (f1));
4004 struct elf_link_hash_entry *h;
4005
4006 if (name == NULL)
4007 return FALSE;
4008
4009 if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
4010 sprintf (name, "__stack_%s", f1);
4011 else
4012 sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);
4013
4014 h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
4015 free (name);
4016 if (h != NULL
4017 && (h->root.type == bfd_link_hash_new
4018 || h->root.type == bfd_link_hash_undefined
4019 || h->root.type == bfd_link_hash_undefweak))
4020 {
4021 h->root.type = bfd_link_hash_defined;
4022 h->root.u.def.section = bfd_abs_section_ptr;
4023 h->root.u.def.value = cum_stack;
4024 h->size = 0;
4025 h->type = 0;
4026 h->ref_regular = 1;
4027 h->def_regular = 1;
4028 h->ref_regular_nonweak = 1;
4029 h->forced_local = 1;
4030 h->non_elf = 0;
4031 }
4032 }
4033
4034 return TRUE;
4035 }
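/* For illustration, with --stack-analysis the callbacks above emit
   output shaped roughly like this (names and numbers invented):

     Stack size for functions. Annotations: '*' max stack, 't' tail call
     foo: 0x20 0x70
      calls:
        *  bar
         t baz

   where the two numbers are local and cumulative stack, '*' marks the
   callee on the deepest path and 't' a tail call. */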
4036
4037 /* SEC is part of a pasted function. Return the call_info for the
4038 next section of this function. */
4039
4040 static struct call_info *
4041 find_pasted_call (asection *sec)
4042 {
4043 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
4044 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
4045 struct call_info *call;
4046 int k;
4047
4048 for (k = 0; k < sinfo->num_fun; ++k)
4049 for (call = sinfo->fun[k].call_list; call != NULL; call = call->next)
4050 if (call->is_pasted)
4051 return call;
4052 abort ();
4053 return NULL;
4054 }
4055
4056 /* qsort predicate to sort bfds by file name. */
4057
4058 static int
4059 sort_bfds (const void *a, const void *b)
4060 {
4061 bfd *const *abfd1 = a;
4062 bfd *const *abfd2 = b;
4063
4064 return strcmp ((*abfd1)->filename, (*abfd2)->filename);
4065 }
4066
4067 static unsigned int
4068 print_one_overlay_section (FILE *script,
4069 unsigned int base,
4070 unsigned int count,
4071 unsigned int ovlynum,
4072 unsigned int *ovly_map,
4073 asection **ovly_sections,
4074 struct bfd_link_info *info)
4075 {
4076 unsigned int j;
4077
4078 for (j = base; j < count && ovly_map[j] == ovlynum; j++)
4079 {
4080 asection *sec = ovly_sections[2 * j];
4081
4082 if (fprintf (script, " %s%c%s (%s)\n",
4083 (sec->owner->my_archive != NULL
4084 ? sec->owner->my_archive->filename : ""),
4085 info->path_separator,
4086 sec->owner->filename,
4087 sec->name) <= 0)
4088 return -1;
4089 if (sec->segment_mark)
4090 {
4091 struct call_info *call = find_pasted_call (sec);
4092 while (call != NULL)
4093 {
4094 struct function_info *call_fun = call->fun;
4095 sec = call_fun->sec;
4096 if (fprintf (script, " %s%c%s (%s)\n",
4097 (sec->owner->my_archive != NULL
4098 ? sec->owner->my_archive->filename : ""),
4099 info->path_separator,
4100 sec->owner->filename,
4101 sec->name) <= 0)
4102 return -1;
4103 for (call = call_fun->call_list; call; call = call->next)
4104 if (call->is_pasted)
4105 break;
4106 }
4107 }
4108 }
4109
4110 for (j = base; j < count && ovly_map[j] == ovlynum; j++)
4111 {
4112 asection *sec = ovly_sections[2 * j + 1];
4113 if (sec != NULL
4114 && fprintf (script, " %s%c%s (%s)\n",
4115 (sec->owner->my_archive != NULL
4116 ? sec->owner->my_archive->filename : ""),
4117 info->path_separator,
4118 sec->owner->filename,
4119 sec->name) <= 0)
4120 return -1;
4121
4122 sec = ovly_sections[2 * j];
4123 if (sec->segment_mark)
4124 {
4125 struct call_info *call = find_pasted_call (sec);
4126 while (call != NULL)
4127 {
4128 struct function_info *call_fun = call->fun;
4129 sec = call_fun->rodata;
4130 if (sec != NULL
4131 && fprintf (script, " %s%c%s (%s)\n",
4132 (sec->owner->my_archive != NULL
4133 ? sec->owner->my_archive->filename : ""),
4134 info->path_separator,
4135 sec->owner->filename,
4136 sec->name) <= 0)
4137 return -1;
4138 for (call = call_fun->call_list; call; call = call->next)
4139 if (call->is_pasted)
4140 break;
4141 }
4142 }
4143 }
4144
4145 return j;
4146 }
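/* Each line this writes has the form

     archive.a:object.o (.text.fn)

   i.e. the containing archive (if any), the path separator, the object
   file and the section name; the names here are invented for
   illustration. */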
4147
4148 /* Handle --auto-overlay. */
4149
4150 static void
4151 spu_elf_auto_overlay (struct bfd_link_info *info)
4152 {
4153 bfd *ibfd;
4154 bfd **bfd_arr;
4155 struct elf_segment_map *m;
4156 unsigned int fixed_size, lo, hi;
4157 unsigned int reserved;
4158 struct spu_link_hash_table *htab;
4159 unsigned int base, i, count, bfd_count;
4160 unsigned int region, ovlynum;
4161 asection **ovly_sections, **ovly_p;
4162 unsigned int *ovly_map;
4163 FILE *script;
4164 unsigned int total_overlay_size, overlay_size;
4165 const char *ovly_mgr_entry;
4166 struct elf_link_hash_entry *h;
4167 struct _mos_param mos_param;
4168 struct _uos_param uos_param;
4169 struct function_info dummy_caller;
4170
4171 /* Find the extents of our loadable image. */
4172 lo = (unsigned int) -1;
4173 hi = 0;
4174 for (m = elf_tdata (info->output_bfd)->segment_map; m != NULL; m = m->next)
4175 if (m->p_type == PT_LOAD)
4176 for (i = 0; i < m->count; i++)
4177 if (m->sections[i]->size != 0)
4178 {
4179 if (m->sections[i]->vma < lo)
4180 lo = m->sections[i]->vma;
4181 if (m->sections[i]->vma + m->sections[i]->size - 1 > hi)
4182 hi = m->sections[i]->vma + m->sections[i]->size - 1;
4183 }
4184 fixed_size = hi + 1 - lo;
4185
4186 if (!discover_functions (info))
4187 goto err_exit;
4188
4189 if (!build_call_tree (info))
4190 goto err_exit;
4191
4192 htab = spu_hash_table (info);
4193 reserved = htab->params->auto_overlay_reserved;
4194 if (reserved == 0)
4195 {
4196 struct _sum_stack_param sum_stack_param;
4197
4198 sum_stack_param.emit_stack_syms = 0;
4199 sum_stack_param.overall_stack = 0;
4200 if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
4201 goto err_exit;
4202 reserved = (sum_stack_param.overall_stack
4203 + htab->params->extra_stack_space);
4204 }
4205
4206 /* No need for overlays if everything already fits. */
4207 if (fixed_size + reserved <= htab->local_store
4208 && htab->params->ovly_flavour != ovly_soft_icache)
4209 {
4210 htab->params->auto_overlay = 0;
4211 return;
4212 }
4213
4214 uos_param.exclude_input_section = 0;
4215 uos_param.exclude_output_section
4216 = bfd_get_section_by_name (info->output_bfd, ".interrupt");
4217
4218 ovly_mgr_entry = "__ovly_load";
4219 if (htab->params->ovly_flavour == ovly_soft_icache)
4220 ovly_mgr_entry = "__icache_br_handler";
4221 h = elf_link_hash_lookup (&htab->elf, ovly_mgr_entry,
4222 FALSE, FALSE, FALSE);
4223 if (h != NULL
4224 && (h->root.type == bfd_link_hash_defined
4225 || h->root.type == bfd_link_hash_defweak)
4226 && h->def_regular)
4227 {
4228 /* We have a user supplied overlay manager. */
4229 uos_param.exclude_input_section = h->root.u.def.section;
4230 }
4231 else
4232 {
4233 /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
4234 builtin version to .text, and will adjust .text size. */
4235 fixed_size += (*htab->params->spu_elf_load_ovl_mgr) ();
4236 }
4237
4238 /* Mark overlay sections, and find max overlay section size. */
4239 mos_param.max_overlay_size = 0;
4240 if (!for_each_node (mark_overlay_section, info, &mos_param, TRUE))
4241 goto err_exit;
4242
4243 /* We can't put the overlay manager or interrupt routines in
4244 overlays. */
4245 uos_param.clearing = 0;
4246 if ((uos_param.exclude_input_section
4247 || uos_param.exclude_output_section)
4248 && !for_each_node (unmark_overlay_section, info, &uos_param, TRUE))
4249 goto err_exit;
4250
4251 bfd_count = 0;
4252 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
4253 ++bfd_count;
4254 bfd_arr = bfd_malloc (bfd_count * sizeof (*bfd_arr));
4255 if (bfd_arr == NULL)
4256 goto err_exit;
4257
4258 /* Count overlay sections, and subtract their sizes from "fixed_size". */
4259 count = 0;
4260 bfd_count = 0;
4261 total_overlay_size = 0;
4262 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
4263 {
4264 extern const bfd_target bfd_elf32_spu_vec;
4265 asection *sec;
4266 unsigned int old_count;
4267
4268 if (ibfd->xvec != &bfd_elf32_spu_vec)
4269 continue;
4270
4271 old_count = count;
4272 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
4273 if (sec->linker_mark)
4274 {
4275 if ((sec->flags & SEC_CODE) != 0)
4276 count += 1;
4277 fixed_size -= sec->size;
4278 total_overlay_size += sec->size;
4279 }
4280 else if ((sec->flags & (SEC_ALLOC | SEC_LOAD)) == (SEC_ALLOC | SEC_LOAD)
4281 && sec->output_section->owner == info->output_bfd
4282 && strncmp (sec->output_section->name, ".ovl.init", 9) == 0)
4283 fixed_size -= sec->size;
4284 if (count != old_count)
4285 bfd_arr[bfd_count++] = ibfd;
4286 }
4287
4288 /* Since the overlay link script selects sections by file name and
4289 section name, ensure that file names are unique. */
4290 if (bfd_count > 1)
4291 {
4292 bfd_boolean ok = TRUE;
4293
4294 qsort (bfd_arr, bfd_count, sizeof (*bfd_arr), sort_bfds);
4295 for (i = 1; i < bfd_count; ++i)
4296 if (strcmp (bfd_arr[i - 1]->filename, bfd_arr[i]->filename) == 0)
4297 {
4298 if (bfd_arr[i - 1]->my_archive == bfd_arr[i]->my_archive)
4299 {
4300 if (bfd_arr[i - 1]->my_archive && bfd_arr[i]->my_archive)
4301 info->callbacks->einfo (_("%s duplicated in %s\n"),
4302 bfd_arr[i]->filename,
4303 bfd_arr[i]->my_archive->filename);
4304 else
4305 info->callbacks->einfo (_("%s duplicated\n"),
4306 bfd_arr[i]->filename);
4307 ok = FALSE;
4308 }
4309 }
4310 if (!ok)
4311 {
4312 info->callbacks->einfo (_("sorry, no support for duplicate "
4313 "object files in auto-overlay script\n"));
4314 bfd_set_error (bfd_error_bad_value);
4315 goto err_exit;
4316 }
4317 }
4318 free (bfd_arr);
4319
4320 fixed_size += reserved;
4321 fixed_size += htab->non_ovly_stub * ovl_stub_size (htab->params);
4322 if (fixed_size + mos_param.max_overlay_size <= htab->local_store)
4323 {
4324 if (htab->params->ovly_flavour == ovly_soft_icache)
4325 {
4326 /* Stubs in the non-icache area are bigger. */
4327 fixed_size += htab->non_ovly_stub * 16;
4328 /* Space for icache manager tables.
4329 a) Tag array, one quadword per cache line.
4330 - word 0: ia address of present line, init to zero. */
4331 fixed_size += 16 << htab->num_lines_log2;
4332 /* b) Rewrite "to" list, one quadword per cache line. */
4333 fixed_size += 16 << htab->num_lines_log2;
4334 /* c) Rewrite "from" list, one byte per outgoing branch (rounded up
4335 to a power-of-two number of full quadwords) per cache line. */
4336 fixed_size += 16 << (htab->fromelem_size_log2
4337 + htab->num_lines_log2);
4338 /* d) Pointer to __ea backing store (toe), 1 quadword. */
4339 fixed_size += 16;
4340 }
4341 else
4342 {
4343 /* Guess the number of overlays. Assuming the overlay buffer is
4344 on average only half full should be conservative. */
4345 ovlynum = (total_overlay_size * 2 * htab->params->num_lines
4346 / (htab->local_store - fixed_size));
4347 /* Space for _ovly_table[], _ovly_buf_table[] and toe. */
4348 fixed_size += ovlynum * 16 + 16 + 4 + 16;
4349 }
4350 }
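/* A worked example of the icache table sizing above, assuming
   num_lines_log2 == 5 (32 cache lines) and fromelem_size_log2 == 5:
   tag array 16 << 5 == 512 bytes, "to" list another 512 bytes,
   "from" list 16 << (5 + 5) == 16384 bytes, plus 16 bytes for the
   backing store pointer. The real values come from htab. */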
4351
4352 if (fixed_size + mos_param.max_overlay_size > htab->local_store)
4353 info->callbacks->einfo (_("non-overlay size of 0x%v plus maximum overlay "
4354 "size of 0x%v exceeds local store\n"),
4355 (bfd_vma) fixed_size,
4356 (bfd_vma) mos_param.max_overlay_size);
4357
4358 /* Now see if we should put some functions in the non-overlay area. */
4359 else if (fixed_size < htab->params->auto_overlay_fixed)
4360 {
4361 unsigned int max_fixed, lib_size;
4362
4363 max_fixed = htab->local_store - mos_param.max_overlay_size;
4364 if (max_fixed > htab->params->auto_overlay_fixed)
4365 max_fixed = htab->params->auto_overlay_fixed;
4366 lib_size = max_fixed - fixed_size;
4367 lib_size = auto_ovl_lib_functions (info, lib_size);
4368 if (lib_size == (unsigned int) -1)
4369 goto err_exit;
4370 fixed_size = max_fixed - lib_size;
4371 }
4372
4373 /* Build an array of sections, suitably sorted to place into
4374 overlays. */
4375 ovly_sections = bfd_malloc (2 * count * sizeof (*ovly_sections));
4376 if (ovly_sections == NULL)
4377 goto err_exit;
4378 ovly_p = ovly_sections;
4379 if (!for_each_node (collect_overlays, info, &ovly_p, TRUE))
4380 goto err_exit;
4381 count = (size_t) (ovly_p - ovly_sections) / 2;
4382 ovly_map = bfd_malloc (count * sizeof (*ovly_map));
4383 if (ovly_map == NULL)
4384 goto err_exit;
4385
4386 memset (&dummy_caller, 0, sizeof (dummy_caller));
4387 overlay_size = (htab->local_store - fixed_size) / htab->params->num_lines;
4388 if (htab->params->line_size != 0)
4389 overlay_size = htab->params->line_size;
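/* The loop below packs sections greedily, in the order produced by
   collect_overlays, starting a new overlay whenever code, rodata and
   stub space would overflow overlay_size. For example (illustrative
   numbers), with local_store 0x40000, fixed_size 0x10000 and two
   buffers, each overlay buffer is (0x40000 - 0x10000) / 2 = 0x18000
   bytes. */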
4390 base = 0;
4391 ovlynum = 0;
4392 while (base < count)
4393 {
4394 unsigned int size = 0, rosize = 0, roalign = 0;
4395
4396 for (i = base; i < count; i++)
4397 {
4398 asection *sec, *rosec;
4399 unsigned int tmp, rotmp;
4400 unsigned int num_stubs;
4401 struct call_info *call, *pasty;
4402 struct _spu_elf_section_data *sec_data;
4403 struct spu_elf_stack_info *sinfo;
4404 int k;
4405
4406 /* See whether we can add this section to the current
4407 overlay without overflowing our overlay buffer. */
4408 sec = ovly_sections[2 * i];
4409 tmp = align_power (size, sec->alignment_power) + sec->size;
4410 rotmp = rosize;
4411 rosec = ovly_sections[2 * i + 1];
4412 if (rosec != NULL)
4413 {
4414 rotmp = align_power (rotmp, rosec->alignment_power) + rosec->size;
4415 if (roalign < rosec->alignment_power)
4416 roalign = rosec->alignment_power;
4417 }
4418 if (align_power (tmp, roalign) + rotmp > overlay_size)
4419 break;
4420 if (sec->segment_mark)
4421 {
4422 /* Pasted sections must stay together, so add their
4423 sizes too. */
4424 struct call_info *pasty = find_pasted_call (sec);
4425 while (pasty != NULL)
4426 {
4427 struct function_info *call_fun = pasty->fun;
4428 tmp = (align_power (tmp, call_fun->sec->alignment_power)
4429 + call_fun->sec->size);
4430 if (call_fun->rodata)
4431 {
4432 rotmp = (align_power (rotmp,
4433 call_fun->rodata->alignment_power)
4434 + call_fun->rodata->size);
4435 if (roalign < call_fun->rodata->alignment_power)
4436 roalign = call_fun->rodata->alignment_power;
4437 }
4438 for (pasty = call_fun->call_list; pasty; pasty = pasty->next)
4439 if (pasty->is_pasted)
4440 break;
4441 }
4442 }
4443 if (align_power (tmp, roalign) + rotmp > overlay_size)
4444 break;
4445
4446 /* If we add this section, we might need new overlay call
4447 stubs. Add any overlay section calls to dummy_caller. */
4448 pasty = NULL;
4449 sec_data = spu_elf_section_data (sec);
4450 sinfo = sec_data->u.i.stack_info;
4451 for (k = 0; k < sinfo->num_fun; ++k)
4452 for (call = sinfo->fun[k].call_list; call; call = call->next)
4453 if (call->is_pasted)
4454 {
4455 BFD_ASSERT (pasty == NULL);
4456 pasty = call;
4457 }
4458 else if (call->fun->sec->linker_mark)
4459 {
4460 if (!copy_callee (&dummy_caller, call))
4461 goto err_exit;
4462 }
4463 while (pasty != NULL)
4464 {
4465 struct function_info *call_fun = pasty->fun;
4466 pasty = NULL;
4467 for (call = call_fun->call_list; call; call = call->next)
4468 if (call->is_pasted)
4469 {
4470 BFD_ASSERT (pasty == NULL);
4471 pasty = call;
4472 }
4473 else if (!copy_callee (&dummy_caller, call))
4474 goto err_exit;
4475 }
4476
4477 /* Calculate call stub size. */
4478 num_stubs = 0;
4479 for (call = dummy_caller.call_list; call; call = call->next)
4480 {
4481 unsigned int k;
4482 unsigned int stub_delta = 1;
4483
4484 if (htab->params->ovly_flavour == ovly_soft_icache)
4485 stub_delta = call->count;
4486 num_stubs += stub_delta;
4487
4488 /* If the call is within this overlay, we won't need a
4489 stub. */
4490 for (k = base; k < i + 1; k++)
4491 if (call->fun->sec == ovly_sections[2 * k])
4492 {
4493 num_stubs -= stub_delta;
4494 break;
4495 }
4496 }
4497 if (htab->params->ovly_flavour == ovly_soft_icache
4498 && num_stubs > htab->params->max_branch)
4499 break;
4500 if (align_power (tmp, roalign) + rotmp
4501 + num_stubs * ovl_stub_size (htab->params) > overlay_size)
4502 break;
4503 size = tmp;
4504 rosize = rotmp;
4505 }
4506
4507 if (i == base)
4508 {
4509 info->callbacks->einfo (_("%B:%A%s exceeds overlay size\n"),
4510 ovly_sections[2 * i]->owner,
4511 ovly_sections[2 * i],
4512 ovly_sections[2 * i + 1] ? " + rodata" : "");
4513 bfd_set_error (bfd_error_bad_value);
4514 goto err_exit;
4515 }
4516
4517 while (dummy_caller.call_list != NULL)
4518 {
4519 struct call_info *call = dummy_caller.call_list;
4520 dummy_caller.call_list = call->next;
4521 free (call);
4522 }
4523
4524 ++ovlynum;
4525 while (base < i)
4526 ovly_map[base++] = ovlynum;
4527 }
4528
4529 script = htab->params->spu_elf_open_overlay_script ();
4530
4531 if (htab->params->ovly_flavour == ovly_soft_icache)
4532 {
4533 if (fprintf (script, "SECTIONS\n{\n") <= 0)
4534 goto file_err;
4535
4536 if (fprintf (script,
4537 " . = ALIGN (%u);\n"
4538 " .ovl.init : { *(.ovl.init) }\n"
4539 " . = ABSOLUTE (ADDR (.ovl.init));\n",
4540 htab->params->line_size) <= 0)
4541 goto file_err;
4542
4543 base = 0;
4544 ovlynum = 1;
4545 while (base < count)
4546 {
4547 unsigned int indx = ovlynum - 1;
4548 unsigned int vma, lma;
4549
4550 vma = (indx & (htab->params->num_lines - 1)) << htab->line_size_log2;
4551 lma = vma + (((indx >> htab->num_lines_log2) + 1) << 18);
4552
4553 if (fprintf (script, " .ovly%u ABSOLUTE (ADDR (.ovl.init)) + %u "
4554 ": AT (LOADADDR (.ovl.init) + %u) {\n",
4555 ovlynum, vma, lma) <= 0)
4556 goto file_err;
4557
4558 base = print_one_overlay_section (script, base, count, ovlynum,
4559 ovly_map, ovly_sections, info);
4560 if (base == (unsigned) -1)
4561 goto file_err;
4562
4563 if (fprintf (script, " }\n") <= 0)
4564 goto file_err;
4565
4566 ovlynum++;
4567 }
4568
4569 if (fprintf (script, " . = ABSOLUTE (ADDR (.ovl.init)) + %u;\n",
4570 1 << (htab->num_lines_log2 + htab->line_size_log2)) <= 0)
4571 goto file_err;
4572
4573 if (fprintf (script, "}\nINSERT AFTER .toe;\n") <= 0)
4574 goto file_err;
4575 }
4576 else
4577 {
4578 if (fprintf (script, "SECTIONS\n{\n") <= 0)
4579 goto file_err;
4580
4581 if (fprintf (script,
4582 " . = ALIGN (16);\n"
4583 " .ovl.init : { *(.ovl.init) }\n"
4584 " . = ABSOLUTE (ADDR (.ovl.init));\n") <= 0)
4585 goto file_err;
4586
4587 for (region = 1; region <= htab->params->num_lines; region++)
4588 {
4589 ovlynum = region;
4590 base = 0;
4591 while (base < count && ovly_map[base] < ovlynum)
4592 base++;
4593
4594 if (base == count)
4595 break;
4596
4597 if (region == 1)
4598 {
4599 /* We need to set lma since we are overlaying .ovl.init. */
4600 if (fprintf (script,
4601 " OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))\n {\n") <= 0)
4602 goto file_err;
4603 }
4604 else
4605 {
4606 if (fprintf (script, " OVERLAY :\n {\n") <= 0)
4607 goto file_err;
4608 }
4609
4610 while (base < count)
4611 {
4612 if (fprintf (script, " .ovly%u {\n", ovlynum) <= 0)
4613 goto file_err;
4614
4615 base = print_one_overlay_section (script, base, count, ovlynum,
4616 ovly_map, ovly_sections, info);
4617 if (base == (unsigned) -1)
4618 goto file_err;
4619
4620 if (fprintf (script, " }\n") <= 0)
4621 goto file_err;
4622
4623 ovlynum += htab->params->num_lines;
4624 while (base < count && ovly_map[base] < ovlynum)
4625 base++;
4626 }
4627
4628 if (fprintf (script, " }\n") <= 0)
4629 goto file_err;
4630 }
4631
4632 if (fprintf (script, "}\nINSERT BEFORE .text;\n") <= 0)
4633 goto file_err;
4634 }
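/* The script written above looks roughly like the following for two
   overlay regions (object and section names invented):

     SECTIONS
     {
      . = ALIGN (16);
      .ovl.init : { *(.ovl.init) }
      . = ABSOLUTE (ADDR (.ovl.init));
      OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))
      {
       .ovly1 { foo.o (.text.f1) }
       .ovly3 { bar.o (.text.f2) }
      }
      OVERLAY :
      {
       .ovly2 { baz.o (.text.f3) }
      }
     }
     INSERT BEFORE .text;
*/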
4635
4636 free (ovly_map);
4637 free (ovly_sections);
4638
4639 if (fclose (script) != 0)
4640 goto file_err;
4641
4642 if (htab->params->auto_overlay & AUTO_RELINK)
4643 (*htab->params->spu_elf_relink) ();
4644
4645 xexit (0);
4646
4647 file_err:
4648 bfd_set_error (bfd_error_system_call);
4649 err_exit:
4650 info->callbacks->einfo ("%F%P: auto overlay error: %E\n");
4651 xexit (1);
4652 }
4653
4654 /* Provide an estimate of total stack required. */
4655
4656 static bfd_boolean
4657 spu_elf_stack_analysis (struct bfd_link_info *info)
4658 {
4659 struct spu_link_hash_table *htab;
4660 struct _sum_stack_param sum_stack_param;
4661
4662 if (!discover_functions (info))
4663 return FALSE;
4664
4665 if (!build_call_tree (info))
4666 return FALSE;
4667
4668 htab = spu_hash_table (info);
4669 if (htab->params->stack_analysis)
4670 {
4671 info->callbacks->info (_("Stack size for call graph root nodes.\n"));
4672 info->callbacks->minfo (_("\nStack size for functions. "
4673 "Annotations: '*' max stack, 't' tail call\n"));
4674 }
4675
4676 sum_stack_param.emit_stack_syms = htab->params->emit_stack_syms;
4677 sum_stack_param.overall_stack = 0;
4678 if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
4679 return FALSE;
4680
4681 if (htab->params->stack_analysis)
4682 info->callbacks->info (_("Maximum stack required is 0x%v\n"),
4683 (bfd_vma) sum_stack_param.overall_stack);
4684 return TRUE;
4685 }
4686
4687 /* Perform a final link. */
4688
4689 static bfd_boolean
4690 spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
4691 {
4692 struct spu_link_hash_table *htab = spu_hash_table (info);
4693
4694 if (htab->params->auto_overlay)
4695 spu_elf_auto_overlay (info);
4696
4697 if ((htab->params->stack_analysis
4698 || (htab->params->ovly_flavour == ovly_soft_icache
4699 && htab->params->lrlive_analysis))
4700 && !spu_elf_stack_analysis (info))
4701 info->callbacks->einfo ("%X%P: stack/lrlive analysis error: %E\n");
4702
4703 if (!spu_elf_build_stubs (info))
4704 info->callbacks->einfo ("%F%P: can not build overlay stubs: %E\n");
4705
4706 return bfd_elf_final_link (output_bfd, info);
4707 }
4708
4709 /* Called when not normally emitting relocs, i.e. !info->relocatable
4710 and !info->emitrelocations. Returns a count of special relocs
4711 that need to be emitted. */
4712
4713 static unsigned int
4714 spu_elf_count_relocs (struct bfd_link_info *info, asection *sec)
4715 {
4716 Elf_Internal_Rela *relocs;
4717 unsigned int count = 0;
4718
4719 relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
4720 info->keep_memory);
4721 if (relocs != NULL)
4722 {
4723 Elf_Internal_Rela *rel;
4724 Elf_Internal_Rela *relend = relocs + sec->reloc_count;
4725
4726 for (rel = relocs; rel < relend; rel++)
4727 {
4728 int r_type = ELF32_R_TYPE (rel->r_info);
4729 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
4730 ++count;
4731 }
4732
4733 if (elf_section_data (sec)->relocs != relocs)
4734 free (relocs);
4735 }
4736
4737 return count;
4738 }
4739
4740 /* Functions for adding fixup records to .fixup. */
4741
4742 #define FIXUP_RECORD_SIZE 4
4743
4744 #define FIXUP_PUT(output_bfd,htab,index,addr) \
4745 bfd_put_32 (output_bfd, addr, \
4746 htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
4747 #define FIXUP_GET(output_bfd,htab,index) \
4748 bfd_get_32 (output_bfd, \
4749 htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
4750
4751 /* Store OFFSET in .fixup. This assumes it will be called with an
4752 increasing OFFSET. When OFFSET falls in the same quadword as the
4753 previous one, it just sets a bit; otherwise it adds a new record. */
4754 static void
4755 spu_elf_emit_fixup (bfd * output_bfd, struct bfd_link_info *info,
4756 bfd_vma offset)
4757 {
4758 struct spu_link_hash_table *htab = spu_hash_table (info);
4759 asection *sfixup = htab->sfixup;
4760 bfd_vma qaddr = offset & ~(bfd_vma) 15;
4761 bfd_vma bit = ((bfd_vma) 8) >> ((offset & 15) >> 2);
4762 if (sfixup->reloc_count == 0)
4763 {
4764 FIXUP_PUT (output_bfd, htab, 0, qaddr | bit);
4765 sfixup->reloc_count++;
4766 }
4767 else
4768 {
4769 bfd_vma base = FIXUP_GET (output_bfd, htab, sfixup->reloc_count - 1);
4770 if (qaddr != (base & ~(bfd_vma) 15))
4771 {
4772 if ((sfixup->reloc_count + 1) * FIXUP_RECORD_SIZE > sfixup->size)
4773 (*_bfd_error_handler) (_("fatal error while creating .fixup"));
4774 FIXUP_PUT (output_bfd, htab, sfixup->reloc_count, qaddr | bit);
4775 sfixup->reloc_count++;
4776 }
4777 else
4778 FIXUP_PUT (output_bfd, htab, sfixup->reloc_count - 1, base | bit);
4779 }
4780 }
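/* A worked example of the encoding above: OFFSET 0x1008 gives qaddr
   0x1000 and bit 8 >> ((8 & 15) >> 2) == 2, so the record stored is
   0x1002. A later OFFSET 0x100c in the same quadword ORs in bit 1,
   giving 0x1003. Words at quadword offsets 0, 4, 8 and 12 map to
   mask bits 8, 4, 2 and 1. A loader might decode a record REC with
   a sketch like this (FIX_WORD is hypothetical):

     bfd_vma qaddr = rec & ~(bfd_vma) 15;
     int w;
     for (w = 0; w < 4; w++)
       if (rec & (8u >> w))
         FIX_WORD (qaddr + 4 * w);
*/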
4781
4782 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
4783
4784 static int
4785 spu_elf_relocate_section (bfd *output_bfd,
4786 struct bfd_link_info *info,
4787 bfd *input_bfd,
4788 asection *input_section,
4789 bfd_byte *contents,
4790 Elf_Internal_Rela *relocs,
4791 Elf_Internal_Sym *local_syms,
4792 asection **local_sections)
4793 {
4794 Elf_Internal_Shdr *symtab_hdr;
4795 struct elf_link_hash_entry **sym_hashes;
4796 Elf_Internal_Rela *rel, *relend;
4797 struct spu_link_hash_table *htab;
4798 asection *ea;
4799 int ret = TRUE;
4800 bfd_boolean emit_these_relocs = FALSE;
4801 bfd_boolean is_ea_sym;
4802 bfd_boolean stubs;
4803 unsigned int iovl = 0;
4804
4805 htab = spu_hash_table (info);
4806 stubs = (htab->stub_sec != NULL
4807 && maybe_needs_stubs (input_section));
4808 iovl = overlay_index (input_section);
4809 ea = bfd_get_section_by_name (output_bfd, "._ea");
4810 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4811 sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));
4812
4813 rel = relocs;
4814 relend = relocs + input_section->reloc_count;
4815 for (; rel < relend; rel++)
4816 {
4817 int r_type;
4818 reloc_howto_type *howto;
4819 unsigned int r_symndx;
4820 Elf_Internal_Sym *sym;
4821 asection *sec;
4822 struct elf_link_hash_entry *h;
4823 const char *sym_name;
4824 bfd_vma relocation;
4825 bfd_vma addend;
4826 bfd_reloc_status_type r;
4827 bfd_boolean unresolved_reloc;
4828 bfd_boolean warned;
4829 enum _stub_type stub_type;
4830
4831 r_symndx = ELF32_R_SYM (rel->r_info);
4832 r_type = ELF32_R_TYPE (rel->r_info);
4833 howto = elf_howto_table + r_type;
4834 unresolved_reloc = FALSE;
4835 warned = FALSE;
4836 h = NULL;
4837 sym = NULL;
4838 sec = NULL;
4839 if (r_symndx < symtab_hdr->sh_info)
4840 {
4841 sym = local_syms + r_symndx;
4842 sec = local_sections[r_symndx];
4843 sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
4844 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
4845 }
4846 else
4847 {
4848 if (sym_hashes == NULL)
4849 return FALSE;
4850
4851 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
4852
4853 while (h->root.type == bfd_link_hash_indirect
4854 || h->root.type == bfd_link_hash_warning)
4855 h = (struct elf_link_hash_entry *) h->root.u.i.link;
4856
4857 relocation = 0;
4858 if (h->root.type == bfd_link_hash_defined
4859 || h->root.type == bfd_link_hash_defweak)
4860 {
4861 sec = h->root.u.def.section;
4862 if (sec == NULL
4863 || sec->output_section == NULL)
4864 /* Set a flag that will be cleared later if we find a
4865 relocation value for this symbol. output_section
4866 is typically NULL for symbols satisfied by a shared
4867 library. */
4868 unresolved_reloc = TRUE;
4869 else
4870 relocation = (h->root.u.def.value
4871 + sec->output_section->vma
4872 + sec->output_offset);
4873 }
4874 else if (h->root.type == bfd_link_hash_undefweak)
4875 ;
4876 else if (info->unresolved_syms_in_objects == RM_IGNORE
4877 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
4878 ;
4879 else if (!info->relocatable
4880 && !(r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64))
4881 {
4882 bfd_boolean err;
4883 err = (info->unresolved_syms_in_objects == RM_GENERATE_ERROR
4884 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT);
4885 if (!info->callbacks->undefined_symbol (info,
4886 h->root.root.string,
4887 input_bfd,
4888 input_section,
4889 rel->r_offset, err))
4890 return FALSE;
4891 warned = TRUE;
4892 }
4893 sym_name = h->root.root.string;
4894 }
4895
4896 if (sec != NULL && elf_discarded_section (sec))
4897 {
4898 /* For relocs against symbols from removed linkonce sections,
4899 or sections discarded by a linker script, we just want the
4900 section contents zeroed. Avoid any special processing. */
4901 _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
4902 rel->r_info = 0;
4903 rel->r_addend = 0;
4904 continue;
4905 }
4906
4907 if (info->relocatable)
4908 continue;
4909
4910 /* Change "a rt,ra,rb" to "ai rt,ra,0". */
4911 if (r_type == R_SPU_ADD_PIC
4912 && h != NULL
4913 && !(h->def_regular || ELF_COMMON_DEF_P (h)))
4914 {
4915 bfd_byte *loc = contents + rel->r_offset;
4916 loc[0] = 0x1c;
4917 loc[1] = 0x00;
4918 loc[2] &= 0x3f;
4919 }
4920
4921 is_ea_sym = (ea != NULL
4922 && sec != NULL
4923 && sec->output_section == ea);
4924
4925 /* If this symbol is in an overlay area, we may need to relocate
4926 to the overlay stub. */
4927 addend = rel->r_addend;
4928 if (stubs
4929 && !is_ea_sym
4930 && (stub_type = needs_ovl_stub (h, sym, sec, input_section, rel,
4931 contents, info)) != no_stub)
4932 {
4933 unsigned int ovl = 0;
4934 struct got_entry *g, **head;
4935
4936 if (stub_type != nonovl_stub)
4937 ovl = iovl;
4938
4939 if (h != NULL)
4940 head = &h->got.glist;
4941 else
4942 head = elf_local_got_ents (input_bfd) + r_symndx;
4943
4944 for (g = *head; g != NULL; g = g->next)
4945 if (htab->params->ovly_flavour == ovly_soft_icache
4946 ? (g->ovl == ovl
4947 && g->br_addr == (rel->r_offset
4948 + input_section->output_offset
4949 + input_section->output_section->vma))
4950 : g->addend == addend && (g->ovl == ovl || g->ovl == 0))
4951 break;
4952 if (g == NULL)
4953 abort ();
4954
4955 relocation = g->stub_addr;
4956 addend = 0;
4957 }
4958 else
4959 {
4960 /* For soft icache, encode the overlay index into addresses. */
4961 if (htab->params->ovly_flavour == ovly_soft_icache
4962 && (r_type == R_SPU_ADDR16_HI
4963 || r_type == R_SPU_ADDR32 || r_type == R_SPU_REL32)
4964 && !is_ea_sym)
4965 {
4966 unsigned int ovl = overlay_index (sec);
4967 if (ovl != 0)
4968 {
4969 unsigned int set_id = ((ovl - 1) >> htab->num_lines_log2) + 1;
4970 relocation += set_id << 18;
4971 }
4972 }
4973 }
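/* Example of the encoding above: with num_lines_log2 == 4, a symbol
   in overlay 5 gets set_id ((5 - 1) >> 4) + 1 == 1, so 1 << 18 is
   added to the address (illustrative numbers). */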
4974
4975 if (htab->params->emit_fixups && !info->relocatable
4976 && (input_section->flags & SEC_ALLOC) != 0
4977 && r_type == R_SPU_ADDR32)
4978 {
4979 bfd_vma offset;
4980 offset = rel->r_offset + input_section->output_section->vma
4981 + input_section->output_offset;
4982 spu_elf_emit_fixup (output_bfd, info, offset);
4983 }
4984
4985 if (unresolved_reloc)
4986 ;
4987 else if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
4988 {
4989 if (is_ea_sym)
4990 {
4991 /* ._ea is a special section that isn't allocated in SPU
4992 memory, but rather occupies space in PPU memory as
4993 part of an embedded ELF image. If this reloc is
4994 against a symbol defined in ._ea, then transform the
4995 reloc into an equivalent one without a symbol
4996 relative to the start of the ELF image. */
4997 rel->r_addend += (relocation
4998 - ea->vma
4999 + elf_section_data (ea)->this_hdr.sh_offset);
5000 rel->r_info = ELF32_R_INFO (0, r_type);
5001 }
5002 emit_these_relocs = TRUE;
5003 continue;
5004 }
5005 else if (is_ea_sym)
5006 unresolved_reloc = TRUE;
5007
5008 if (unresolved_reloc)
5009 {
5010 (*_bfd_error_handler)
5011 (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
5012 input_bfd,
5013 bfd_get_section_name (input_bfd, input_section),
5014 (long) rel->r_offset,
5015 howto->name,
5016 sym_name);
5017 ret = FALSE;
5018 }
5019
5020 r = _bfd_final_link_relocate (howto,
5021 input_bfd,
5022 input_section,
5023 contents,
5024 rel->r_offset, relocation, addend);
5025
5026 if (r != bfd_reloc_ok)
5027 {
5028 const char *msg = (const char *) 0;
5029
5030 switch (r)
5031 {
5032 case bfd_reloc_overflow:
5033 if (!((*info->callbacks->reloc_overflow)
5034 (info, (h ? &h->root : NULL), sym_name, howto->name,
5035 (bfd_vma) 0, input_bfd, input_section, rel->r_offset)))
5036 return FALSE;
5037 break;
5038
5039 case bfd_reloc_undefined:
5040 if (!((*info->callbacks->undefined_symbol)
5041 (info, sym_name, input_bfd, input_section,
5042 rel->r_offset, TRUE)))
5043 return FALSE;
5044 break;
5045
5046 case bfd_reloc_outofrange:
5047 msg = _("internal error: out of range error");
5048 goto common_error;
5049
5050 case bfd_reloc_notsupported:
5051 msg = _("internal error: unsupported relocation error");
5052 goto common_error;
5053
5054 case bfd_reloc_dangerous:
5055 msg = _("internal error: dangerous error");
5056 goto common_error;
5057
5058 default:
5059 msg = _("internal error: unknown error");
5060 /* fall through */
5061
5062 common_error:
5063 ret = FALSE;
5064 if (!((*info->callbacks->warning)
5065 (info, msg, sym_name, input_bfd, input_section,
5066 rel->r_offset)))
5067 return FALSE;
5068 break;
5069 }
5070 }
5071 }
5072
5073 if (ret
5074 && emit_these_relocs
5075 && !info->emitrelocations)
5076 {
5077 Elf_Internal_Rela *wrel;
5078 Elf_Internal_Shdr *rel_hdr;
5079
5080 wrel = rel = relocs;
5081 relend = relocs + input_section->reloc_count;
5082 for (; rel < relend; rel++)
5083 {
5084 int r_type;
5085
5086 r_type = ELF32_R_TYPE (rel->r_info);
5087 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
5088 *wrel++ = *rel;
5089 }
5090 input_section->reloc_count = wrel - relocs;
5091 /* Backflips for _bfd_elf_link_output_relocs. */
5092 rel_hdr = &elf_section_data (input_section)->rel_hdr;
5093 rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
5094 ret = 2;
5095 }
5096
5097 return ret;
5098 }
5099
5100 /* Adjust _SPUEAR_ syms to point at their overlay stubs. */
5101
5102 static int
5103 spu_elf_output_symbol_hook (struct bfd_link_info *info,
5104 const char *sym_name ATTRIBUTE_UNUSED,
5105 Elf_Internal_Sym *sym,
5106 asection *sym_sec ATTRIBUTE_UNUSED,
5107 struct elf_link_hash_entry *h)
5108 {
5109 struct spu_link_hash_table *htab = spu_hash_table (info);
5110
5111 if (!info->relocatable
5112 && htab->stub_sec != NULL
5113 && h != NULL
5114 && (h->root.type == bfd_link_hash_defined
5115 || h->root.type == bfd_link_hash_defweak)
5116 && h->def_regular
5117 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
5118 {
5119 struct got_entry *g;
5120
5121 for (g = h->got.glist; g != NULL; g = g->next)
5122 if (htab->params->ovly_flavour == ovly_soft_icache
5123 ? g->br_addr == g->stub_addr
5124 : g->addend == 0 && g->ovl == 0)
5125 {
5126 sym->st_shndx = (_bfd_elf_section_from_bfd_section
5127 (htab->stub_sec[0]->output_section->owner,
5128 htab->stub_sec[0]->output_section));
5129 sym->st_value = g->stub_addr;
5130 break;
5131 }
5132 }
5133
5134 return 1;
5135 }
5136
5137 static int spu_plugin = 0;
5138
5139 void
5140 spu_elf_plugin (int val)
5141 {
5142 spu_plugin = val;
5143 }
5144
5145 /* Set ELF header e_type for plugins. */
5146
5147 static void
5148 spu_elf_post_process_headers (bfd *abfd,
5149 struct bfd_link_info *info ATTRIBUTE_UNUSED)
5150 {
5151 if (spu_plugin)
5152 {
5153 Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);
5154
5155 i_ehdrp->e_type = ET_DYN;
5156 }
5157 }
5158
5159 /* We may add an extra PT_LOAD segment for .toe. We also need extra
5160 segments for overlays. */
5161
5162 static int
5163 spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
5164 {
5165 int extra = 0;
5166 asection *sec;
5167
5168 if (info != NULL)
5169 {
5170 struct spu_link_hash_table *htab = spu_hash_table (info);
5171 extra = htab->num_overlays;
5172 }
5173
5174 if (extra)
5175 ++extra;
5176
5177 sec = bfd_get_section_by_name (abfd, ".toe");
5178 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
5179 ++extra;
5180
5181 return extra;
5182 }
5183
5184 /* Remove .toe section from other PT_LOAD segments and put it in
5185 a segment of its own. Put overlays in separate segments too. */
5186
5187 static bfd_boolean
5188 spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
5189 {
5190 asection *toe, *s;
5191 struct elf_segment_map *m, *m_overlay;
5192 struct elf_segment_map **p, **p_overlay;
5193 unsigned int i;
5194
5195 if (info == NULL)
5196 return TRUE;
5197
5198 toe = bfd_get_section_by_name (abfd, ".toe");
5199 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
5200 if (m->p_type == PT_LOAD && m->count > 1)
5201 for (i = 0; i < m->count; i++)
5202 if ((s = m->sections[i]) == toe
5203 || spu_elf_section_data (s)->u.o.ovl_index != 0)
5204 {
5205 struct elf_segment_map *m2;
5206 bfd_vma amt;
5207
5208 if (i + 1 < m->count)
5209 {
5210 amt = sizeof (struct elf_segment_map);
5211 amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
5212 m2 = bfd_zalloc (abfd, amt);
5213 if (m2 == NULL)
5214 return FALSE;
5215 m2->count = m->count - (i + 1);
5216 memcpy (m2->sections, m->sections + i + 1,
5217 m2->count * sizeof (m->sections[0]));
5218 m2->p_type = PT_LOAD;
5219 m2->next = m->next;
5220 m->next = m2;
5221 }
5222 m->count = 1;
5223 if (i != 0)
5224 {
5225 m->count = i;
5226 amt = sizeof (struct elf_segment_map);
5227 m2 = bfd_zalloc (abfd, amt);
5228 if (m2 == NULL)
5229 return FALSE;
5230 m2->p_type = PT_LOAD;
5231 m2->count = 1;
5232 m2->sections[0] = s;
5233 m2->next = m->next;
5234 m->next = m2;
5235 }
5236 break;
5237 }
5238
5239
5240 /* Some SPU ELF loaders ignore the PF_OVERLAY flag and just load all
5241 PT_LOAD segments. This can cause the .ovl.init section to be
5242 overwritten with the contents of some overlay segment. To work
5243 around this issue, we ensure that all PF_OVERLAY segments are
5244 sorted first amongst the program headers; this ensures that even
5245 with a broken loader, the .ovl.init section (which is not marked
5246 as PF_OVERLAY) will be placed into SPU local store on startup. */
5247
5248 /* Move all overlay segments onto a separate list. */
5249 p = &elf_tdata (abfd)->segment_map;
5250 p_overlay = &m_overlay;
5251 while (*p != NULL)
5252 {
5253 if ((*p)->p_type == PT_LOAD && (*p)->count == 1
5254 && spu_elf_section_data ((*p)->sections[0])->u.o.ovl_index != 0)
5255 {
5256 struct elf_segment_map *m = *p;
5257 *p = m->next;
5258 *p_overlay = m;
5259 p_overlay = &m->next;
5260 continue;
5261 }
5262
5263 p = &((*p)->next);
5264 }
5265
5266 /* Re-insert overlay segments at the head of the segment map. */
5267 *p_overlay = elf_tdata (abfd)->segment_map;
5268 elf_tdata (abfd)->segment_map = m_overlay;
5269
5270 return TRUE;
5271 }
5272
5273 /* Tweak the section type of .note.spu_name. */
5274
5275 static bfd_boolean
5276 spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
5277 Elf_Internal_Shdr *hdr,
5278 asection *sec)
5279 {
5280 if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
5281 hdr->sh_type = SHT_NOTE;
5282 return TRUE;
5283 }
5284
5285 /* Tweak phdrs before writing them out. */
5286
5287 static int
5288 spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
5289 {
5290 const struct elf_backend_data *bed;
5291 struct elf_obj_tdata *tdata;
5292 Elf_Internal_Phdr *phdr, *last;
5293 struct spu_link_hash_table *htab;
5294 unsigned int count;
5295 unsigned int i;
5296
5297 if (info == NULL)
5298 return TRUE;
5299
5300 bed = get_elf_backend_data (abfd);
5301 tdata = elf_tdata (abfd);
5302 phdr = tdata->phdr;
5303 count = tdata->program_header_size / bed->s->sizeof_phdr;
5304 htab = spu_hash_table (info);
5305 if (htab->num_overlays != 0)
5306 {
5307 struct elf_segment_map *m;
5308 unsigned int o;
5309
5310 for (i = 0, m = elf_tdata (abfd)->segment_map; m; ++i, m = m->next)
5311 if (m->count != 0
5312 && (o = spu_elf_section_data (m->sections[0])->u.o.ovl_index) != 0)
5313 {
5314 /* Mark this as an overlay header. */
5315 phdr[i].p_flags |= PF_OVERLAY;
5316
5317 if (htab->ovtab != NULL && htab->ovtab->size != 0
5318 && htab->params->ovly_flavour != ovly_soft_icache)
5319 {
5320 bfd_byte *p = htab->ovtab->contents;
5321 unsigned int off = o * 16 + 8;
5322
5323 /* Write file_off into _ovly_table. */
5324 bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
5325 }
5326 }
5327 /* Soft-icache has its file offset put in .ovl.init. */
5328 if (htab->init != NULL && htab->init->size != 0)
5329 {
5330 bfd_vma val = elf_section_data (htab->ovl_sec[0])->this_hdr.sh_offset;
5331
5332 bfd_put_32 (htab->init->owner, val, htab->init->contents + 4);
5333 }
5334 }
5335
5336 /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
5337 of 16. This should always be possible when using the standard
5338 linker scripts, but don't create overlapping segments if
5339 someone is playing games with linker scripts. */
5340 last = NULL;
5341 for (i = count; i-- != 0; )
5342 if (phdr[i].p_type == PT_LOAD)
5343 {
5344 unsigned adjust;
5345
5346 adjust = -phdr[i].p_filesz & 15;
5347 if (adjust != 0
5348 && last != NULL
5349 && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
5350 break;
5351
5352 adjust = -phdr[i].p_memsz & 15;
5353 if (adjust != 0
5354 && last != NULL
5355 && phdr[i].p_filesz != 0
5356 && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
5357 && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
5358 break;
5359
5360 if (phdr[i].p_filesz != 0)
5361 last = &phdr[i];
5362 }
5363
5364 if (i == (unsigned int) -1)
5365 for (i = count; i-- != 0; )
5366 if (phdr[i].p_type == PT_LOAD)
5367 {
5368 unsigned adjust;
5369
5370 adjust = -phdr[i].p_filesz & 15;
5371 phdr[i].p_filesz += adjust;
5372
5373 adjust = -phdr[i].p_memsz & 15;
5374 phdr[i].p_memsz += adjust;
5375 }
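/* E.g. p_filesz 0x1234 is rounded up by -0x1234 & 15 == 0xc to
   0x1240. */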
5376
5377 return TRUE;
5378 }
5379
5380 bfd_boolean
5381 spu_elf_size_sections (bfd * output_bfd, struct bfd_link_info *info)
5382 {
5383 struct spu_link_hash_table *htab = spu_hash_table (info);
5384 if (htab->params->emit_fixups)
5385 {
5386 asection *sfixup = htab->sfixup;
5387 int fixup_count = 0;
5388 bfd *ibfd;
5389 size_t size;
5390
5391 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
5392 {
5393 asection *isec;
5394
5395 if (bfd_get_flavour (ibfd) != bfd_target_elf_flavour)
5396 continue;
5397
5398 /* Walk over each section attached to the input bfd. */
5399 for (isec = ibfd->sections; isec != NULL; isec = isec->next)
5400 {
5401 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
5402 bfd_vma base_end;
5403
5404 /* If there aren't any relocs, then there's nothing more
5405 to do. */
5406 if ((isec->flags & SEC_RELOC) == 0
5407 || isec->reloc_count == 0)
5408 continue;
5409
5410 /* Get the relocs. */
5411 internal_relocs =
5412 _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
5413 info->keep_memory);
5414 if (internal_relocs == NULL)
5415 return FALSE;
5416
5417 /* 1 quadword can contain up to 4 R_SPU_ADDR32
5418 relocations. They are stored in a single word by
5419 saving the upper 28 bits of the address and setting the
5420 lower 4 bits to a bit mask of the words that have the
5421 relocation. BASE_END keeps track of the next quadword. */
5422 irela = internal_relocs;
5423 irelaend = irela + isec->reloc_count;
5424 base_end = 0;
5425 for (; irela < irelaend; irela++)
5426 if (ELF32_R_TYPE (irela->r_info) == R_SPU_ADDR32
5427 && irela->r_offset >= base_end)
5428 {
5429 base_end = (irela->r_offset & ~(bfd_vma) 15) + 16;
5430 fixup_count++;
5431 }
5432 }
5433 }
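/* Note that up to four R_SPU_ADDR32 relocs landing in one quadword
   contribute a single fixup record; BASE_END skips past that quadword
   after the first hit. */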
5434
5435 /* We always allocate an all-zero fixup record at the end as a sentinel. */
5436 size = (fixup_count + 1) * FIXUP_RECORD_SIZE;
5437 if (!bfd_set_section_size (output_bfd, sfixup, size))
5438 return FALSE;
5439 sfixup->contents = (bfd_byte *) bfd_zalloc (info->input_bfds, size);
5440 if (sfixup->contents == NULL)
5441 return FALSE;
5442 }
5443 return TRUE;
5444 }
5445
5446 #define TARGET_BIG_SYM bfd_elf32_spu_vec
5447 #define TARGET_BIG_NAME "elf32-spu"
5448 #define ELF_ARCH bfd_arch_spu
5449 #define ELF_MACHINE_CODE EM_SPU
5450 /* This matches the alignment needed for DMA. */
5451 #define ELF_MAXPAGESIZE 0x80
5452 #define elf_backend_rela_normal 1
5453 #define elf_backend_can_gc_sections 1
5454
5455 #define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
5456 #define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
5457 #define elf_info_to_howto spu_elf_info_to_howto
5458 #define elf_backend_count_relocs spu_elf_count_relocs
5459 #define elf_backend_relocate_section spu_elf_relocate_section
5460 #define elf_backend_symbol_processing spu_elf_backend_symbol_processing
5461 #define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
5462 #define elf_backend_object_p spu_elf_object_p
5463 #define bfd_elf32_new_section_hook spu_elf_new_section_hook
5464 #define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create
5465
5466 #define elf_backend_additional_program_headers spu_elf_additional_program_headers
5467 #define elf_backend_modify_segment_map spu_elf_modify_segment_map
5468 #define elf_backend_modify_program_headers spu_elf_modify_program_headers
5469 #define elf_backend_post_process_headers spu_elf_post_process_headers
5470 #define elf_backend_fake_sections spu_elf_fake_sections
5471 #define elf_backend_special_sections spu_elf_special_sections
5472 #define bfd_elf32_bfd_final_link spu_elf_final_link
5473
5474 #include "elf32-target.h"