bfd/
[deliverable/binutils-gdb.git] / bfd / elf32-spu.c
1 /* SPU specific support for 32-bit ELF
2
3 Copyright 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
20
21 #include "sysdep.h"
22 #include "libiberty.h"
23 #include "bfd.h"
24 #include "bfdlink.h"
25 #include "libbfd.h"
26 #include "elf-bfd.h"
27 #include "elf/spu.h"
28 #include "elf32-spu.h"
29
30 /* We use RELA style relocs. Don't define USE_REL. */
31
32 static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
33 void *, asection *,
34 bfd *, char **);
35
36 /* Values of type 'enum elf_spu_reloc_type' are used to index this
37 array, so it must be declared in the order of that type. */
38
/* One entry per 'enum elf_spu_reloc_type' value, in that order, since
   the r_type is used directly as an index.  HOWTO argument order (see
   bfd/reloc.c): type, rightshift, size, bitsize, pc_relative, bitpos,
   complain_on_overflow, special_function, name, partial_inplace,
   src_mask, dst_mask, pcrel_offset.  */
static reloc_howto_type elf_howto_table[] = {
  HOWTO (R_SPU_NONE, 0, 0, 0, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_NONE",
	 FALSE, 0, 0x00000000, FALSE),
  HOWTO (R_SPU_ADDR10, 4, 2, 10, FALSE, 14, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR10",
	 FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16, 2, 2, 16, FALSE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16_HI",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_LO, 0, 2, 16, FALSE, 7, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR16_LO",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR18, 0, 2, 18, FALSE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR18",
	 FALSE, 0, 0x01ffff80, FALSE),
  HOWTO (R_SPU_ADDR32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR32",
	 FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_REL16, 2, 2, 16, TRUE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_REL16",
	 FALSE, 0, 0x007fff80, TRUE),
  HOWTO (R_SPU_ADDR7, 0, 2, 7, FALSE, 14, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR7",
	 FALSE, 0, 0x001fc000, FALSE),
  /* The two REL9 forms use a special function because their 9-bit
     fields are split across non-contiguous instruction bits.  */
  HOWTO (R_SPU_REL9, 2, 2, 9, TRUE, 0, complain_overflow_signed,
	 spu_elf_rel9, "SPU_REL9",
	 FALSE, 0, 0x0180007f, TRUE),
  HOWTO (R_SPU_REL9I, 2, 2, 9, TRUE, 0, complain_overflow_signed,
	 spu_elf_rel9, "SPU_REL9I",
	 FALSE, 0, 0x0000c07f, TRUE),
  HOWTO (R_SPU_ADDR10I, 0, 2, 10, FALSE, 14, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR10I",
	 FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16I, 0, 2, 16, FALSE, 7, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR16I",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_REL32, 0, 2, 32, TRUE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_REL32",
	 FALSE, 0, 0xffffffff, TRUE),
  HOWTO (R_SPU_ADDR16X, 0, 2, 16, FALSE, 7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16X",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_PPU32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_PPU32",
	 FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_PPU64, 0, 4, 64, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_PPU64",
	 FALSE, 0, -1, FALSE),
  HOWTO (R_SPU_ADD_PIC, 0, 0, 0, FALSE, 0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADD_PIC",
	 FALSE, 0, 0x00000000, FALSE),
};
95
/* Sections the SPU backend recognizes by name prefix.  Fields are:
   prefix, prefix length, suffix length, sh_type, sh_flags.
   NOTE(review): "._ea" is given SHF_WRITE without SHF_ALLOC here --
   presumably intentional since effective-address data lives in PPU
   space, not local store; confirm against the SPU ABI.  */
static struct bfd_elf_special_section const spu_elf_special_sections[] = {
  { "._ea", 4, 0, SHT_PROGBITS, SHF_WRITE },
  { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
  { NULL, 0, 0, 0, 0 }
};
101
102 static enum elf_spu_reloc_type
103 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
104 {
105 switch (code)
106 {
107 default:
108 return R_SPU_NONE;
109 case BFD_RELOC_SPU_IMM10W:
110 return R_SPU_ADDR10;
111 case BFD_RELOC_SPU_IMM16W:
112 return R_SPU_ADDR16;
113 case BFD_RELOC_SPU_LO16:
114 return R_SPU_ADDR16_LO;
115 case BFD_RELOC_SPU_HI16:
116 return R_SPU_ADDR16_HI;
117 case BFD_RELOC_SPU_IMM18:
118 return R_SPU_ADDR18;
119 case BFD_RELOC_SPU_PCREL16:
120 return R_SPU_REL16;
121 case BFD_RELOC_SPU_IMM7:
122 return R_SPU_ADDR7;
123 case BFD_RELOC_SPU_IMM8:
124 return R_SPU_NONE;
125 case BFD_RELOC_SPU_PCREL9a:
126 return R_SPU_REL9;
127 case BFD_RELOC_SPU_PCREL9b:
128 return R_SPU_REL9I;
129 case BFD_RELOC_SPU_IMM10:
130 return R_SPU_ADDR10I;
131 case BFD_RELOC_SPU_IMM16:
132 return R_SPU_ADDR16I;
133 case BFD_RELOC_32:
134 return R_SPU_ADDR32;
135 case BFD_RELOC_32_PCREL:
136 return R_SPU_REL32;
137 case BFD_RELOC_SPU_PPU32:
138 return R_SPU_PPU32;
139 case BFD_RELOC_SPU_PPU64:
140 return R_SPU_PPU64;
141 case BFD_RELOC_SPU_ADD_PIC:
142 return R_SPU_ADD_PIC;
143 }
144 }
145
146 static void
147 spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
148 arelent *cache_ptr,
149 Elf_Internal_Rela *dst)
150 {
151 enum elf_spu_reloc_type r_type;
152
153 r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
154 BFD_ASSERT (r_type < R_SPU_max);
155 cache_ptr->howto = &elf_howto_table[(int) r_type];
156 }
157
158 static reloc_howto_type *
159 spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
160 bfd_reloc_code_real_type code)
161 {
162 enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);
163
164 if (r_type == R_SPU_NONE)
165 return NULL;
166
167 return elf_howto_table + r_type;
168 }
169
170 static reloc_howto_type *
171 spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
172 const char *r_name)
173 {
174 unsigned int i;
175
176 for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
177 if (elf_howto_table[i].name != NULL
178 && strcasecmp (elf_howto_table[i].name, r_name) == 0)
179 return &elf_howto_table[i];
180
181 return NULL;
182 }
183
/* Apply R_SPU_REL9 and R_SPU_REL9I relocs.  These carry a 9-bit
   pc-relative word offset whose two high bits sit in instruction bits
   that are not contiguous with the low seven, so the generic reloc
   machinery cannot place the field; this special function scatters the
   bits and lets the howto dst_mask pick the right positions.  */

static bfd_reloc_status_type
spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
	      void *data, asection *input_section,
	      bfd *output_bfd, char **error_message)
{
  bfd_size_type octets;
  bfd_vma val;
  long insn;

  /* If this is a relocatable link (output_bfd test tells us), just
     call the generic function.  Any adjustment will be done at final
     link time.  */
  if (output_bfd != NULL)
    return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
				  input_section, output_bfd, error_message);

  /* NOTE(review): later BFD backends use ">=" here; with ">" an
     address exactly at the section limit reads 4 bytes past the end.
     Confirm against current bfd conventions before changing.  */
  if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
    return bfd_reloc_outofrange;
  octets = reloc_entry->address * bfd_octets_per_byte (abfd);

  /* Get symbol value.  */
  val = 0;
  if (!bfd_is_com_section (symbol->section))
    val = symbol->value;
  if (symbol->section->output_section)
    val += symbol->section->output_section->vma;

  val += reloc_entry->addend;

  /* Make it pc-relative.  Note the reloc's own offset within the
     section is not subtracted here -- presumably the SPU REL9 forms
     are relative to the section base at this stage.  */
  val -= input_section->output_section->vma + input_section->output_offset;

  /* Convert byte offset to word offset and range-check the signed
     9-bit field (-256..255).  */
  val >>= 2;
  if (val + 256 >= 512)
    return bfd_reloc_overflow;

  insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);

  /* Move two high bits of value to REL9I and REL9 position.
     The mask will take care of selecting the right field.  */
  val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
  insn &= ~reloc_entry->howto->dst_mask;
  insn |= val & reloc_entry->howto->dst_mask;
  bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
  return bfd_reloc_ok;
}
232
233 static bfd_boolean
234 spu_elf_new_section_hook (bfd *abfd, asection *sec)
235 {
236 if (!sec->used_by_bfd)
237 {
238 struct _spu_elf_section_data *sdata;
239
240 sdata = bfd_zalloc (abfd, sizeof (*sdata));
241 if (sdata == NULL)
242 return FALSE;
243 sec->used_by_bfd = sdata;
244 }
245
246 return _bfd_elf_new_section_hook (abfd, sec);
247 }
248
/* Set up overlay info for executables.  Walks the program headers of
   an executable or shared object, numbering each PT_LOAD segment
   flagged PF_OVERLAY and recording, for every section inside such a
   segment, which overlay (ovl_index) and which overlay buffer
   (ovl_buf) it belongs to.  Always returns TRUE.  */

static bfd_boolean
spu_elf_object_p (bfd *abfd)
{
  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
    {
      unsigned int i, num_ovl, num_buf;
      Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
      Elf_Internal_Ehdr *ehdr = elf_elfheader (abfd);
      Elf_Internal_Phdr *last_phdr = NULL;

      for (num_buf = 0, num_ovl = 0, i = 0; i < ehdr->e_phnum; i++, phdr++)
	if (phdr->p_type == PT_LOAD && (phdr->p_flags & PF_OVERLAY) != 0)
	  {
	    unsigned int j;

	    ++num_ovl;
	    /* Segments sharing the same 256k-aligned vaddr region
	       (0x3ffff mask) load into the same overlay buffer; a new
	       region starts a new buffer.  */
	    if (last_phdr == NULL
		|| ((last_phdr->p_vaddr ^ phdr->p_vaddr) & 0x3ffff) != 0)
	      ++num_buf;
	    last_phdr = phdr;
	    /* Tag every section that lies within this overlay segment.
	       Section index 0 is the null section, hence j starts at 1.  */
	    for (j = 1; j < elf_numsections (abfd); j++)
	      {
		Elf_Internal_Shdr *shdr = elf_elfsections (abfd)[j];

		if (ELF_IS_SECTION_IN_SEGMENT_MEMORY (shdr, phdr))
		  {
		    asection *sec = shdr->bfd_section;
		    spu_elf_section_data (sec)->u.o.ovl_index = num_ovl;
		    spu_elf_section_data (sec)->u.o.ovl_buf = num_buf;
		  }
	      }
	  }
    }
  return TRUE;
}
286
287 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
288 strip --strip-unneeded will not remove them. */
289
290 static void
291 spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
292 {
293 if (sym->name != NULL
294 && sym->section != bfd_abs_section_ptr
295 && strncmp (sym->name, "_EAR_", 5) == 0)
296 sym->flags |= BSF_KEEP;
297 }
298
299 /* SPU ELF linker hash table. */
300
struct spu_link_hash_table
{
  /* Generic ELF linker hash table; must be first so the struct can be
     cast to and from elf_link_hash_table.  */
  struct elf_link_hash_table elf;

  /* Linker command-line parameters, set by spu_elf_setup.  */
  struct spu_elf_params *params;

  /* Shortcuts to overlay sections.  */
  asection *ovtab;
  asection *init;
  asection *toe;
  asection **ovl_sec;

  /* Count of stubs in each overlay section.  */
  unsigned int *stub_count;

  /* The stub section for each overlay section.  */
  asection **stub_sec;

  /* Overlay manager entry points: [0] is the load/branch handler,
     [1] the return/call handler (see entry_names in
     spu_elf_find_overlays).  */
  struct elf_link_hash_entry *ovly_entry[2];

  /* Number of overlay buffers.  */
  unsigned int num_buf;

  /* Total number of overlays.  */
  unsigned int num_overlays;

  /* For soft icache.  */
  unsigned int line_size_log2;
  unsigned int num_lines_log2;
  unsigned int fromelem_size_log2;

  /* How much memory we have.  */
  unsigned int local_store;
  /* Local store --auto-overlay should reserve for non-overlay
     functions and data.  */
  unsigned int overlay_fixed;
  /* Local store --auto-overlay should reserve for stack and heap.  */
  unsigned int reserved;
  /* If reserved is not specified, stack analysis will calculate a value
     for the stack.  This parameter adjusts that value to allow for
     negative sp access (the ABI says 2000 bytes below sp are valid,
     and the overlay manager uses some of this area).  */
  int extra_stack_space;
  /* Count of overlay stubs needed in non-overlay area.  */
  unsigned int non_ovly_stub;

  /* Pointer to the fixup section.  */
  asection *sfixup;

  /* Set on error.  */
  unsigned int stub_err : 1;
};
353
354 /* Hijack the generic got fields for overlay stub accounting. */
355
struct got_entry
{
  /* Next entry for the same symbol.  */
  struct got_entry *next;
  /* Overlay index the referencing code lives in; 0 for the
     non-overlay area.  */
  unsigned int ovl;
  union {
    /* Reloc addend this stub serves.  */
    bfd_vma addend;
    /* Branch target address (alternate use of the same slot).  */
    bfd_vma br_addr;
  };
  /* Address of the stub itself, or -1 until assigned.  */
  bfd_vma stub_addr;
};
366
/* Retrieve the SPU-specific hash table from the generic link info.  */
#define spu_hash_table(p) \
  ((struct spu_link_hash_table *) ((p)->hash))

/* One edge in the call graph: a call from some function to FUN.  */
struct call_info
{
  struct function_info *fun;
  /* Next call made by the same caller.  */
  struct call_info *next;
  /* Number of times this call site was seen.  */
  unsigned int count;
  unsigned int max_depth;
  /* Tail call rather than a normal call.  */
  unsigned int is_tail : 1;
  /* Call arising from sections pasted together.  */
  unsigned int is_pasted : 1;
  /* Edge removed to break a cycle in the call graph.  */
  unsigned int broken_cycle : 1;
  unsigned int priority : 13;
};
381
struct function_info
{
  /* List of functions called.  Also branches to hot/cold part of
     function.  */
  struct call_info *call_list;
  /* For hot/cold part of function, point to owner.  */
  struct function_info *start;
  /* Symbol at start of function.  Exactly one of sym/h is used,
     depending on whether the symbol is local or global.  */
  union {
    Elf_Internal_Sym *sym;
    struct elf_link_hash_entry *h;
  } u;
  /* Function section.  */
  asection *sec;
  /* Read-only data associated with this function, if any.  */
  asection *rodata;
  /* Where last called from, and number of sections called from.  */
  asection *last_caller;
  unsigned int call_count;
  /* Address range of (this part of) function.  */
  bfd_vma lo, hi;
  /* Offset where we found a store of lr, or -1 if none found.  */
  bfd_vma lr_store;
  /* Offset where we found the stack adjustment insn.  */
  bfd_vma sp_adjust;
  /* Stack usage.  */
  int stack;
  /* Distance from root of call tree.  Tail and hot/cold branches
     count as one deeper.  We aren't counting stack frames here.  */
  unsigned int depth;
  /* Set if global symbol.  */
  unsigned int global : 1;
  /* Set if known to be start of function (as distinct from a hunk
     in hot/cold section.  */
  unsigned int is_func : 1;
  /* Set if not a root node.  */
  unsigned int non_root : 1;
  /* Flags used during call tree traversal.  It's cheaper to replicate
     the visit flags than have one which needs clearing after a traversal.  */
  unsigned int visit1 : 1;
  unsigned int visit2 : 1;
  unsigned int marking : 1;
  unsigned int visit3 : 1;
  unsigned int visit4 : 1;
  unsigned int visit5 : 1;
  unsigned int visit6 : 1;
  unsigned int visit7 : 1;
};
429
/* Per-section table of functions, grown as needed (num_fun of max_fun
   slots in use).  */
struct spu_elf_stack_info
{
  int num_fun;
  int max_fun;
  /* Variable size array describing functions, one per contiguous
     address range belonging to a function.  The [1] is the old-style
     trailing-array idiom; extra elements are allocated past the end.  */
  struct function_info fun[1];
};
438
439 static struct function_info *find_function (asection *, bfd_vma,
440 struct bfd_link_info *);
441
442 /* Create a spu ELF linker hash table. */
443
444 static struct bfd_link_hash_table *
445 spu_elf_link_hash_table_create (bfd *abfd)
446 {
447 struct spu_link_hash_table *htab;
448
449 htab = bfd_malloc (sizeof (*htab));
450 if (htab == NULL)
451 return NULL;
452
453 if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
454 _bfd_elf_link_hash_newfunc,
455 sizeof (struct elf_link_hash_entry)))
456 {
457 free (htab);
458 return NULL;
459 }
460
461 memset (&htab->ovtab, 0,
462 sizeof (*htab) - offsetof (struct spu_link_hash_table, ovtab));
463
464 htab->elf.init_got_refcount.refcount = 0;
465 htab->elf.init_got_refcount.glist = NULL;
466 htab->elf.init_got_offset.offset = 0;
467 htab->elf.init_got_offset.glist = NULL;
468 return &htab->elf.root;
469 }
470
471 void
472 spu_elf_setup (struct bfd_link_info *info, struct spu_elf_params *params)
473 {
474 bfd_vma max_branch_log2;
475
476 struct spu_link_hash_table *htab = spu_hash_table (info);
477 htab->params = params;
478 htab->line_size_log2 = bfd_log2 (htab->params->line_size);
479 htab->num_lines_log2 = bfd_log2 (htab->params->num_lines);
480
481 /* For the software i-cache, we provide a "from" list whose size
482 is a power-of-two number of quadwords, big enough to hold one
483 byte per outgoing branch. Compute this number here. */
484 max_branch_log2 = bfd_log2 (htab->params->max_branch);
485 htab->fromelem_size_log2 = max_branch_log2 > 4 ? max_branch_log2 - 4 : 0;
486 }
487
/* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
   to (hash, NULL) for global symbols, and (NULL, sym) for locals.  Set
   *SYMSECP to the symbol's section.  *LOCSYMSP caches local syms.
   Any of HP, SYMP, SYMSECP may be NULL if the caller doesn't need that
   result.  Returns FALSE only if reading the local symbol table
   fails.  */

static bfd_boolean
get_sym_h (struct elf_link_hash_entry **hp,
	   Elf_Internal_Sym **symp,
	   asection **symsecp,
	   Elf_Internal_Sym **locsymsp,
	   unsigned long r_symndx,
	   bfd *ibfd)
{
  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;

  /* Indices at or above sh_info are global symbols.  */
  if (r_symndx >= symtab_hdr->sh_info)
    {
      struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
      struct elf_link_hash_entry *h;

      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
      /* Follow indirect and warning links to the real symbol.  */
      while (h->root.type == bfd_link_hash_indirect
	     || h->root.type == bfd_link_hash_warning)
	h = (struct elf_link_hash_entry *) h->root.u.i.link;

      if (hp != NULL)
	*hp = h;

      if (symp != NULL)
	*symp = NULL;

      if (symsecp != NULL)
	{
	  asection *symsec = NULL;
	  if (h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	    symsec = h->root.u.def.section;
	  *symsecp = symsec;
	}
    }
  else
    {
      Elf_Internal_Sym *sym;
      Elf_Internal_Sym *locsyms = *locsymsp;

      if (locsyms == NULL)
	{
	  /* Prefer symbols already cached on the section header;
	     otherwise read and cache them in *LOCSYMSP.  */
	  locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
	  if (locsyms == NULL)
	    locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr,
					    symtab_hdr->sh_info,
					    0, NULL, NULL, NULL);
	  if (locsyms == NULL)
	    return FALSE;
	  *locsymsp = locsyms;
	}
      sym = locsyms + r_symndx;

      if (hp != NULL)
	*hp = NULL;

      if (symp != NULL)
	*symp = sym;

      if (symsecp != NULL)
	*symsecp = bfd_section_from_elf_index (ibfd, sym->st_shndx);
    }

  return TRUE;
}
557
/* Create the note section if not already present.  This is done early so
   that the linker maps the sections to the right place in the output.
   The note records the output filename (SPU_PTNOTE_SPUNAME); a .fixup
   section is also created when --emit-fixups is in force.  */

bfd_boolean
spu_elf_create_sections (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;

  /* If any input already has the note section, don't make another.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
      break;

  if (ibfd == NULL)
    {
      /* Make SPU_PTNOTE_SPUNAME section.  */
      asection *s;
      size_t name_len;
      size_t size;
      bfd_byte *data;
      flagword flags;

      ibfd = info->input_bfds;
      flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
      if (s == NULL
	  || !bfd_set_section_alignment (ibfd, s, 4))
	return FALSE;

      /* ELF note layout: 12-byte header (namesz, descsz, type), then
	 the name and the descriptor, each padded to a multiple of 4.
	 Here the name is SPU_PLUGIN_NAME and the descriptor is the
	 output filename including its NUL.  */
      name_len = strlen (bfd_get_filename (info->output_bfd)) + 1;
      size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
      size += (name_len + 3) & -4;

      if (!bfd_set_section_size (ibfd, s, size))
	return FALSE;

      data = bfd_zalloc (ibfd, size);
      if (data == NULL)
	return FALSE;

      bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
      bfd_put_32 (ibfd, name_len, data + 4);
      bfd_put_32 (ibfd, 1, data + 8);
      memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
      memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
	      bfd_get_filename (info->output_bfd), name_len);
      s->contents = data;
    }

  if (htab->params->emit_fixups)
    {
      asection *s;
      flagword flags;
      ibfd = info->input_bfds;
      flags = SEC_LOAD | SEC_ALLOC | SEC_READONLY | SEC_HAS_CONTENTS
	      | SEC_IN_MEMORY;
      s = bfd_make_section_anyway_with_flags (ibfd, ".fixup", flags);
      if (s == NULL || !bfd_set_section_alignment (ibfd, s, 2))
	return FALSE;
      htab->sfixup = s;
    }

  return TRUE;
}
622
623 /* qsort predicate to sort sections by vma. */
624
625 static int
626 sort_sections (const void *a, const void *b)
627 {
628 const asection *const *s1 = a;
629 const asection *const *s2 = b;
630 bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;
631
632 if (delta != 0)
633 return delta < 0 ? -1 : 1;
634
635 return (*s1)->index - (*s2)->index;
636 }
637
/* Identify overlays in the output bfd, and number them.
   Returns 0 on error, 1 if no overlays, 2 if overlays.
   Sections sharing a vma range are overlays; this fills in the
   per-section ovl_index/ovl_buf numbers, records totals and the sorted
   section array in the hash table, and creates undefined references to
   the overlay manager entry points so they get pulled in.  */

int
spu_elf_find_overlays (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection **alloc_sec;
  unsigned int i, n, ovl_index, num_buf;
  asection *s;
  bfd_vma ovl_end;
  /* Entry point names, indexed by [load/return][normal/soft-icache].  */
  static const char *const entry_names[2][2] = {
    { "__ovly_load", "__icache_br_handler" },
    { "__ovly_return", "__icache_call_handler" }
  };

  if (info->output_bfd->section_count < 2)
    return 1;

  alloc_sec
    = bfd_malloc (info->output_bfd->section_count * sizeof (*alloc_sec));
  if (alloc_sec == NULL)
    return 0;

  /* Pick out all the alloced sections.  */
  for (n = 0, s = info->output_bfd->sections; s != NULL; s = s->next)
    if ((s->flags & SEC_ALLOC) != 0
	&& (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
	&& s->size != 0)
      alloc_sec[n++] = s;

  if (n == 0)
    {
      free (alloc_sec);
      return 1;
    }

  /* Sort them by vma.  */
  qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);

  ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      /* Look for an overlapping vma to find the first overlay section.  */
      bfd_vma vma_start = 0;
      bfd_vma lma_start = 0;

      for (i = 1; i < n; i++)
	{
	  s = alloc_sec[i];
	  if (s->vma < ovl_end)
	    {
	      asection *s0 = alloc_sec[i - 1];
	      vma_start = s0->vma;
	      if (strncmp (s0->name, ".ovl.init", 9) != 0)
		lma_start = s0->lma;
	      else
		lma_start = s->lma;
	      /* The cache area spans num_lines lines of line_size
		 bytes from the first overlapping section.  */
	      ovl_end = (s0->vma
			 + ((bfd_vma) 1
			    << (htab->num_lines_log2 + htab->line_size_log2)));
	      --i;
	      break;
	    }
	  else
	    ovl_end = s->vma + s->size;
	}

      /* Now find any sections within the cache area.  */
      for (ovl_index = 0, num_buf = 0; i < n; i++)
	{
	  s = alloc_sec[i];
	  if (s->vma >= ovl_end)
	    break;

	  /* A section in an overlay area called .ovl.init is not
	     an overlay, in the sense that it might be loaded in
	     by the overlay manager, but rather the initial
	     section contents for the overlay buffer.  */
	  if (strncmp (s->name, ".ovl.init", 9) != 0)
	    {
	      num_buf = ((s->vma - vma_start) >> htab->line_size_log2) + 1;
	      /* Each overlay must start on a cache line and fit in
		 one, in both the vma and lma address spaces.  */
	      if (((s->vma - vma_start) & (htab->params->line_size - 1))
		  || ((s->lma - lma_start) & (htab->params->line_size - 1)))
		{
		  info->callbacks->einfo (_("%X%P: overlay section %A "
					    "does not start on a cache line.\n"),
					  s);
		  bfd_set_error (bfd_error_bad_value);
		  return 0;
		}
	      else if (s->size > htab->params->line_size)
		{
		  info->callbacks->einfo (_("%X%P: overlay section %A "
					    "is larger than a cache line.\n"),
					  s);
		  bfd_set_error (bfd_error_bad_value);
		  return 0;
		}

	      alloc_sec[ovl_index++] = s;
	      spu_elf_section_data (s)->u.o.ovl_index
		= ((s->lma - lma_start) >> htab->line_size_log2) + 1;
	      spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
	    }
	}

      /* Ensure there are no more overlay sections.  */
      for ( ; i < n; i++)
	{
	  s = alloc_sec[i];
	  if (s->vma < ovl_end)
	    {
	      info->callbacks->einfo (_("%X%P: overlay section %A "
					"is not in cache area.\n"),
				      alloc_sec[i-1]);
	      bfd_set_error (bfd_error_bad_value);
	      return 0;
	    }
	  else
	    ovl_end = s->vma + s->size;
	}
    }
  else
    {
      /* Look for overlapping vmas.  Any with overlap must be overlays.
	 Count them.  Also count the number of overlay regions.  */
      for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
	{
	  s = alloc_sec[i];
	  if (s->vma < ovl_end)
	    {
	      asection *s0 = alloc_sec[i - 1];

	      /* The first section of a new overlap group defines the
		 buffer; number it too unless it is .ovl.init.  */
	      if (spu_elf_section_data (s0)->u.o.ovl_index == 0)
		{
		  ++num_buf;
		  if (strncmp (s0->name, ".ovl.init", 9) != 0)
		    {
		      alloc_sec[ovl_index] = s0;
		      spu_elf_section_data (s0)->u.o.ovl_index = ++ovl_index;
		      spu_elf_section_data (s0)->u.o.ovl_buf = num_buf;
		    }
		  else
		    ovl_end = s->vma + s->size;
		}
	      if (strncmp (s->name, ".ovl.init", 9) != 0)
		{
		  alloc_sec[ovl_index] = s;
		  spu_elf_section_data (s)->u.o.ovl_index = ++ovl_index;
		  spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
		  if (s0->vma != s->vma)
		    {
		      info->callbacks->einfo (_("%X%P: overlay sections %A "
						"and %A do not start at the "
						"same address.\n"),
					      s0, s);
		      bfd_set_error (bfd_error_bad_value);
		      return 0;
		    }
		  if (ovl_end < s->vma + s->size)
		    ovl_end = s->vma + s->size;
		}
	    }
	  else
	    ovl_end = s->vma + s->size;
	}
    }

  htab->num_overlays = ovl_index;
  htab->num_buf = num_buf;
  htab->ovl_sec = alloc_sec;

  if (ovl_index == 0)
    return 1;

  /* Make sure the overlay manager entry points exist as (at least)
     undefined symbols so the link pulls in the manager.  */
  for (i = 0; i < 2; i++)
    {
      const char *name;
      struct elf_link_hash_entry *h;

      name = entry_names[i][htab->params->ovly_flavour];
      h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
      if (h == NULL)
	return 0;

      if (h->root.type == bfd_link_hash_new)
	{
	  h->root.type = bfd_link_hash_undefined;
	  h->ref_regular = 1;
	  h->ref_regular_nonweak = 1;
	  h->non_elf = 0;
	}
      htab->ovly_entry[i] = h;
    }

  return 2;
}
836
837 /* Non-zero to use bra in overlay stubs rather than br. */
838 #define BRA_STUBS 0
839
840 #define BRA 0x30000000
841 #define BRASL 0x31000000
842 #define BR 0x32000000
843 #define BRSL 0x33000000
844 #define NOP 0x40200000
845 #define LNOP 0x00200000
846 #define ILA 0x42000000
847
848 /* Return true for all relative and absolute branch instructions.
849 bra 00110000 0..
850 brasl 00110001 0..
851 br 00110010 0..
852 brsl 00110011 0..
853 brz 00100000 0..
854 brnz 00100001 0..
855 brhz 00100010 0..
856 brhnz 00100011 0.. */
857
858 static bfd_boolean
859 is_branch (const unsigned char *insn)
860 {
861 return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
862 }
863
864 /* Return true for all indirect branch instructions.
865 bi 00110101 000
866 bisl 00110101 001
867 iret 00110101 010
868 bisled 00110101 011
869 biz 00100101 000
870 binz 00100101 001
871 bihz 00100101 010
872 bihnz 00100101 011 */
873
874 static bfd_boolean
875 is_indirect_branch (const unsigned char *insn)
876 {
877 return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
878 }
879
880 /* Return true for branch hint instructions.
881 hbra 0001000..
882 hbrr 0001001.. */
883
884 static bfd_boolean
885 is_hint (const unsigned char *insn)
886 {
887 return (insn[0] & 0xfc) == 0x10;
888 }
889
890 /* True if INPUT_SECTION might need overlay stubs. */
891
892 static bfd_boolean
893 maybe_needs_stubs (asection *input_section)
894 {
895 /* No stubs for debug sections and suchlike. */
896 if ((input_section->flags & SEC_ALLOC) == 0)
897 return FALSE;
898
899 /* No stubs for link-once sections that will be discarded. */
900 if (input_section->output_section == bfd_abs_section_ptr)
901 return FALSE;
902
903 /* Don't create stubs for .eh_frame references. */
904 if (strcmp (input_section->name, ".eh_frame") == 0)
905 return FALSE;
906
907 return TRUE;
908 }
909
/* Classification of the stub (if any) needed for a reloc.  The br*
   values encode the three "lr live" bits from a branch instruction,
   so br000_ovl_stub + lrlive must stay contiguous.  */
enum _stub_type
{
  no_stub,
  call_ovl_stub,
  br000_ovl_stub,
  br001_ovl_stub,
  br010_ovl_stub,
  br011_ovl_stub,
  br100_ovl_stub,
  br101_ovl_stub,
  br110_ovl_stub,
  br111_ovl_stub,
  nonovl_stub,
  stub_error
};
925
/* Return non-zero if this reloc symbol should go via an overlay stub.
   Return 2 if the stub must be in non-overlay area.
   CONTENTS may be NULL, in which case the 4 bytes at the reloc offset
   are read from the section; otherwise it is the start of the section
   contents.  Returns stub_error if the section can't be read.  */

static enum _stub_type
needs_ovl_stub (struct elf_link_hash_entry *h,
		Elf_Internal_Sym *sym,
		asection *sym_sec,
		asection *input_section,
		Elf_Internal_Rela *irela,
		bfd_byte *contents,
		struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  enum elf_spu_reloc_type r_type;
  unsigned int sym_type;
  bfd_boolean branch, hint, call;
  enum _stub_type ret = no_stub;
  bfd_byte insn[4];

  /* No stub when the target section was discarded or has no SPU
     section data to consult.  */
  if (sym_sec == NULL
      || sym_sec->output_section == bfd_abs_section_ptr
      || spu_elf_section_data (sym_sec->output_section) == NULL)
    return ret;

  if (h != NULL)
    {
      /* Ensure no stubs for user supplied overlay manager syms.  */
      if (h == htab->ovly_entry[0] || h == htab->ovly_entry[1])
	return ret;

      /* setjmp always goes via an overlay stub, because then the return
	 and hence the longjmp goes via __ovly_return.  That magically
	 makes setjmp/longjmp between overlays work.  */
      if (strncmp (h->root.root.string, "setjmp", 6) == 0
	  && (h->root.root.string[6] == '\0' || h->root.root.string[6] == '@'))
	ret = call_ovl_stub;
    }

  if (h != NULL)
    sym_type = h->type;
  else
    sym_type = ELF_ST_TYPE (sym->st_info);

  r_type = ELF32_R_TYPE (irela->r_info);
  branch = FALSE;
  hint = FALSE;
  call = FALSE;
  /* Only these two reloc types can appear on branch instructions.  */
  if (r_type == R_SPU_REL16 || r_type == R_SPU_ADDR16)
    {
      if (contents == NULL)
	{
	  contents = insn;
	  if (!bfd_get_section_contents (input_section->owner,
					 input_section,
					 contents,
					 irela->r_offset, 4))
	    return stub_error;
	}
      else
	contents += irela->r_offset;

      branch = is_branch (contents);
      hint = is_hint (contents);
      if (branch || hint)
	{
	  /* 0x31/0x33 are brasl/brsl, i.e. calls.  */
	  call = (contents[0] & 0xfd) == 0x31;
	  if (call
	      && sym_type != STT_FUNC
	      && contents != insn)
	    {
	      /* It's common for people to write assembly and forget
		 to give function symbols the right type.  Handle
		 calls to such symbols, but warn so that (hopefully)
		 people will fix their code.  We need the symbol
		 type to be correct to distinguish function pointer
		 initialisation from other pointer initialisations.  */
	      const char *sym_name;

	      if (h != NULL)
		sym_name = h->root.root.string;
	      else
		{
		  Elf_Internal_Shdr *symtab_hdr;
		  symtab_hdr = &elf_tdata (input_section->owner)->symtab_hdr;
		  sym_name = bfd_elf_sym_name (input_section->owner,
					       symtab_hdr,
					       sym,
					       sym_sec);
		}
	      /* NOTE(review): the bfd error handler expects %A/%B
		 arguments first in the list regardless of their
		 position in the format, so owner-before-name is the
		 intended order here -- confirm against bfd.c.  */
	      (*_bfd_error_handler) (_("warning: call to non-function"
				       " symbol %s defined in %B"),
				     sym_sec->owner, sym_name);

	    }
	}
    }

  /* Soft-icache only stubs branches; data references to non-code
     symbols never need a stub.  */
  if ((!branch && htab->params->ovly_flavour == ovly_soft_icache)
      || (sym_type != STT_FUNC
	  && !(branch || hint)
	  && (sym_sec->flags & SEC_CODE) == 0))
    return no_stub;

  /* Usually, symbols in non-overlay sections don't need stubs.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index == 0
      && !htab->params->non_overlay_stubs)
    return ret;

  /* A reference from some other section to a symbol in an overlay
     section needs a stub.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index
      != spu_elf_section_data (input_section->output_section)->u.o.ovl_index)
    {
      unsigned int lrlive = 0;
      if (branch)
	lrlive = (contents[1] & 0x70) >> 4;

      if (!lrlive && (call || sym_type == STT_FUNC))
	ret = call_ovl_stub;
      else
	/* Encode the lr-liveness bits in the stub type.  */
	ret = br000_ovl_stub + lrlive;
    }

  /* If this insn isn't a branch then we are possibly taking the
     address of a function and passing it out somehow.  Soft-icache code
     always generates inline code to do indirect branches.  */
  if (!(branch || hint)
      && sym_type == STT_FUNC
      && htab->params->ovly_flavour != ovly_soft_icache)
    ret = nonovl_stub;

  return ret;
}
1059
1060 static bfd_boolean
1061 count_stub (struct spu_link_hash_table *htab,
1062 bfd *ibfd,
1063 asection *isec,
1064 enum _stub_type stub_type,
1065 struct elf_link_hash_entry *h,
1066 const Elf_Internal_Rela *irela)
1067 {
1068 unsigned int ovl = 0;
1069 struct got_entry *g, **head;
1070 bfd_vma addend;
1071
1072 /* If this instruction is a branch or call, we need a stub
1073 for it. One stub per function per overlay.
1074 If it isn't a branch, then we are taking the address of
1075 this function so need a stub in the non-overlay area
1076 for it. One stub per function. */
1077 if (stub_type != nonovl_stub)
1078 ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;
1079
1080 if (h != NULL)
1081 head = &h->got.glist;
1082 else
1083 {
1084 if (elf_local_got_ents (ibfd) == NULL)
1085 {
1086 bfd_size_type amt = (elf_tdata (ibfd)->symtab_hdr.sh_info
1087 * sizeof (*elf_local_got_ents (ibfd)));
1088 elf_local_got_ents (ibfd) = bfd_zmalloc (amt);
1089 if (elf_local_got_ents (ibfd) == NULL)
1090 return FALSE;
1091 }
1092 head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
1093 }
1094
1095 if (htab->params->ovly_flavour == ovly_soft_icache)
1096 {
1097 htab->stub_count[ovl] += 1;
1098 return TRUE;
1099 }
1100
1101 addend = 0;
1102 if (irela != NULL)
1103 addend = irela->r_addend;
1104
1105 if (ovl == 0)
1106 {
1107 struct got_entry *gnext;
1108
1109 for (g = *head; g != NULL; g = g->next)
1110 if (g->addend == addend && g->ovl == 0)
1111 break;
1112
1113 if (g == NULL)
1114 {
1115 /* Need a new non-overlay area stub. Zap other stubs. */
1116 for (g = *head; g != NULL; g = gnext)
1117 {
1118 gnext = g->next;
1119 if (g->addend == addend)
1120 {
1121 htab->stub_count[g->ovl] -= 1;
1122 free (g);
1123 }
1124 }
1125 }
1126 }
1127 else
1128 {
1129 for (g = *head; g != NULL; g = g->next)
1130 if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
1131 break;
1132 }
1133
1134 if (g == NULL)
1135 {
1136 g = bfd_malloc (sizeof *g);
1137 if (g == NULL)
1138 return FALSE;
1139 g->ovl = ovl;
1140 g->addend = addend;
1141 g->stub_addr = (bfd_vma) -1;
1142 g->next = *head;
1143 *head = g;
1144
1145 htab->stub_count[ovl] += 1;
1146 }
1147
1148 return TRUE;
1149 }
1150
/* Support two sizes of overlay stubs, a slower more compact stub of two
   instructions, and a faster stub of four instructions.
   Soft-icache stubs are four or eight words.  */
1154
1155 static unsigned int
1156 ovl_stub_size (struct spu_elf_params *params)
1157 {
1158 return 16 << params->ovly_flavour >> params->compact_stub;
1159 }
1160
1161 static unsigned int
1162 ovl_stub_size_log2 (struct spu_elf_params *params)
1163 {
1164 return 4 + params->ovly_flavour - params->compact_stub;
1165 }
1166
1167 /* Two instruction overlay stubs look like:
1168
1169 brsl $75,__ovly_load
1170 .word target_ovl_and_address
1171
1172 ovl_and_address is a word with the overlay number in the top 14 bits
1173 and local store address in the bottom 18 bits.
1174
1175 Four instruction overlay stubs look like:
1176
1177 ila $78,ovl_number
1178 lnop
1179 ila $79,target_address
1180 br __ovly_load
1181
1182 Software icache stubs are:
1183
1184 .word target_index
1185 .word target_ia;
1186 .word lrlive_branchlocalstoreaddr;
1187 brasl $75,__icache_br_handler
1188 .quad xor_pattern
1189 */
1190
1191 static bfd_boolean
1192 build_stub (struct bfd_link_info *info,
1193 bfd *ibfd,
1194 asection *isec,
1195 enum _stub_type stub_type,
1196 struct elf_link_hash_entry *h,
1197 const Elf_Internal_Rela *irela,
1198 bfd_vma dest,
1199 asection *dest_sec)
1200 {
1201 struct spu_link_hash_table *htab = spu_hash_table (info);
1202 unsigned int ovl, dest_ovl, set_id;
1203 struct got_entry *g, **head;
1204 asection *sec;
1205 bfd_vma addend, from, to, br_dest, patt;
1206 unsigned int lrlive;
1207
1208 ovl = 0;
1209 if (stub_type != nonovl_stub)
1210 ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;
1211
1212 if (h != NULL)
1213 head = &h->got.glist;
1214 else
1215 head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
1216
1217 addend = 0;
1218 if (irela != NULL)
1219 addend = irela->r_addend;
1220
1221 if (htab->params->ovly_flavour == ovly_soft_icache)
1222 {
1223 g = bfd_malloc (sizeof *g);
1224 if (g == NULL)
1225 return FALSE;
1226 g->ovl = ovl;
1227 g->br_addr = 0;
1228 if (irela != NULL)
1229 g->br_addr = (irela->r_offset
1230 + isec->output_offset
1231 + isec->output_section->vma);
1232 g->next = *head;
1233 *head = g;
1234 }
1235 else
1236 {
1237 for (g = *head; g != NULL; g = g->next)
1238 if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
1239 break;
1240 if (g == NULL)
1241 abort ();
1242
1243 if (g->ovl == 0 && ovl != 0)
1244 return TRUE;
1245
1246 if (g->stub_addr != (bfd_vma) -1)
1247 return TRUE;
1248 }
1249
1250 sec = htab->stub_sec[ovl];
1251 dest += dest_sec->output_offset + dest_sec->output_section->vma;
1252 from = sec->size + sec->output_offset + sec->output_section->vma;
1253 g->stub_addr = from;
1254 to = (htab->ovly_entry[0]->root.u.def.value
1255 + htab->ovly_entry[0]->root.u.def.section->output_offset
1256 + htab->ovly_entry[0]->root.u.def.section->output_section->vma);
1257
1258 if (((dest | to | from) & 3) != 0)
1259 {
1260 htab->stub_err = 1;
1261 return FALSE;
1262 }
1263 dest_ovl = spu_elf_section_data (dest_sec->output_section)->u.o.ovl_index;
1264
1265 if (htab->params->ovly_flavour == ovly_normal
1266 && !htab->params->compact_stub)
1267 {
1268 bfd_put_32 (sec->owner, ILA + ((dest_ovl << 7) & 0x01ffff80) + 78,
1269 sec->contents + sec->size);
1270 bfd_put_32 (sec->owner, LNOP,
1271 sec->contents + sec->size + 4);
1272 bfd_put_32 (sec->owner, ILA + ((dest << 7) & 0x01ffff80) + 79,
1273 sec->contents + sec->size + 8);
1274 if (!BRA_STUBS)
1275 bfd_put_32 (sec->owner, BR + (((to - (from + 12)) << 5) & 0x007fff80),
1276 sec->contents + sec->size + 12);
1277 else
1278 bfd_put_32 (sec->owner, BRA + ((to << 5) & 0x007fff80),
1279 sec->contents + sec->size + 12);
1280 }
1281 else if (htab->params->ovly_flavour == ovly_normal
1282 && htab->params->compact_stub)
1283 {
1284 if (!BRA_STUBS)
1285 bfd_put_32 (sec->owner, BRSL + (((to - from) << 5) & 0x007fff80) + 75,
1286 sec->contents + sec->size);
1287 else
1288 bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
1289 sec->contents + sec->size);
1290 bfd_put_32 (sec->owner, (dest & 0x3ffff) | (dest_ovl << 18),
1291 sec->contents + sec->size + 4);
1292 }
1293 else if (htab->params->ovly_flavour == ovly_soft_icache
1294 && htab->params->compact_stub)
1295 {
1296 lrlive = 0;
1297 if (stub_type == nonovl_stub)
1298 ;
1299 else if (stub_type == call_ovl_stub)
1300 /* A brsl makes lr live and *(*sp+16) is live.
1301 Tail calls have the same liveness. */
1302 lrlive = 5;
1303 else if (!htab->params->lrlive_analysis)
1304 /* Assume stack frame and lr save. */
1305 lrlive = 1;
1306 else if (irela != NULL)
1307 {
1308 /* Analyse branch instructions. */
1309 struct function_info *caller;
1310 bfd_vma off;
1311
1312 caller = find_function (isec, irela->r_offset, info);
1313 if (caller->start == NULL)
1314 off = irela->r_offset;
1315 else
1316 {
1317 struct function_info *found = NULL;
1318
1319 /* Find the earliest piece of this function that
1320 has frame adjusting instructions. We might
1321 see dynamic frame adjustment (eg. for alloca)
1322 in some later piece, but functions using
1323 alloca always set up a frame earlier. Frame
1324 setup instructions are always in one piece. */
1325 if (caller->lr_store != (bfd_vma) -1
1326 || caller->sp_adjust != (bfd_vma) -1)
1327 found = caller;
1328 while (caller->start != NULL)
1329 {
1330 caller = caller->start;
1331 if (caller->lr_store != (bfd_vma) -1
1332 || caller->sp_adjust != (bfd_vma) -1)
1333 found = caller;
1334 }
1335 if (found != NULL)
1336 caller = found;
1337 off = (bfd_vma) -1;
1338 }
1339
1340 if (off > caller->sp_adjust)
1341 {
1342 if (off > caller->lr_store)
1343 /* Only *(*sp+16) is live. */
1344 lrlive = 1;
1345 else
1346 /* If no lr save, then we must be in a
1347 leaf function with a frame.
1348 lr is still live. */
1349 lrlive = 4;
1350 }
1351 else if (off > caller->lr_store)
1352 {
1353 /* Between lr save and stack adjust. */
1354 lrlive = 3;
1355 /* This should never happen since prologues won't
1356 be split here. */
1357 BFD_ASSERT (0);
1358 }
1359 else
1360 /* On entry to function. */
1361 lrlive = 5;
1362
1363 if (stub_type != br000_ovl_stub
1364 && lrlive != stub_type - br000_ovl_stub)
1365 info->callbacks->einfo (_("%A:0x%v lrlive .brinfo (%u) differs "
1366 "from analysis (%u)\n"),
1367 isec, irela->r_offset, lrlive,
1368 stub_type - br000_ovl_stub);
1369 }
1370
1371 /* If given lrlive info via .brinfo, use it. */
1372 if (stub_type > br000_ovl_stub)
1373 lrlive = stub_type - br000_ovl_stub;
1374
1375 if (ovl == 0)
1376 to = (htab->ovly_entry[1]->root.u.def.value
1377 + htab->ovly_entry[1]->root.u.def.section->output_offset
1378 + htab->ovly_entry[1]->root.u.def.section->output_section->vma);
1379
1380 /* The branch that uses this stub goes to stub_addr + 4. We'll
1381 set up an xor pattern that can be used by the icache manager
1382 to modify this branch to go directly to its destination. */
1383 g->stub_addr += 4;
1384 br_dest = g->stub_addr;
1385 if (irela == NULL)
1386 {
1387 /* Except in the case of _SPUEAR_ stubs, the branch in
1388 question is the one in the stub itself. */
1389 BFD_ASSERT (stub_type == nonovl_stub);
1390 g->br_addr = g->stub_addr;
1391 br_dest = to;
1392 }
1393
1394 set_id = ((dest_ovl - 1) >> htab->num_lines_log2) + 1;
1395 bfd_put_32 (sec->owner, (set_id << 18) | (dest & 0x3ffff),
1396 sec->contents + sec->size);
1397 bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
1398 sec->contents + sec->size + 4);
1399 bfd_put_32 (sec->owner, (lrlive << 29) | (g->br_addr & 0x3ffff),
1400 sec->contents + sec->size + 8);
1401 patt = dest ^ br_dest;
1402 if (irela != NULL && ELF32_R_TYPE (irela->r_info) == R_SPU_REL16)
1403 patt = (dest - g->br_addr) ^ (br_dest - g->br_addr);
1404 bfd_put_32 (sec->owner, (patt << 5) & 0x007fff80,
1405 sec->contents + sec->size + 12);
1406
1407 if (ovl == 0)
1408 /* Extra space for linked list entries. */
1409 sec->size += 16;
1410 }
1411 else
1412 abort ();
1413
1414 sec->size += ovl_stub_size (htab->params);
1415
1416 if (htab->params->emit_stub_syms)
1417 {
1418 size_t len;
1419 char *name;
1420 int add;
1421
1422 len = 8 + sizeof (".ovl_call.") - 1;
1423 if (h != NULL)
1424 len += strlen (h->root.root.string);
1425 else
1426 len += 8 + 1 + 8;
1427 add = 0;
1428 if (irela != NULL)
1429 add = (int) irela->r_addend & 0xffffffff;
1430 if (add != 0)
1431 len += 1 + 8;
1432 name = bfd_malloc (len);
1433 if (name == NULL)
1434 return FALSE;
1435
1436 sprintf (name, "%08x.ovl_call.", g->ovl);
1437 if (h != NULL)
1438 strcpy (name + 8 + sizeof (".ovl_call.") - 1, h->root.root.string);
1439 else
1440 sprintf (name + 8 + sizeof (".ovl_call.") - 1, "%x:%x",
1441 dest_sec->id & 0xffffffff,
1442 (int) ELF32_R_SYM (irela->r_info) & 0xffffffff);
1443 if (add != 0)
1444 sprintf (name + len - 9, "+%x", add);
1445
1446 h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
1447 free (name);
1448 if (h == NULL)
1449 return FALSE;
1450 if (h->root.type == bfd_link_hash_new)
1451 {
1452 h->root.type = bfd_link_hash_defined;
1453 h->root.u.def.section = sec;
1454 h->size = ovl_stub_size (htab->params);
1455 h->root.u.def.value = sec->size - h->size;
1456 h->type = STT_FUNC;
1457 h->ref_regular = 1;
1458 h->def_regular = 1;
1459 h->ref_regular_nonweak = 1;
1460 h->forced_local = 1;
1461 h->non_elf = 0;
1462 }
1463 }
1464
1465 return TRUE;
1466 }
1467
1468 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
1469 symbols. */
1470
1471 static bfd_boolean
1472 allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1473 {
1474 /* Symbols starting with _SPUEAR_ need a stub because they may be
1475 invoked by the PPU. */
1476 struct bfd_link_info *info = inf;
1477 struct spu_link_hash_table *htab = spu_hash_table (info);
1478 asection *sym_sec;
1479
1480 if ((h->root.type == bfd_link_hash_defined
1481 || h->root.type == bfd_link_hash_defweak)
1482 && h->def_regular
1483 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
1484 && (sym_sec = h->root.u.def.section) != NULL
1485 && sym_sec->output_section != bfd_abs_section_ptr
1486 && spu_elf_section_data (sym_sec->output_section) != NULL
1487 && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
1488 || htab->params->non_overlay_stubs))
1489 {
1490 return count_stub (htab, NULL, NULL, nonovl_stub, h, NULL);
1491 }
1492
1493 return TRUE;
1494 }
1495
1496 static bfd_boolean
1497 build_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1498 {
1499 /* Symbols starting with _SPUEAR_ need a stub because they may be
1500 invoked by the PPU. */
1501 struct bfd_link_info *info = inf;
1502 struct spu_link_hash_table *htab = spu_hash_table (info);
1503 asection *sym_sec;
1504
1505 if ((h->root.type == bfd_link_hash_defined
1506 || h->root.type == bfd_link_hash_defweak)
1507 && h->def_regular
1508 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
1509 && (sym_sec = h->root.u.def.section) != NULL
1510 && sym_sec->output_section != bfd_abs_section_ptr
1511 && spu_elf_section_data (sym_sec->output_section) != NULL
1512 && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
1513 || htab->params->non_overlay_stubs))
1514 {
1515 return build_stub (info, NULL, NULL, nonovl_stub, h, NULL,
1516 h->root.u.def.value, sym_sec);
1517 }
1518
1519 return TRUE;
1520 }
1521
1522 /* Size or build stubs. */
1523
/* Walk every reloc in every SPU input section, either counting the
   overlay stubs needed (BUILD is FALSE) or building them (BUILD is
   TRUE).  Both passes must visit relocs in the same order so that
   build_stub fills exactly the space count_stub reserved.  Returns
   FALSE on error.  */

static bfd_boolean
process_stubs (struct bfd_link_info *info, bfd_boolean build)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      Elf_Internal_Shdr *symtab_hdr;
      asection *isec;
      Elf_Internal_Sym *local_syms = NULL;

      /* Skip input files that are not SPU ELF.  */
      if (ibfd->xvec != &bfd_elf32_spu_vec)
        continue;

      /* We'll need the symbol table in a second.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      if (symtab_hdr->sh_info == 0)
        continue;

      /* Walk over each section attached to the input bfd.  */
      for (isec = ibfd->sections; isec != NULL; isec = isec->next)
        {
          Elf_Internal_Rela *internal_relocs, *irelaend, *irela;

          /* If there aren't any relocs, then there's nothing more to do.  */
          if ((isec->flags & SEC_RELOC) == 0
              || isec->reloc_count == 0)
            continue;

          if (!maybe_needs_stubs (isec))
            continue;

          /* Get the relocs.  */
          internal_relocs = _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
                                                       info->keep_memory);
          if (internal_relocs == NULL)
            goto error_ret_free_local;

          /* Now examine each relocation.  */
          irela = internal_relocs;
          irelaend = irela + isec->reloc_count;
          for (; irela < irelaend; irela++)
            {
              enum elf_spu_reloc_type r_type;
              unsigned int r_indx;
              asection *sym_sec;
              Elf_Internal_Sym *sym;
              struct elf_link_hash_entry *h;
              enum _stub_type stub_type;

              r_type = ELF32_R_TYPE (irela->r_info);
              r_indx = ELF32_R_SYM (irela->r_info);

              if (r_type >= R_SPU_max)
                {
                  bfd_set_error (bfd_error_bad_value);
                  /* Shared error unwinding: free the reloc buffer if
                     we own it, then the symbol buffer if we own it.  */
                error_ret_free_internal:
                  if (elf_section_data (isec)->relocs != internal_relocs)
                    free (internal_relocs);
                error_ret_free_local:
                  if (local_syms != NULL
                      && (symtab_hdr->contents
                          != (unsigned char *) local_syms))
                    free (local_syms);
                  return FALSE;
                }

              /* Determine the reloc target section.  */
              if (!get_sym_h (&h, &sym, &sym_sec, &local_syms, r_indx, ibfd))
                goto error_ret_free_internal;

              stub_type = needs_ovl_stub (h, sym, sym_sec, isec, irela,
                                          NULL, info);
              if (stub_type == no_stub)
                continue;
              else if (stub_type == stub_error)
                goto error_ret_free_internal;

              /* Lazily allocate the per-overlay stub counters
                 (index 0 is the non-overlay area).  */
              if (htab->stub_count == NULL)
                {
                  bfd_size_type amt;
                  amt = (htab->num_overlays + 1) * sizeof (*htab->stub_count);
                  htab->stub_count = bfd_zmalloc (amt);
                  if (htab->stub_count == NULL)
                    goto error_ret_free_internal;
                }

              if (!build)
                {
                  if (!count_stub (htab, ibfd, isec, stub_type, h, irela))
                    goto error_ret_free_internal;
                }
              else
                {
                  bfd_vma dest;

                  if (h != NULL)
                    dest = h->root.u.def.value;
                  else
                    dest = sym->st_value;
                  dest += irela->r_addend;
                  if (!build_stub (info, ibfd, isec, stub_type, h, irela,
                                   dest, sym_sec))
                    goto error_ret_free_internal;
                }
            }

          /* We're done with the internal relocs, free them.  */
          if (elf_section_data (isec)->relocs != internal_relocs)
            free (internal_relocs);
        }

      /* Free or cache the symbol buffer, per the usual BFD pattern.  */
      if (local_syms != NULL
          && symtab_hdr->contents != (unsigned char *) local_syms)
        {
          if (!info->keep_memory)
            free (local_syms);
          else
            symtab_hdr->contents = (unsigned char *) local_syms;
        }
    }

  return TRUE;
}
1650
1651 /* Allocate space for overlay call and return stubs.
1652 Return 0 on error, 1 if no overlays, 2 otherwise. */
1653
int
spu_elf_size_stubs (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab;
  bfd *ibfd;
  bfd_size_type amt;
  flagword flags;
  unsigned int i;
  asection *stub;

  /* First pass over relocs: count the stubs needed per overlay.  */
  if (!process_stubs (info, FALSE))
    return 0;

  htab = spu_hash_table (info);
  /* _SPUEAR_ symbols also need non-overlay stubs.  */
  elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, info);
  if (htab->stub_err)
    return 0;

  /* Stub sections are created in the first input bfd.  */
  ibfd = info->input_bfds;
  if (htab->stub_count != NULL)
    {
      /* One stub section for the non-overlay area (index 0), plus one
         per overlay.  */
      amt = (htab->num_overlays + 1) * sizeof (*htab->stub_sec);
      htab->stub_sec = bfd_zmalloc (amt);
      if (htab->stub_sec == NULL)
        return 0;

      flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
               | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
      stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
      htab->stub_sec[0] = stub;
      if (stub == NULL
          || !bfd_set_section_alignment (ibfd, stub,
                                         ovl_stub_size_log2 (htab->params)))
        return 0;
      stub->size = htab->stub_count[0] * ovl_stub_size (htab->params);
      if (htab->params->ovly_flavour == ovly_soft_icache)
        /* Extra space for linked list entries.  */
        stub->size += htab->stub_count[0] * 16;

      /* Size a .stub section for each overlay.  */
      for (i = 0; i < htab->num_overlays; ++i)
        {
          asection *osec = htab->ovl_sec[i];
          unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
          stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
          htab->stub_sec[ovl] = stub;
          if (stub == NULL
              || !bfd_set_section_alignment (ibfd, stub,
                                             ovl_stub_size_log2 (htab->params)))
            return 0;
          stub->size = htab->stub_count[ovl] * ovl_stub_size (htab->params);
        }
    }

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      /* Space for icache manager tables.
         a) Tag array, one quadword per cache line.
         b) Rewrite "to" list, one quadword per cache line.
         c) Rewrite "from" list, one byte per outgoing branch (rounded up to
            a power-of-two number of full quadwords) per cache line.  */

      flags = SEC_ALLOC;
      htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
      if (htab->ovtab == NULL
          || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
        return 0;

      htab->ovtab->size = (16 + 16 + (16 << htab->fromelem_size_log2))
                          << htab->num_lines_log2;

      /* .ovini holds load-time initialization data (file offset).  */
      flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      htab->init = bfd_make_section_anyway_with_flags (ibfd, ".ovini", flags);
      if (htab->init == NULL
          || !bfd_set_section_alignment (ibfd, htab->init, 4))
        return 0;

      htab->init->size = 16;
    }
  else if (htab->stub_count == NULL)
    return 1;
  else
    {
      /* htab->ovtab consists of two arrays.
         .      struct {
         .        u32 vma;
         .        u32 size;
         .        u32 file_off;
         .        u32 buf;
         .      } _ovly_table[];
         .
         .      struct {
         .        u32 mapped;
         .      } _ovly_buf_table[];
         .  */

      flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
      if (htab->ovtab == NULL
          || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
        return 0;

      /* The extra 16 bytes cover the leading non-overlay entry.  */
      htab->ovtab->size = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
    }

  /* .toe holds the _EAR_ table of effective addresses.  */
  htab->toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
  if (htab->toe == NULL
      || !bfd_set_section_alignment (ibfd, htab->toe, 4))
    return 0;
  htab->toe->size = 16;

  return 2;
}
1766
1767 /* Called from ld to place overlay manager data sections. This is done
1768 after the overlay manager itself is loaded, mainly so that the
1769 linker's htab->init section is placed after any other .ovl.init
1770 sections. */
1771
1772 void
1773 spu_elf_place_overlay_data (struct bfd_link_info *info)
1774 {
1775 struct spu_link_hash_table *htab = spu_hash_table (info);
1776 unsigned int i;
1777
1778 if (htab->stub_sec != NULL)
1779 {
1780 (*htab->params->place_spu_section) (htab->stub_sec[0], NULL, ".text");
1781
1782 for (i = 0; i < htab->num_overlays; ++i)
1783 {
1784 asection *osec = htab->ovl_sec[i];
1785 unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
1786 (*htab->params->place_spu_section) (htab->stub_sec[ovl], osec, NULL);
1787 }
1788 }
1789
1790 if (htab->params->ovly_flavour == ovly_soft_icache)
1791 (*htab->params->place_spu_section) (htab->init, NULL, ".ovl.init");
1792
1793 if (htab->ovtab != NULL)
1794 {
1795 const char *ovout = ".data";
1796 if (htab->params->ovly_flavour == ovly_soft_icache)
1797 ovout = ".bss";
1798 (*htab->params->place_spu_section) (htab->ovtab, NULL, ovout);
1799 }
1800
1801 if (htab->toe != NULL)
1802 (*htab->params->place_spu_section) (htab->toe, NULL, ".toe");
1803 }
1804
1805 /* Functions to handle embedded spu_ovl.o object. */
1806
/* bfd_openr_iovec "open" hook for the built-in overlay manager: the
   stream cookie is the _ovl_stream itself, so just pass it through.  */

static void *
ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
{
  return stream;
}
1812
1813 static file_ptr
1814 ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
1815 void *stream,
1816 void *buf,
1817 file_ptr nbytes,
1818 file_ptr offset)
1819 {
1820 struct _ovl_stream *os;
1821 size_t count;
1822 size_t max;
1823
1824 os = (struct _ovl_stream *) stream;
1825 max = (const char *) os->end - (const char *) os->start;
1826
1827 if ((ufile_ptr) offset >= max)
1828 return 0;
1829
1830 count = nbytes;
1831 if (count > max - offset)
1832 count = max - offset;
1833
1834 memcpy (buf, (const char *) os->start + offset, count);
1835 return count;
1836 }
1837
1838 bfd_boolean
1839 spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
1840 {
1841 *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
1842 "elf32-spu",
1843 ovl_mgr_open,
1844 (void *) stream,
1845 ovl_mgr_pread,
1846 NULL,
1847 NULL);
1848 return *ovl_bfd != NULL;
1849 }
1850
1851 static unsigned int
1852 overlay_index (asection *sec)
1853 {
1854 if (sec == NULL
1855 || sec->output_section == bfd_abs_section_ptr)
1856 return 0;
1857 return spu_elf_section_data (sec->output_section)->u.o.ovl_index;
1858 }
1859
1860 /* Define an STT_OBJECT symbol. */
1861
1862 static struct elf_link_hash_entry *
1863 define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
1864 {
1865 struct elf_link_hash_entry *h;
1866
1867 h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
1868 if (h == NULL)
1869 return NULL;
1870
1871 if (h->root.type != bfd_link_hash_defined
1872 || !h->def_regular)
1873 {
1874 h->root.type = bfd_link_hash_defined;
1875 h->root.u.def.section = htab->ovtab;
1876 h->type = STT_OBJECT;
1877 h->ref_regular = 1;
1878 h->def_regular = 1;
1879 h->ref_regular_nonweak = 1;
1880 h->non_elf = 0;
1881 }
1882 else if (h->root.u.def.section->owner != NULL)
1883 {
1884 (*_bfd_error_handler) (_("%B is not allowed to define %s"),
1885 h->root.u.def.section->owner,
1886 h->root.root.string);
1887 bfd_set_error (bfd_error_bad_value);
1888 return NULL;
1889 }
1890 else
1891 {
1892 (*_bfd_error_handler) (_("you are not allowed to define %s in a script"),
1893 h->root.root.string);
1894 bfd_set_error (bfd_error_bad_value);
1895 return NULL;
1896 }
1897
1898 return h;
1899 }
1900
1901 /* Fill in all stubs and the overlay tables. */
1902
static bfd_boolean
spu_elf_build_stubs (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  struct elf_link_hash_entry *h;
  bfd_byte *p;
  asection *s;
  bfd *obfd;
  unsigned int i;

  if (htab->num_overlays != 0)
    {
      /* The overlay manager entry points must not themselves live in
         an overlay.  */
      for (i = 0; i < 2; i++)
        {
          h = htab->ovly_entry[i];
          if (h != NULL
              && (h->root.type == bfd_link_hash_defined
                  || h->root.type == bfd_link_hash_defweak)
              && h->def_regular)
            {
              s = h->root.u.def.section->output_section;
              if (spu_elf_section_data (s)->u.o.ovl_index)
                {
                  (*_bfd_error_handler) (_("%s in overlay section"),
                                         h->root.root.string);
                  bfd_set_error (bfd_error_bad_value);
                  return FALSE;
                }
            }
        }
    }

  if (htab->stub_sec != NULL)
    {
      /* Allocate the stub section buffers.  build_stub re-accumulates
         "size" as it emits, so stash the expected size in rawsize and
         reset size to 0 for the build pass.  */
      for (i = 0; i <= htab->num_overlays; i++)
        if (htab->stub_sec[i]->size != 0)
          {
            htab->stub_sec[i]->contents = bfd_zalloc (htab->stub_sec[i]->owner,
                                                      htab->stub_sec[i]->size);
            if (htab->stub_sec[i]->contents == NULL)
              return FALSE;
            htab->stub_sec[i]->rawsize = htab->stub_sec[i]->size;
            htab->stub_sec[i]->size = 0;
          }

      /* Fill in all the stubs.  */
      process_stubs (info, TRUE);
      if (!htab->stub_err)
        elf_link_hash_traverse (&htab->elf, build_spuear_stubs, info);

      if (htab->stub_err)
        {
          (*_bfd_error_handler) (_("overlay stub relocation overflow"));
          bfd_set_error (bfd_error_bad_value);
          return FALSE;
        }

      /* The build pass must emit exactly the bytes the sizing pass
         counted.  */
      for (i = 0; i <= htab->num_overlays; i++)
        {
          if (htab->stub_sec[i]->size != htab->stub_sec[i]->rawsize)
            {
              (*_bfd_error_handler) (_("stubs don't match calculated size"));
              bfd_set_error (bfd_error_bad_value);
              return FALSE;
            }
          htab->stub_sec[i]->rawsize = 0;
        }
    }

  if (htab->ovtab == NULL || htab->ovtab->size == 0)
    return TRUE;

  htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
  if (htab->ovtab->contents == NULL)
    return FALSE;

  p = htab->ovtab->contents;
  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      bfd_vma off;

      /* Define the symbols delimiting the icache manager tables laid
         out consecutively in .ovtab, plus a set of absolute-valued
         symbols describing the cache geometry for the manager code.  */
      h = define_ovtab_symbol (htab, "__icache_tag_array");
      if (h == NULL)
        return FALSE;
      h->root.u.def.value = 0;
      h->size = 16 << htab->num_lines_log2;
      off = h->size;

      h = define_ovtab_symbol (htab, "__icache_tag_array_size");
      if (h == NULL)
        return FALSE;
      h->root.u.def.value = 16 << htab->num_lines_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_rewrite_to");
      if (h == NULL)
        return FALSE;
      h->root.u.def.value = off;
      h->size = 16 << htab->num_lines_log2;
      off += h->size;

      h = define_ovtab_symbol (htab, "__icache_rewrite_to_size");
      if (h == NULL)
        return FALSE;
      h->root.u.def.value = 16 << htab->num_lines_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_rewrite_from");
      if (h == NULL)
        return FALSE;
      h->root.u.def.value = off;
      h->size = 16 << (htab->fromelem_size_log2 + htab->num_lines_log2);
      off += h->size;

      h = define_ovtab_symbol (htab, "__icache_rewrite_from_size");
      if (h == NULL)
        return FALSE;
      h->root.u.def.value = 16 << (htab->fromelem_size_log2
                                   + htab->num_lines_log2);
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_log2_fromelemsize");
      if (h == NULL)
        return FALSE;
      h->root.u.def.value = htab->fromelem_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_base");
      if (h == NULL)
        return FALSE;
      h->root.u.def.value = htab->ovl_sec[0]->vma;
      h->root.u.def.section = bfd_abs_section_ptr;
      h->size = htab->num_buf << htab->line_size_log2;

      h = define_ovtab_symbol (htab, "__icache_linesize");
      if (h == NULL)
        return FALSE;
      h->root.u.def.value = 1 << htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_log2_linesize");
      if (h == NULL)
        return FALSE;
      h->root.u.def.value = htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_neg_log2_linesize");
      if (h == NULL)
        return FALSE;
      h->root.u.def.value = -htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_cachesize");
      if (h == NULL)
        return FALSE;
      h->root.u.def.value = 1 << (htab->num_lines_log2 + htab->line_size_log2);
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_log2_cachesize");
      if (h == NULL)
        return FALSE;
      h->root.u.def.value = htab->num_lines_log2 + htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_neg_log2_cachesize");
      if (h == NULL)
        return FALSE;
      h->root.u.def.value = -(htab->num_lines_log2 + htab->line_size_log2);
      h->root.u.def.section = bfd_abs_section_ptr;

      if (htab->init != NULL && htab->init->size != 0)
        {
          htab->init->contents = bfd_zalloc (htab->init->owner,
                                             htab->init->size);
          if (htab->init->contents == NULL)
            return FALSE;

          h = define_ovtab_symbol (htab, "__icache_fileoff");
          if (h == NULL)
            return FALSE;
          h->root.u.def.value = 0;
          h->root.u.def.section = htab->init;
          h->size = 8;
        }
    }
  else
    {
      /* Write out _ovly_table.  */
      /* set low bit of .size to mark non-overlay area as present.  */
      p[7] = 1;
      obfd = htab->ovtab->output_section->owner;
      for (s = obfd->sections; s != NULL; s = s->next)
        {
          unsigned int ovl_index = spu_elf_section_data (s)->u.o.ovl_index;

          if (ovl_index != 0)
            {
              unsigned long off = ovl_index * 16;
              unsigned int ovl_buf = spu_elf_section_data (s)->u.o.ovl_buf;

              bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
              bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16,
                          p + off + 4);
              /* file_off written later in spu_elf_modify_program_headers.  */
              bfd_put_32 (htab->ovtab->owner, ovl_buf, p + off + 12);
            }
        }

      /* Define the symbols bracketing _ovly_table and
         _ovly_buf_table for the overlay manager.  */
      h = define_ovtab_symbol (htab, "_ovly_table");
      if (h == NULL)
        return FALSE;
      h->root.u.def.value = 16;
      h->size = htab->num_overlays * 16;

      h = define_ovtab_symbol (htab, "_ovly_table_end");
      if (h == NULL)
        return FALSE;
      h->root.u.def.value = htab->num_overlays * 16 + 16;
      h->size = 0;

      h = define_ovtab_symbol (htab, "_ovly_buf_table");
      if (h == NULL)
        return FALSE;
      h->root.u.def.value = htab->num_overlays * 16 + 16;
      h->size = htab->num_buf * 4;

      h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
      if (h == NULL)
        return FALSE;
      h->root.u.def.value = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
      h->size = 0;
    }

  /* _EAR_ marks the table of effective addresses in .toe.  */
  h = define_ovtab_symbol (htab, "_EAR_");
  if (h == NULL)
    return FALSE;
  h->root.u.def.section = htab->toe;
  h->root.u.def.value = 0;
  h->size = 16;

  return TRUE;
}
2145
2146 /* Check that all loadable section VMAs lie in the range
2147 LO .. HI inclusive, and stash some parameters for --auto-overlay. */
2148
2149 asection *
2150 spu_elf_check_vma (struct bfd_link_info *info)
2151 {
2152 struct elf_segment_map *m;
2153 unsigned int i;
2154 struct spu_link_hash_table *htab = spu_hash_table (info);
2155 bfd *abfd = info->output_bfd;
2156 bfd_vma hi = htab->params->local_store_hi;
2157 bfd_vma lo = htab->params->local_store_lo;
2158
2159 htab->local_store = hi + 1 - lo;
2160
2161 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
2162 if (m->p_type == PT_LOAD)
2163 for (i = 0; i < m->count; i++)
2164 if (m->sections[i]->size != 0
2165 && (m->sections[i]->vma < lo
2166 || m->sections[i]->vma > hi
2167 || m->sections[i]->vma + m->sections[i]->size - 1 > hi))
2168 return m->sections[i];
2169
2170 return NULL;
2171 }
2172
/* OFFSET in SEC (presumably) is the beginning of a function prologue.
   Search for stack adjusting insns, and return the sp delta.
   If a store of lr is found save the instruction offset to *LR_STORE.
   If a stack adjusting instruction is found, save that offset to
   *SP_ADJUST.  */

static int
find_function_stack_adjust (asection *sec,
			    bfd_vma offset,
			    bfd_vma *lr_store,
			    bfd_vma *sp_adjust)
{
  /* Simulated values of the 128 SPU registers; only the arithmetic
     relevant to stack adjustment is modelled.  All start at zero, so
     reg[1] (sp) ends up holding the delta from function entry.  */
  int reg[128];

  memset (reg, 0, sizeof (reg));
  for ( ; offset + 4 <= sec->size; offset += 4)
    {
      unsigned char buf[4];
      int rt, ra;
      int imm;

      /* Assume no relocs on stack adjusing insns.  */
      if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
	break;

      /* Decode the rt and ra register fields of the big-endian insn.  */
      rt = buf[3] & 0x7f;
      ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);

      if (buf[0] == 0x24 /* stqd */)
	{
	  /* "stqd $lr,...($sp)" saves the link register;
	     record where.  */
	  if (rt == 0 /* lr */ && ra == 1 /* sp */)
	    *lr_store = offset;
	  continue;
	}

      /* Partly decoded immediate field.  */
      imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);

      if (buf[0] == 0x1c /* ai */)
	{
	  /* Sign-extend the 10-bit immediate.  */
	  imm >>= 7;
	  imm = (imm ^ 0x200) - 0x200;
	  reg[rt] = reg[ra] + imm;

	  if (rt == 1 /* sp */)
	    {
	      /* A positive sp change is stack deallocation: we are
		 past the prologue.  */
	      if (reg[rt] > 0)
		break;
	      *sp_adjust = offset;
	      return reg[rt];
	    }
	}
      else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
	{
	  int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);

	  reg[rt] = reg[ra] + reg[rb];
	  if (rt == 1)
	    {
	      if (reg[rt] > 0)
		break;
	      *sp_adjust = offset;
	      return reg[rt];
	    }
	}
      else if (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */)
	{
	  int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);

	  /* sf computes rb - ra.  */
	  reg[rt] = reg[rb] - reg[ra];
	  if (rt == 1)
	    {
	      if (reg[rt] > 0)
		break;
	      *sp_adjust = offset;
	      return reg[rt];
	    }
	}
      else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
	{
	  /* Immediate-load family: track the constant so that a later
	     "a $sp,$sp,$rt" style adjustment can be evaluated.  */
	  if (buf[0] >= 0x42 /* ila */)
	    imm |= (buf[0] & 1) << 17;
	  else
	    {
	      imm &= 0xffff;

	      if (buf[0] == 0x40 /* il */)
		{
		  if ((buf[1] & 0x80) == 0)
		    continue;
		  imm = (imm ^ 0x8000) - 0x8000;
		}
	      else if ((buf[1] & 0x80) == 0 /* ilhu */)
		imm <<= 16;
	    }
	  reg[rt] = imm;
	  continue;
	}
      else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
	{
	  /* iohl ORs in the low 16 bits, completing an ilhu pair.  */
	  reg[rt] |= imm & 0xffff;
	  continue;
	}
      else if (buf[0] == 0x04 /* ori */)
	{
	  /* Sign-extend the 10-bit immediate, then OR.  */
	  imm >>= 7;
	  imm = (imm ^ 0x200) - 0x200;
	  reg[rt] = reg[ra] | imm;
	  continue;
	}
      else if (buf[0] == 0x32 && (buf[1] & 0x80) != 0 /* fsmbi */)
	{
	  /* Form-select-mask: expand the top 4 immediate bits to a
	     byte mask in the preferred word slot.  */
	  reg[rt] = (  ((imm & 0x8000) ? 0xff000000 : 0)
		     | ((imm & 0x4000) ? 0x00ff0000 : 0)
		     | ((imm & 0x2000) ? 0x0000ff00 : 0)
		     | ((imm & 0x1000) ? 0x000000ff : 0));
	  continue;
	}
      else if (buf[0] == 0x16 /* andbi */)
	{
	  /* Replicate the immediate byte across the word, then AND.  */
	  imm >>= 7;
	  imm &= 0xff;
	  imm |= imm << 8;
	  imm |= imm << 16;
	  reg[rt] = reg[ra] & imm;
	  continue;
	}
      else if (buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
	{
	  /* Used in pic reg load.  Say rt is trashed.  Won't be used
	     in stack adjust, but we need to continue past this branch.  */
	  reg[rt] = 0;
	  continue;
	}
      else if (is_branch (buf) || is_indirect_branch (buf))
	/* If we hit a branch then we must be out of the prologue.  */
	break;
    }

  /* No stack adjustment found: assume a leaf-like zero-delta frame.  */
  return 0;
}
2314
2315 /* qsort predicate to sort symbols by section and value. */
2316
2317 static Elf_Internal_Sym *sort_syms_syms;
2318 static asection **sort_syms_psecs;
2319
2320 static int
2321 sort_syms (const void *a, const void *b)
2322 {
2323 Elf_Internal_Sym *const *s1 = a;
2324 Elf_Internal_Sym *const *s2 = b;
2325 asection *sec1,*sec2;
2326 bfd_signed_vma delta;
2327
2328 sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
2329 sec2 = sort_syms_psecs[*s2 - sort_syms_syms];
2330
2331 if (sec1 != sec2)
2332 return sec1->index - sec2->index;
2333
2334 delta = (*s1)->st_value - (*s2)->st_value;
2335 if (delta != 0)
2336 return delta < 0 ? -1 : 1;
2337
2338 delta = (*s2)->st_size - (*s1)->st_size;
2339 if (delta != 0)
2340 return delta < 0 ? -1 : 1;
2341
2342 return *s1 < *s2 ? -1 : 1;
2343 }
2344
2345 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
2346 entries for section SEC. */
2347
2348 static struct spu_elf_stack_info *
2349 alloc_stack_info (asection *sec, int max_fun)
2350 {
2351 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2352 bfd_size_type amt;
2353
2354 amt = sizeof (struct spu_elf_stack_info);
2355 amt += (max_fun - 1) * sizeof (struct function_info);
2356 sec_data->u.i.stack_info = bfd_zmalloc (amt);
2357 if (sec_data->u.i.stack_info != NULL)
2358 sec_data->u.i.stack_info->max_fun = max_fun;
2359 return sec_data->u.i.stack_info;
2360 }
2361
/* Add a new struct function_info describing a (part of a) function
   starting at SYM_H.  Keep the array sorted by address.
   SYM_H is an Elf_Internal_Sym * if GLOBAL is false, else an
   elf_link_hash_entry *.  IS_FUNC marks it as a proper function (as
   opposed to a code label).  Returns the entry used, or NULL on
   allocation failure.  */

static struct function_info *
maybe_insert_function (asection *sec,
		       void *sym_h,
		       bfd_boolean global,
		       bfd_boolean is_func)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
  int i;
  bfd_vma off, size;

  /* Lazily create the per-section function array.  */
  if (sinfo == NULL)
    {
      sinfo = alloc_stack_info (sec, 20);
      if (sinfo == NULL)
	return NULL;
    }

  /* Extract the section-relative start and size from either flavour
     of symbol handle.  */
  if (!global)
    {
      Elf_Internal_Sym *sym = sym_h;
      off = sym->st_value;
      size = sym->st_size;
    }
  else
    {
      struct elf_link_hash_entry *h = sym_h;
      off = h->root.u.def.value;
      size = h->size;
    }

  /* Find the last existing entry starting at or before OFF.  */
  for (i = sinfo->num_fun; --i >= 0; )
    if (sinfo->fun[i].lo <= off)
      break;

  if (i >= 0)
    {
      /* Don't add another entry for an alias, but do update some
	 info.  */
      if (sinfo->fun[i].lo == off)
	{
	  /* Prefer globals over local syms.  */
	  if (global && !sinfo->fun[i].global)
	    {
	      sinfo->fun[i].global = TRUE;
	      sinfo->fun[i].u.h = sym_h;
	    }
	  if (is_func)
	    sinfo->fun[i].is_func = TRUE;
	  return &sinfo->fun[i];
	}
      /* Ignore a zero-size symbol inside an existing function.  */
      else if (sinfo->fun[i].hi > off && size == 0)
	return &sinfo->fun[i];
    }

  /* Grow the array by half again plus a little, zeroing the new
     tail, before inserting.  */
  if (sinfo->num_fun >= sinfo->max_fun)
    {
      bfd_size_type amt = sizeof (struct spu_elf_stack_info);
      bfd_size_type old = amt;

      old += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
      amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo = bfd_realloc (sinfo, amt);
      if (sinfo == NULL)
	return NULL;
      memset ((char *) sinfo + old, 0, amt - old);
      sec_data->u.i.stack_info = sinfo;
    }

  /* Shift later entries up to keep the array sorted by lo address.  */
  if (++i < sinfo->num_fun)
    memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
	     (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
  sinfo->fun[i].is_func = is_func;
  sinfo->fun[i].global = global;
  sinfo->fun[i].sec = sec;
  if (global)
    sinfo->fun[i].u.h = sym_h;
  else
    sinfo->fun[i].u.sym = sym_h;
  sinfo->fun[i].lo = off;
  sinfo->fun[i].hi = off + size;
  /* -1 means "not found"; find_function_stack_adjust fills these in
     when it locates the lr save and sp adjustment insns.  */
  sinfo->fun[i].lr_store = -1;
  sinfo->fun[i].sp_adjust = -1;
  /* The prologue delta is negative (stack grows down); store the
     positive frame size.  */
  sinfo->fun[i].stack = -find_function_stack_adjust (sec, off,
						     &sinfo->fun[i].lr_store,
						     &sinfo->fun[i].sp_adjust);
  sinfo->num_fun += 1;
  return &sinfo->fun[i];
}
2456
/* Return the name of FUN.  For a nameless local symbol a
   "section+offset" string is fabricated; that buffer is deliberately
   never freed, as callers keep the pointer.  */

static const char *
func_name (struct function_info *fun)
{
  asection *sec;
  bfd *ibfd;
  Elf_Internal_Shdr *symtab_hdr;

  /* Name the function after its entry part, not a hot/cold split.  */
  while (fun->start != NULL)
    fun = fun->start;

  if (fun->global)
    return fun->u.h->root.root.string;

  sec = fun->sec;
  if (fun->u.sym->st_name == 0)
    {
      /* len + 10 covers '+', at most 8 hex digits, and the NUL.  */
      size_t len = strlen (sec->name);
      char *name = bfd_malloc (len + 10);
      if (name == NULL)
	return "(null)";
      sprintf (name, "%s+%lx", sec->name,
	       (unsigned long) fun->u.sym->st_value & 0xffffffff);
      return name;
    }
  ibfd = sec->owner;
  symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
  return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
}
2487
2488 /* Read the instruction at OFF in SEC. Return true iff the instruction
2489 is a nop, lnop, or stop 0 (all zero insn). */
2490
2491 static bfd_boolean
2492 is_nop (asection *sec, bfd_vma off)
2493 {
2494 unsigned char insn[4];
2495
2496 if (off + 4 > sec->size
2497 || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
2498 return FALSE;
2499 if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
2500 return TRUE;
2501 if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
2502 return TRUE;
2503 return FALSE;
2504 }
2505
2506 /* Extend the range of FUN to cover nop padding up to LIMIT.
2507 Return TRUE iff some instruction other than a NOP was found. */
2508
2509 static bfd_boolean
2510 insns_at_end (struct function_info *fun, bfd_vma limit)
2511 {
2512 bfd_vma off = (fun->hi + 3) & -4;
2513
2514 while (off < limit && is_nop (fun->sec, off))
2515 off += 4;
2516 if (off < limit)
2517 {
2518 fun->hi = off;
2519 return TRUE;
2520 }
2521 fun->hi = limit;
2522 return FALSE;
2523 }
2524
/* Check and fix overlapping function ranges.  Return TRUE iff there
   are gaps in the current info we have about functions in SEC.  */

static bfd_boolean
check_function_ranges (asection *sec, struct bfd_link_info *info)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
  int i;
  bfd_boolean gaps = FALSE;

  /* No function info at all means nothing to check (and the caller
     already treats uninteresting sections separately).  */
  if (sinfo == NULL)
    return FALSE;

  for (i = 1; i < sinfo->num_fun; i++)
    if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
      {
	/* Fix overlapping symbols.  */
	const char *f1 = func_name (&sinfo->fun[i - 1]);
	const char *f2 = func_name (&sinfo->fun[i]);

	info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
	sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
      }
    /* Not overlapping: extend the previous function over nop padding;
       if real insns remain before fun[i].lo there is a gap.  */
    else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
      gaps = TRUE;

  if (sinfo->num_fun == 0)
    gaps = TRUE;
  else
    {
      /* Code before the first known function is a gap too.  */
      if (sinfo->fun[0].lo != 0)
	gaps = TRUE;
      if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
	{
	  const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);

	  info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
	  sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
	}
      /* Likewise check for unclaimed insns after the last function.  */
      else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
	gaps = TRUE;
    }
  return gaps;
}
2570
2571 /* Search current function info for a function that contains address
2572 OFFSET in section SEC. */
2573
2574 static struct function_info *
2575 find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
2576 {
2577 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2578 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2579 int lo, hi, mid;
2580
2581 lo = 0;
2582 hi = sinfo->num_fun;
2583 while (lo < hi)
2584 {
2585 mid = (lo + hi) / 2;
2586 if (offset < sinfo->fun[mid].lo)
2587 hi = mid;
2588 else if (offset >= sinfo->fun[mid].hi)
2589 lo = mid + 1;
2590 else
2591 return &sinfo->fun[mid];
2592 }
2593 info->callbacks->einfo (_("%A:0x%v not found in function table\n"),
2594 sec, offset);
2595 bfd_set_error (bfd_error_bad_value);
2596 return NULL;
2597 }
2598
/* Add CALLEE to CALLER call list if not already present.  Return TRUE
   if CALLEE was new.  If this function return FALSE, CALLEE should
   be freed.  */

static bfd_boolean
insert_callee (struct function_info *caller, struct call_info *callee)
{
  struct call_info **pp, *p;

  /* Look for an existing entry calling the same function.  */
  for (pp = &caller->call_list; (p = *pp) != NULL; pp = &p->next)
    if (p->fun == callee->fun)
      {
	/* Tail calls use less stack than normal calls.  Retain entry
	   for normal call over one for tail call.  */
	p->is_tail &= callee->is_tail;
	if (!p->is_tail)
	  {
	    /* A normal call means the destination is a real function
	       entry, not a hot/cold continuation of the caller.  */
	    p->fun->start = NULL;
	    p->fun->is_func = TRUE;
	  }
	p->count += callee->count;
	/* Reorder list so most recent call is first.  */
	*pp = p->next;
	p->next = caller->call_list;
	caller->call_list = p;
	return FALSE;
      }
  /* New callee: link it at the head; CALLER now owns CALLEE.  */
  callee->next = caller->call_list;
  caller->call_list = callee;
  return TRUE;
}
2630
2631 /* Copy CALL and insert the copy into CALLER. */
2632
2633 static bfd_boolean
2634 copy_callee (struct function_info *caller, const struct call_info *call)
2635 {
2636 struct call_info *callee;
2637 callee = bfd_malloc (sizeof (*callee));
2638 if (callee == NULL)
2639 return FALSE;
2640 *callee = *call;
2641 if (!insert_callee (caller, callee))
2642 free (callee);
2643 return TRUE;
2644 }
2645
2646 /* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2647 overlay stub sections. */
2648
2649 static bfd_boolean
2650 interesting_section (asection *s)
2651 {
2652 return (s->output_section != bfd_abs_section_ptr
2653 && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_IN_MEMORY))
2654 == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2655 && s->size != 0);
2656 }
2657
/* Rummage through the relocs for SEC, looking for function calls.
   If CALL_TREE is true, fill in call graph.  If CALL_TREE is false,
   mark destination symbols on calls as being functions.  Also
   look at branches, which may be tail calls or go to hot/cold
   section part of same function.  */

static bfd_boolean
mark_functions_via_relocs (asection *sec,
			   struct bfd_link_info *info,
			   int call_tree)
{
  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
  Elf_Internal_Shdr *symtab_hdr;
  void *psyms;
  unsigned int priority = 0;
  /* Static: the non-code-section warning is emitted at most once per
     link, not per reloc.  */
  static bfd_boolean warned;

  if (!interesting_section (sec)
      || sec->reloc_count == 0)
    return TRUE;

  internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
					       info->keep_memory);
  if (internal_relocs == NULL)
    return FALSE;

  symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
  psyms = &symtab_hdr->contents;
  irela = internal_relocs;
  irelaend = irela + sec->reloc_count;
  for (; irela < irelaend; irela++)
    {
      enum elf_spu_reloc_type r_type;
      unsigned int r_indx;
      asection *sym_sec;
      Elf_Internal_Sym *sym;
      struct elf_link_hash_entry *h;
      bfd_vma val;
      bfd_boolean reject, is_call;
      struct function_info *caller;
      struct call_info *callee;

      /* REJECT means "not a branch/call reloc"; such relocs are still
	 examined under --auto-overlay to count function pointers.  */
      reject = FALSE;
      r_type = ELF32_R_TYPE (irela->r_info);
      if (r_type != R_SPU_REL16
	  && r_type != R_SPU_ADDR16)
	{
	  reject = TRUE;
	  if (!(call_tree && spu_hash_table (info)->params->auto_overlay))
	    continue;
	}

      r_indx = ELF32_R_SYM (irela->r_info);
      if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
	return FALSE;

      /* Ignore undefined/absolute targets.  */
      if (sym_sec == NULL
	  || sym_sec->output_section == bfd_abs_section_ptr)
	continue;

      is_call = FALSE;
      if (!reject)
	{
	  unsigned char insn[4];

	  if (!bfd_get_section_contents (sec->owner, sec, insn,
					 irela->r_offset, 4))
	    return FALSE;
	  if (is_branch (insn))
	    {
	      /* br[as]l opcodes (link-setting branches) are calls.  */
	      is_call = (insn[0] & 0xfd) == 0x31;
	      /* Extract the branch-hint priority bits from the insn.  */
	      priority = insn[1] & 0x0f;
	      priority <<= 8;
	      priority |= insn[2];
	      priority <<= 8;
	      priority |= insn[3];
	      priority >>= 7;
	      if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
		  != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
		{
		  if (!warned)
		    info->callbacks->einfo
		      (_("%B(%A+0x%v): call to non-code section"
			 " %B(%A), analysis incomplete\n"),
		       sec->owner, sec, irela->r_offset,
		       sym_sec->owner, sym_sec);
		  warned = TRUE;
		  continue;
		}
	    }
	  else
	    {
	      /* A non-branch insn with a branch-style reloc: treat as
		 a possible function pointer, except for hints.  */
	      reject = TRUE;
	      if (!(call_tree && spu_hash_table (info)->params->auto_overlay)
		  || is_hint (insn))
		continue;
	    }
	}

      if (reject)
	{
	  /* For --auto-overlay, count possible stubs we need for
	     function pointer references.  */
	  unsigned int sym_type;
	  if (h)
	    sym_type = h->type;
	  else
	    sym_type = ELF_ST_TYPE (sym->st_info);
	  if (sym_type == STT_FUNC)
	    spu_hash_table (info)->non_ovly_stub += 1;
	  continue;
	}

      /* Compute the branch target's section-relative address.  */
      if (h)
	val = h->root.u.def.value;
      else
	val = sym->st_value;
      val += irela->r_addend;

      if (!call_tree)
	{
	  /* First pass: just record the target as a function start.  */
	  struct function_info *fun;

	  if (irela->r_addend != 0)
	    {
	      /* Target isn't the symbol itself: fabricate a local sym
		 at the adjusted address.  */
	      Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
	      if (fake == NULL)
		return FALSE;
	      fake->st_value = val;
	      fake->st_shndx
		= _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
	      sym = fake;
	    }
	  if (sym)
	    fun = maybe_insert_function (sym_sec, sym, FALSE, is_call);
	  else
	    fun = maybe_insert_function (sym_sec, h, TRUE, is_call);
	  if (fun == NULL)
	    return FALSE;
	  /* Free the fake sym unless maybe_insert_function kept it.  */
	  if (irela->r_addend != 0
	      && fun->u.sym != sym)
	    free (sym);
	  continue;
	}

      /* Second pass: build the call graph edge caller -> callee.  */
      caller = find_function (sec, irela->r_offset, info);
      if (caller == NULL)
	return FALSE;
      callee = bfd_malloc (sizeof *callee);
      if (callee == NULL)
	return FALSE;

      callee->fun = find_function (sym_sec, val, info);
      if (callee->fun == NULL)
	return FALSE;
      callee->is_tail = !is_call;
      callee->is_pasted = FALSE;
      callee->broken_cycle = FALSE;
      callee->priority = priority;
      callee->count = 1;
      /* Count distinct calling sections via last_caller.  */
      if (callee->fun->last_caller != sec)
	{
	  callee->fun->last_caller = sec;
	  callee->fun->call_count += 1;
	}
      if (!insert_callee (caller, callee))
	free (callee);
      else if (!is_call
	       && !callee->fun->is_func
	       && callee->fun->stack == 0)
	{
	  /* This is either a tail call or a branch from one part of
	     the function to another, ie. hot/cold section.  If the
	     destination has been called by some other function then
	     it is a separate function.  We also assume that functions
	     are not split across input files.  */
	  if (sec->owner != sym_sec->owner)
	    {
	      callee->fun->start = NULL;
	      callee->fun->is_func = TRUE;
	    }
	  else if (callee->fun->start == NULL)
	    {
	      /* First branch into this piece: tie it to the caller's
		 entry part.  */
	      struct function_info *caller_start = caller;
	      while (caller_start->start)
		caller_start = caller_start->start;

	      if (caller_start != callee->fun)
		callee->fun->start = caller_start;
	    }
	  else
	    {
	      /* Already part of some function; if it isn't the same
		 one as the caller's, it must be a real function.  */
	      struct function_info *callee_start;
	      struct function_info *caller_start;
	      callee_start = callee->fun;
	      while (callee_start->start)
		callee_start = callee_start->start;
	      caller_start = caller;
	      while (caller_start->start)
		caller_start = caller_start->start;
	      if (caller_start != callee_start)
		{
		  callee->fun->start = NULL;
		  callee->fun->is_func = TRUE;
		}
	    }
	}
    }

  return TRUE;
}
2869
/* Handle something like .init or .fini, which has a piece of a function.
   These sections are pasted together to form a single function.  */

static bfd_boolean
pasted_function (asection *sec)
{
  struct bfd_link_order *l;
  struct _spu_elf_section_data *sec_data;
  struct spu_elf_stack_info *sinfo;
  Elf_Internal_Sym *fake;
  struct function_info *fun, *fun_start;

  /* Fabricate a local symbol covering the whole section so the piece
     gets a function_info entry.  */
  fake = bfd_zmalloc (sizeof (*fake));
  if (fake == NULL)
    return FALSE;
  fake->st_value = 0;
  fake->st_size = sec->size;
  fake->st_shndx
    = _bfd_elf_section_from_bfd_section (sec->owner, sec);
  fun = maybe_insert_function (sec, fake, FALSE, FALSE);
  if (!fun)
    return FALSE;

  /* Find a function immediately preceding this section.  */
  fun_start = NULL;
  for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
    {
      if (l->u.indirect.section == sec)
	{
	  if (fun_start != NULL)
	    {
	      /* Paste this piece onto the preceding function via an
		 artificial tail-call edge.  */
	      struct call_info *callee = bfd_malloc (sizeof *callee);
	      if (callee == NULL)
		return FALSE;

	      fun->start = fun_start;
	      callee->fun = fun;
	      callee->is_tail = TRUE;
	      callee->is_pasted = TRUE;
	      callee->broken_cycle = FALSE;
	      callee->priority = 0;
	      callee->count = 1;
	      if (!insert_callee (fun_start, callee))
		free (callee);
	      return TRUE;
	    }
	  break;
	}
      /* Track the last function seen in earlier input sections of
	 the same output section.  */
      if (l->type == bfd_indirect_link_order
	  && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
	  && (sinfo = sec_data->u.i.stack_info) != NULL
	  && sinfo->num_fun != 0)
	fun_start = &sinfo->fun[sinfo->num_fun - 1];
    }

  /* Don't return an error if we did not find a function preceding this
     section.  The section may have incorrect flags.  */
  return TRUE;
}
2929
/* Map address ranges in code sections to functions.
   Pass 1 installs properly typed function symbols; if gaps remain,
   pass 2 discovers more starts via relocations and global symbols,
   then extends zero-size functions to cover the gaps.  */

static bfd_boolean
discover_functions (struct bfd_link_info *info)
{
  bfd *ibfd;
  int bfd_idx;
  /* Per-input-bfd arrays of selected symbol pointers and their
     sections, kept alive between the two passes.  */
  Elf_Internal_Sym ***psym_arr;
  asection ***sec_arr;
  bfd_boolean gaps = FALSE;

  /* Count the input bfds to size the per-bfd arrays.  */
  bfd_idx = 0;
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    bfd_idx++;

  psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
  if (psym_arr == NULL)
    return FALSE;
  sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
  if (sec_arr == NULL)
    return FALSE;

  for (ibfd = info->input_bfds, bfd_idx = 0;
       ibfd != NULL;
       ibfd = ibfd->link_next, bfd_idx++)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      Elf_Internal_Shdr *symtab_hdr;
      asection *sec;
      size_t symcount;
      Elf_Internal_Sym *syms, *sy, **psyms, **psy;
      asection **psecs, **p;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      /* Read all the symbols.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
      if (symcount == 0)
	{
	  /* No symbols at all: any interesting section is a gap.  */
	  if (!gaps)
	    for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	      if (interesting_section (sec))
		{
		  gaps = TRUE;
		  break;
		}
	  continue;
	}

      if (symtab_hdr->contents != NULL)
	{
	  /* Don't use cached symbols since the generic ELF linker
	     code only reads local symbols, and we need globals too.  */
	  free (symtab_hdr->contents);
	  symtab_hdr->contents = NULL;
	}
      syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
				   NULL, NULL, NULL);
      symtab_hdr->contents = (void *) syms;
      if (syms == NULL)
	return FALSE;

      /* Select defined function symbols that are going to be output.  */
      psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
      if (psyms == NULL)
	return FALSE;
      psym_arr[bfd_idx] = psyms;
      psecs = bfd_malloc (symcount * sizeof (*psecs));
      if (psecs == NULL)
	return FALSE;
      sec_arr[bfd_idx] = psecs;
      for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
	if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
	    || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
	  {
	    asection *s;

	    *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
	    if (s != NULL && interesting_section (s))
	      *psy++ = sy;
	  }
      symcount = psy - psyms;
      /* NULL-terminate so the pass-2 global scan can stop.  */
      *psy = NULL;

      /* Sort them by section and offset within section.  */
      sort_syms_syms = syms;
      sort_syms_psecs = psecs;
      qsort (psyms, symcount, sizeof (*psyms), sort_syms);

      /* Now inspect the function symbols.  */
      for (psy = psyms; psy < psyms + symcount; )
	{
	  asection *s = psecs[*psy - syms];
	  Elf_Internal_Sym **psy2;

	  /* Find the run of symbols in the same section, so the
	     section's function array can be pre-sized.  */
	  for (psy2 = psy; ++psy2 < psyms + symcount; )
	    if (psecs[*psy2 - syms] != s)
	      break;

	  if (!alloc_stack_info (s, psy2 - psy))
	    return FALSE;
	  psy = psy2;
	}

      /* First install info about properly typed and sized functions.
	 In an ideal world this will cover all code sections, except
	 when partitioning functions into hot and cold sections,
	 and the horrible pasted together .init and .fini functions.  */
      for (psy = psyms; psy < psyms + symcount; ++psy)
	{
	  sy = *psy;
	  if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
	    {
	      asection *s = psecs[sy - syms];
	      if (!maybe_insert_function (s, sy, FALSE, TRUE))
		return FALSE;
	    }
	}

      for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	if (interesting_section (sec))
	  gaps |= check_function_ranges (sec, info);
    }

  if (gaps)
    {
      /* See if we can discover more function symbols by looking at
	 relocations.  */
      for (ibfd = info->input_bfds, bfd_idx = 0;
	   ibfd != NULL;
	   ibfd = ibfd->link_next, bfd_idx++)
	{
	  asection *sec;

	  if (psym_arr[bfd_idx] == NULL)
	    continue;

	  for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	    if (!mark_functions_via_relocs (sec, info, FALSE))
	      return FALSE;
	}

      for (ibfd = info->input_bfds, bfd_idx = 0;
	   ibfd != NULL;
	   ibfd = ibfd->link_next, bfd_idx++)
	{
	  Elf_Internal_Shdr *symtab_hdr;
	  asection *sec;
	  Elf_Internal_Sym *syms, *sy, **psyms, **psy;
	  asection **psecs;

	  if ((psyms = psym_arr[bfd_idx]) == NULL)
	    continue;

	  psecs = sec_arr[bfd_idx];

	  symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
	  syms = (Elf_Internal_Sym *) symtab_hdr->contents;

	  /* Re-check this bfd's sections after the reloc scan.  */
	  gaps = FALSE;
	  for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	    if (interesting_section (sec))
	      gaps |= check_function_ranges (sec, info);
	  if (!gaps)
	    continue;

	  /* Finally, install all globals.  */
	  for (psy = psyms; (sy = *psy) != NULL; ++psy)
	    {
	      asection *s;

	      s = psecs[sy - syms];

	      /* Global syms might be improperly typed functions.  */
	      if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
		  && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
		{
		  if (!maybe_insert_function (s, sy, FALSE, FALSE))
		    return FALSE;
		}
	    }
	}

      for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
	{
	  extern const bfd_target bfd_elf32_spu_vec;
	  asection *sec;

	  if (ibfd->xvec != &bfd_elf32_spu_vec)
	    continue;

	  /* Some of the symbols we've installed as marking the
	     beginning of functions may have a size of zero.  Extend
	     the range of such functions to the beginning of the
	     next symbol of interest.  */
	  for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	    if (interesting_section (sec))
	      {
		struct _spu_elf_section_data *sec_data;
		struct spu_elf_stack_info *sinfo;

		sec_data = spu_elf_section_data (sec);
		sinfo = sec_data->u.i.stack_info;
		if (sinfo != NULL && sinfo->num_fun != 0)
		  {
		    int fun_idx;
		    bfd_vma hi = sec->size;

		    /* Walk backwards: each function ends where the
		       next one begins.  */
		    for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
		      {
			sinfo->fun[fun_idx].hi = hi;
			hi = sinfo->fun[fun_idx].lo;
		      }

		    sinfo->fun[0].lo = 0;
		  }
		/* No symbols in this section.  Must be .init or .fini
		   or something similar.  */
		else if (!pasted_function (sec))
		  return FALSE;
	      }
	}
    }

  /* Release the per-bfd scratch arrays.  */
  for (ibfd = info->input_bfds, bfd_idx = 0;
       ibfd != NULL;
       ibfd = ibfd->link_next, bfd_idx++)
    {
      if (psym_arr[bfd_idx] == NULL)
	continue;

      free (psym_arr[bfd_idx]);
      free (sec_arr[bfd_idx]);
    }

  free (psym_arr);
  free (sec_arr);

  return TRUE;
}
3172
3173 /* Iterate over all function_info we have collected, calling DOIT on
3174 each node if ROOT_ONLY is false. Only call DOIT on root nodes
3175 if ROOT_ONLY. */
3176
3177 static bfd_boolean
3178 for_each_node (bfd_boolean (*doit) (struct function_info *,
3179 struct bfd_link_info *,
3180 void *),
3181 struct bfd_link_info *info,
3182 void *param,
3183 int root_only)
3184 {
3185 bfd *ibfd;
3186
3187 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3188 {
3189 extern const bfd_target bfd_elf32_spu_vec;
3190 asection *sec;
3191
3192 if (ibfd->xvec != &bfd_elf32_spu_vec)
3193 continue;
3194
3195 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3196 {
3197 struct _spu_elf_section_data *sec_data;
3198 struct spu_elf_stack_info *sinfo;
3199
3200 if ((sec_data = spu_elf_section_data (sec)) != NULL
3201 && (sinfo = sec_data->u.i.stack_info) != NULL)
3202 {
3203 int i;
3204 for (i = 0; i < sinfo->num_fun; ++i)
3205 if (!root_only || !sinfo->fun[i].non_root)
3206 if (!doit (&sinfo->fun[i], info, param))
3207 return FALSE;
3208 }
3209 }
3210 }
3211 return TRUE;
3212 }
3213
3214 /* Transfer call info attached to struct function_info entries for
3215 all of a given function's sections to the first entry. */
3216
3217 static bfd_boolean
3218 transfer_calls (struct function_info *fun,
3219 struct bfd_link_info *info ATTRIBUTE_UNUSED,
3220 void *param ATTRIBUTE_UNUSED)
3221 {
3222 struct function_info *start = fun->start;
3223
3224 if (start != NULL)
3225 {
3226 struct call_info *call, *call_next;
3227
3228 while (start->start != NULL)
3229 start = start->start;
3230 for (call = fun->call_list; call != NULL; call = call_next)
3231 {
3232 call_next = call->next;
3233 if (!insert_callee (start, call))
3234 free (call);
3235 }
3236 fun->call_list = NULL;
3237 }
3238 return TRUE;
3239 }
3240
3241 /* Mark nodes in the call graph that are called by some other node. */
3242
3243 static bfd_boolean
3244 mark_non_root (struct function_info *fun,
3245 struct bfd_link_info *info ATTRIBUTE_UNUSED,
3246 void *param ATTRIBUTE_UNUSED)
3247 {
3248 struct call_info *call;
3249
3250 if (fun->visit1)
3251 return TRUE;
3252 fun->visit1 = TRUE;
3253 for (call = fun->call_list; call; call = call->next)
3254 {
3255 call->fun->non_root = TRUE;
3256 mark_non_root (call->fun, 0, 0);
3257 }
3258 return TRUE;
3259 }
3260
/* Remove cycles from the call graph.  Set depth of nodes.
   PARAM points to the current depth on entry and is updated to the
   maximum depth seen in this subtree on return.  */

static bfd_boolean
remove_cycles (struct function_info *fun,
	       struct bfd_link_info *info,
	       void *param)
{
  struct call_info **callp, *call;
  unsigned int depth = *(unsigned int *) param;
  unsigned int max_depth = depth;

  fun->depth = depth;
  /* visit2: node has been processed at least once.
     marking: node is on the current DFS path; seeing it again via a
     call edge means that edge closes a cycle.  */
  fun->visit2 = TRUE;
  fun->marking = TRUE;

  callp = &fun->call_list;
  while ((call = *callp) != NULL)
    {
      /* Pasted pieces are the same function, so don't deepen.  */
      call->max_depth = depth + !call->is_pasted;
      if (!call->fun->visit2)
	{
	  if (!remove_cycles (call->fun, info, &call->max_depth))
	    return FALSE;
	  if (max_depth < call->max_depth)
	    max_depth = call->max_depth;
	}
      else if (call->fun->marking)
	{
	  /* Back edge: this call closes a cycle.  */
	  struct spu_link_hash_table *htab = spu_hash_table (info);

	  if (!htab->params->auto_overlay
	      && htab->params->stack_analysis)
	    {
	      const char *f1 = func_name (fun);
	      const char *f2 = func_name (call->fun);

	      info->callbacks->info (_("Stack analysis will ignore the call "
				       "from %s to %s\n"),
				     f1, f2);
	    }

	  call->broken_cycle = TRUE;
	}
      callp = &call->next;
    }
  fun->marking = FALSE;
  *(unsigned int *) param = max_depth;
  return TRUE;
}
3310
3311 /* Check that we actually visited all nodes in remove_cycles. If we
3312 didn't, then there is some cycle in the call graph not attached to
3313 any root node. Arbitrarily choose a node in the cycle as a new
3314 root and break the cycle. */
3315
3316 static bfd_boolean
3317 mark_detached_root (struct function_info *fun,
3318 struct bfd_link_info *info,
3319 void *param)
3320 {
3321 if (fun->visit2)
3322 return TRUE;
3323 fun->non_root = FALSE;
3324 *(unsigned int *) param = 0;
3325 return remove_cycles (fun, info, param);
3326 }
3327
/* Populate call_list for each function.
   Scans every SPU section's relocs to build the call graph, merges
   hot/cold pieces, finds roots, and breaks cycles.  */

static bfd_boolean
build_call_tree (struct bfd_link_info *info)
{
  bfd *ibfd;
  unsigned int depth;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
	continue;

      /* TRUE = second pass: record call edges, not just targets.  */
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	if (!mark_functions_via_relocs (sec, info, TRUE))
	  return FALSE;
    }

  /* Transfer call info from hot/cold section part of function
     to main entry.  */
  if (!spu_hash_table (info)->params->auto_overlay
      && !for_each_node (transfer_calls, info, 0, FALSE))
    return FALSE;

  /* Find the call graph root(s).  */
  if (!for_each_node (mark_non_root, info, 0, FALSE))
    return FALSE;

  /* Remove cycles from the call graph.  We start from the root node(s)
     so that we break cycles in a reasonable place.  */
  depth = 0;
  if (!for_each_node (remove_cycles, info, &depth, TRUE))
    return FALSE;

  /* Any node still unvisited is in a detached cycle; adopt one node
     per such cycle as a new root.  */
  return for_each_node (mark_detached_root, info, &depth, FALSE);
}
3367
3368 /* qsort predicate to sort calls by priority, max_depth then count. */
3369
3370 static int
3371 sort_calls (const void *a, const void *b)
3372 {
3373 struct call_info *const *c1 = a;
3374 struct call_info *const *c2 = b;
3375 int delta;
3376
3377 delta = (*c2)->priority - (*c1)->priority;
3378 if (delta != 0)
3379 return delta;
3380
3381 delta = (*c2)->max_depth - (*c1)->max_depth;
3382 if (delta != 0)
3383 return delta;
3384
3385 delta = (*c2)->count - (*c1)->count;
3386 if (delta != 0)
3387 return delta;
3388
3389 return (char *) c1 - (char *) c2;
3390 }
3391
/* Parameter block for mark_overlay_section.  */
struct _mos_param {
  /* Largest (text + optional rodata) size in bytes of any single
     section marked for placement in an overlay.  */
  unsigned int max_overlay_size;
};
3395
3396 /* Set linker_mark and gc_mark on any sections that we will put in
3397 overlays. These flags are used by the generic ELF linker, but we
3398 won't be continuing on to bfd_elf_final_link so it is OK to use
3399 them. linker_mark is clear before we get here. Set segment_mark
3400 on sections that are part of a pasted function (excluding the last
3401 section).
3402
   Set up function rodata section if --overlay-rodata.  We don't
   currently include merged string constant rodata sections since
   they may be shared by more than one function, and so can't be
   paired with any single function's overlay.

   Sort the call graph so that the deepest nodes will be visited
   first.  */
3408
static bfd_boolean
mark_overlay_section (struct function_info *fun,
		      struct bfd_link_info *info,
		      void *param)
{
  struct call_info *call;
  unsigned int count;
  struct _mos_param *mos_param = param;
  struct spu_link_hash_table *htab = spu_hash_table (info);

  /* Each node is processed once; the graph may reach it many times.  */
  if (fun->visit4)
    return TRUE;

  fun->visit4 = TRUE;
  /* For soft-icache, only sections the user allows (non_ia_text) or
     sections that must be resident (.text.ia.*, .init, .fini) are
     considered here.  */
  if (!fun->sec->linker_mark
      && (htab->params->ovly_flavour != ovly_soft_icache
	  || htab->params->non_ia_text
	  || strncmp (fun->sec->name, ".text.ia.", 9) == 0
	  || strcmp (fun->sec->name, ".init") == 0
	  || strcmp (fun->sec->name, ".fini") == 0))
    {
      unsigned int size;

      fun->sec->linker_mark = 1;
      fun->sec->gc_mark = 1;
      fun->sec->segment_mark = 0;
      /* Ensure SEC_CODE is set on this text section (it ought to
	 be!), and SEC_CODE is clear on rodata sections.  We use
	 this flag to differentiate the two overlay section types.  */
      fun->sec->flags |= SEC_CODE;

      size = fun->sec->size;
      if (htab->params->auto_overlay & OVERLAY_RODATA)
	{
	  char *name = NULL;

	  /* Find the rodata section corresponding to this function's
	     text section.  */
	  if (strcmp (fun->sec->name, ".text") == 0)
	    {
	      /* ".text" pairs with ".rodata".  */
	      name = bfd_malloc (sizeof (".rodata"));
	      if (name == NULL)
		return FALSE;
	      memcpy (name, ".rodata", sizeof (".rodata"));
	    }
	  else if (strncmp (fun->sec->name, ".text.", 6) == 0)
	    {
	      /* ".text.FOO" pairs with ".rodata.FOO".  len + 3 covers
		 the two extra chars of ".rodata" over ".text." plus
		 the terminating NUL copied from the source name.  */
	      size_t len = strlen (fun->sec->name);
	      name = bfd_malloc (len + 3);
	      if (name == NULL)
		return FALSE;
	      memcpy (name, ".rodata", sizeof (".rodata"));
	      memcpy (name + 7, fun->sec->name + 5, len - 4);
	    }
	  else if (strncmp (fun->sec->name, ".gnu.linkonce.t.", 16) == 0)
	    {
	      /* ".gnu.linkonce.t.FOO" pairs with ".gnu.linkonce.r.FOO";
		 just flip the 't' (at index 14) to 'r'.  */
	      size_t len = strlen (fun->sec->name) + 1;
	      name = bfd_malloc (len);
	      if (name == NULL)
		return FALSE;
	      memcpy (name, fun->sec->name, len);
	      name[14] = 'r';
	    }

	  if (name != NULL)
	    {
	      asection *rodata = NULL;
	      asection *group_sec = elf_section_data (fun->sec)->next_in_group;
	      /* Prefer a rodata section from the same section group,
		 falling back to a file-wide name lookup.  */
	      if (group_sec == NULL)
		rodata = bfd_get_section_by_name (fun->sec->owner, name);
	      else
		while (group_sec != NULL && group_sec != fun->sec)
		  {
		    if (strcmp (group_sec->name, name) == 0)
		      {
			rodata = group_sec;
			break;
		      }
		    group_sec = elf_section_data (group_sec)->next_in_group;
		  }
	      fun->rodata = rodata;
	      if (fun->rodata)
		{
		  size += fun->rodata->size;
		  /* Drop the rodata pairing if it would overflow a
		     soft-icache line.  */
		  if (htab->params->line_size != 0
		      && size > htab->params->line_size)
		    {
		      size -= fun->rodata->size;
		      fun->rodata = NULL;
		    }
		  else
		    {
		      fun->rodata->linker_mark = 1;
		      fun->rodata->gc_mark = 1;
		      fun->rodata->flags &= ~SEC_CODE;
		    }
		}
	      free (name);
	    }
	}
      if (mos_param->max_overlay_size < size)
	mos_param->max_overlay_size = size;
    }

  /* Sort the call list so the deepest/highest-priority callees are
     visited first (see sort_calls).  */
  for (count = 0, call = fun->call_list; call != NULL; call = call->next)
    count += 1;

  if (count > 1)
    {
      struct call_info **calls = bfd_malloc (count * sizeof (*calls));
      if (calls == NULL)
	return FALSE;

      for (count = 0, call = fun->call_list; call != NULL; call = call->next)
	calls[count++] = call;

      qsort (calls, count, sizeof (*calls), sort_calls);

      /* Rebuild the singly linked list in sorted order.  */
      fun->call_list = NULL;
      while (count != 0)
	{
	  --count;
	  calls[count]->next = fun->call_list;
	  fun->call_list = calls[count];
	}
      free (calls);
    }

  for (call = fun->call_list; call != NULL; call = call->next)
    {
      if (call->is_pasted)
	{
	  /* There can only be one is_pasted call per function_info.  */
	  BFD_ASSERT (!fun->sec->segment_mark);
	  fun->sec->segment_mark = 1;
	}
      if (!call->broken_cycle
	  && !mark_overlay_section (call->fun, info, param))
	return FALSE;
    }

  /* Don't put entry code into an overlay.  The overlay manager needs
     a stack!  Also, don't mark .ovl.init as an overlay.  */
  if (fun->lo + fun->sec->output_offset + fun->sec->output_section->vma
      == info->output_bfd->start_address
      || strncmp (fun->sec->output_section->name, ".ovl.init", 9) == 0)
    {
      fun->sec->linker_mark = 0;
      if (fun->rodata != NULL)
	fun->rodata->linker_mark = 0;
    }
  return TRUE;
}
3562
3563 /* If non-zero then unmark functions called from those within sections
3564 that we need to unmark. Unfortunately this isn't reliable since the
3565 call graph cannot know the destination of function pointer calls. */
3566 #define RECURSE_UNMARK 0
3567
/* Parameter block for unmark_overlay_section.  */
struct _uos_param {
  /* Input section whose function(s) must stay out of overlays.  */
  asection *exclude_input_section;
  /* Output section whose contents must stay out of overlays.  */
  asection *exclude_output_section;
  /* Depth count of excluded nodes on the current traversal path;
     only consulted when RECURSE_UNMARK is non-zero.  */
  unsigned long clearing;
};
3573
3574 /* Undo some of mark_overlay_section's work. */
3575
3576 static bfd_boolean
3577 unmark_overlay_section (struct function_info *fun,
3578 struct bfd_link_info *info,
3579 void *param)
3580 {
3581 struct call_info *call;
3582 struct _uos_param *uos_param = param;
3583 unsigned int excluded = 0;
3584
3585 if (fun->visit5)
3586 return TRUE;
3587
3588 fun->visit5 = TRUE;
3589
3590 excluded = 0;
3591 if (fun->sec == uos_param->exclude_input_section
3592 || fun->sec->output_section == uos_param->exclude_output_section)
3593 excluded = 1;
3594
3595 if (RECURSE_UNMARK)
3596 uos_param->clearing += excluded;
3597
3598 if (RECURSE_UNMARK ? uos_param->clearing : excluded)
3599 {
3600 fun->sec->linker_mark = 0;
3601 if (fun->rodata)
3602 fun->rodata->linker_mark = 0;
3603 }
3604
3605 for (call = fun->call_list; call != NULL; call = call->next)
3606 if (!call->broken_cycle
3607 && !unmark_overlay_section (call->fun, info, param))
3608 return FALSE;
3609
3610 if (RECURSE_UNMARK)
3611 uos_param->clearing -= excluded;
3612 return TRUE;
3613 }
3614
/* Parameter block for collect_lib_sections.  */
struct _cl_param {
  /* Byte budget available for non-overlay "library" code.  */
  unsigned int lib_size;
  /* Output cursor into an array of (text, rodata) section pairs.  */
  asection **lib_sections;
};
3619
3620 /* Add sections we have marked as belonging to overlays to an array
3621 for consideration as non-overlay sections. The array consist of
3622 pairs of sections, (text,rodata), for functions in the call graph. */
3623
3624 static bfd_boolean
3625 collect_lib_sections (struct function_info *fun,
3626 struct bfd_link_info *info,
3627 void *param)
3628 {
3629 struct _cl_param *lib_param = param;
3630 struct call_info *call;
3631 unsigned int size;
3632
3633 if (fun->visit6)
3634 return TRUE;
3635
3636 fun->visit6 = TRUE;
3637 if (!fun->sec->linker_mark || !fun->sec->gc_mark || fun->sec->segment_mark)
3638 return TRUE;
3639
3640 size = fun->sec->size;
3641 if (fun->rodata)
3642 size += fun->rodata->size;
3643
3644 if (size <= lib_param->lib_size)
3645 {
3646 *lib_param->lib_sections++ = fun->sec;
3647 fun->sec->gc_mark = 0;
3648 if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
3649 {
3650 *lib_param->lib_sections++ = fun->rodata;
3651 fun->rodata->gc_mark = 0;
3652 }
3653 else
3654 *lib_param->lib_sections++ = NULL;
3655 }
3656
3657 for (call = fun->call_list; call != NULL; call = call->next)
3658 if (!call->broken_cycle)
3659 collect_lib_sections (call->fun, info, param);
3660
3661 return TRUE;
3662 }
3663
3664 /* qsort predicate to sort sections by call count. */
3665
3666 static int
3667 sort_lib (const void *a, const void *b)
3668 {
3669 asection *const *s1 = a;
3670 asection *const *s2 = b;
3671 struct _spu_elf_section_data *sec_data;
3672 struct spu_elf_stack_info *sinfo;
3673 int delta;
3674
3675 delta = 0;
3676 if ((sec_data = spu_elf_section_data (*s1)) != NULL
3677 && (sinfo = sec_data->u.i.stack_info) != NULL)
3678 {
3679 int i;
3680 for (i = 0; i < sinfo->num_fun; ++i)
3681 delta -= sinfo->fun[i].call_count;
3682 }
3683
3684 if ((sec_data = spu_elf_section_data (*s2)) != NULL
3685 && (sinfo = sec_data->u.i.stack_info) != NULL)
3686 {
3687 int i;
3688 for (i = 0; i < sinfo->num_fun; ++i)
3689 delta += sinfo->fun[i].call_count;
3690 }
3691
3692 if (delta != 0)
3693 return delta;
3694
3695 return s1 - s2;
3696 }
3697
3698 /* Remove some sections from those marked to be in overlays. Choose
3699 those that are called from many places, likely library functions. */
3700
3701 static unsigned int
3702 auto_ovl_lib_functions (struct bfd_link_info *info, unsigned int lib_size)
3703 {
3704 bfd *ibfd;
3705 asection **lib_sections;
3706 unsigned int i, lib_count;
3707 struct _cl_param collect_lib_param;
3708 struct function_info dummy_caller;
3709 struct spu_link_hash_table *htab;
3710
3711 memset (&dummy_caller, 0, sizeof (dummy_caller));
3712 lib_count = 0;
3713 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3714 {
3715 extern const bfd_target bfd_elf32_spu_vec;
3716 asection *sec;
3717
3718 if (ibfd->xvec != &bfd_elf32_spu_vec)
3719 continue;
3720
3721 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3722 if (sec->linker_mark
3723 && sec->size < lib_size
3724 && (sec->flags & SEC_CODE) != 0)
3725 lib_count += 1;
3726 }
3727 lib_sections = bfd_malloc (lib_count * 2 * sizeof (*lib_sections));
3728 if (lib_sections == NULL)
3729 return (unsigned int) -1;
3730 collect_lib_param.lib_size = lib_size;
3731 collect_lib_param.lib_sections = lib_sections;
3732 if (!for_each_node (collect_lib_sections, info, &collect_lib_param,
3733 TRUE))
3734 return (unsigned int) -1;
3735 lib_count = (collect_lib_param.lib_sections - lib_sections) / 2;
3736
3737 /* Sort sections so that those with the most calls are first. */
3738 if (lib_count > 1)
3739 qsort (lib_sections, lib_count, 2 * sizeof (*lib_sections), sort_lib);
3740
3741 htab = spu_hash_table (info);
3742 for (i = 0; i < lib_count; i++)
3743 {
3744 unsigned int tmp, stub_size;
3745 asection *sec;
3746 struct _spu_elf_section_data *sec_data;
3747 struct spu_elf_stack_info *sinfo;
3748
3749 sec = lib_sections[2 * i];
3750 /* If this section is OK, its size must be less than lib_size. */
3751 tmp = sec->size;
3752 /* If it has a rodata section, then add that too. */
3753 if (lib_sections[2 * i + 1])
3754 tmp += lib_sections[2 * i + 1]->size;
3755 /* Add any new overlay call stubs needed by the section. */
3756 stub_size = 0;
3757 if (tmp < lib_size
3758 && (sec_data = spu_elf_section_data (sec)) != NULL
3759 && (sinfo = sec_data->u.i.stack_info) != NULL)
3760 {
3761 int k;
3762 struct call_info *call;
3763
3764 for (k = 0; k < sinfo->num_fun; ++k)
3765 for (call = sinfo->fun[k].call_list; call; call = call->next)
3766 if (call->fun->sec->linker_mark)
3767 {
3768 struct call_info *p;
3769 for (p = dummy_caller.call_list; p; p = p->next)
3770 if (p->fun == call->fun)
3771 break;
3772 if (!p)
3773 stub_size += ovl_stub_size (htab->params);
3774 }
3775 }
3776 if (tmp + stub_size < lib_size)
3777 {
3778 struct call_info **pp, *p;
3779
3780 /* This section fits. Mark it as non-overlay. */
3781 lib_sections[2 * i]->linker_mark = 0;
3782 if (lib_sections[2 * i + 1])
3783 lib_sections[2 * i + 1]->linker_mark = 0;
3784 lib_size -= tmp + stub_size;
3785 /* Call stubs to the section we just added are no longer
3786 needed. */
3787 pp = &dummy_caller.call_list;
3788 while ((p = *pp) != NULL)
3789 if (!p->fun->sec->linker_mark)
3790 {
3791 lib_size += ovl_stub_size (htab->params);
3792 *pp = p->next;
3793 free (p);
3794 }
3795 else
3796 pp = &p->next;
3797 /* Add new call stubs to dummy_caller. */
3798 if ((sec_data = spu_elf_section_data (sec)) != NULL
3799 && (sinfo = sec_data->u.i.stack_info) != NULL)
3800 {
3801 int k;
3802 struct call_info *call;
3803
3804 for (k = 0; k < sinfo->num_fun; ++k)
3805 for (call = sinfo->fun[k].call_list;
3806 call;
3807 call = call->next)
3808 if (call->fun->sec->linker_mark)
3809 {
3810 struct call_info *callee;
3811 callee = bfd_malloc (sizeof (*callee));
3812 if (callee == NULL)
3813 return (unsigned int) -1;
3814 *callee = *call;
3815 if (!insert_callee (&dummy_caller, callee))
3816 free (callee);
3817 }
3818 }
3819 }
3820 }
3821 while (dummy_caller.call_list != NULL)
3822 {
3823 struct call_info *call = dummy_caller.call_list;
3824 dummy_caller.call_list = call->next;
3825 free (call);
3826 }
3827 for (i = 0; i < 2 * lib_count; i++)
3828 if (lib_sections[i])
3829 lib_sections[i]->gc_mark = 1;
3830 free (lib_sections);
3831 return lib_size;
3832 }
3833
3834 /* Build an array of overlay sections. The deepest node's section is
3835 added first, then its parent node's section, then everything called
3836 from the parent section. The idea being to group sections to
3837 minimise calls between different overlays. */
3838
static bfd_boolean
collect_overlays (struct function_info *fun,
		  struct bfd_link_info *info,
		  void *param)
{
  struct call_info *call;
  bfd_boolean added_fun;
  asection ***ovly_sections = param;

  /* Process each node only once.  */
  if (fun->visit7)
    return TRUE;

  fun->visit7 = TRUE;
  /* Visit the first (deepest, after sort_calls) non-pasted callee
     before adding FUN itself, so deeper sections come first.  */
  for (call = fun->call_list; call != NULL; call = call->next)
    if (!call->is_pasted && !call->broken_cycle)
      {
	if (!collect_overlays (call->fun, info, ovly_sections))
	  return FALSE;
	break;
      }

  added_fun = FALSE;
  if (fun->sec->linker_mark && fun->sec->gc_mark)
    {
      /* Append the (text, rodata-or-NULL) pair to the output array,
	 clearing gc_mark so the pair is emitted only once.  */
      fun->sec->gc_mark = 0;
      *(*ovly_sections)++ = fun->sec;
      if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
	{
	  fun->rodata->gc_mark = 0;
	  *(*ovly_sections)++ = fun->rodata;
	}
      else
	*(*ovly_sections)++ = NULL;
      added_fun = TRUE;

      /* Pasted sections must stay with the first section.  We don't
	 put pasted sections in the array, just the first section.
	 Mark subsequent sections as already considered.  */
      if (fun->sec->segment_mark)
	{
	  struct function_info *call_fun = fun;
	  do
	    {
	      /* Follow the is_pasted chain, clearing gc_mark on each
		 continuation section (and its rodata).  */
	      for (call = call_fun->call_list; call != NULL; call = call->next)
		if (call->is_pasted)
		  {
		    call_fun = call->fun;
		    call_fun->sec->gc_mark = 0;
		    if (call_fun->rodata)
		      call_fun->rodata->gc_mark = 0;
		    break;
		  }
	      /* segment_mark promised a pasted call; its absence means
		 the call graph is corrupt.  */
	      if (call == NULL)
		abort ();
	    }
	  while (call_fun->sec->segment_mark);
	}
    }

  /* Now visit the remaining callees.  */
  for (call = fun->call_list; call != NULL; call = call->next)
    if (!call->broken_cycle
	&& !collect_overlays (call->fun, info, ovly_sections))
      return FALSE;

  if (added_fun)
    {
      struct _spu_elf_section_data *sec_data;
      struct spu_elf_stack_info *sinfo;

      /* Also visit the other functions that share FUN's section.  */
      if ((sec_data = spu_elf_section_data (fun->sec)) != NULL
	  && (sinfo = sec_data->u.i.stack_info) != NULL)
	{
	  int i;
	  for (i = 0; i < sinfo->num_fun; ++i)
	    if (!collect_overlays (&sinfo->fun[i], info, ovly_sections))
	      return FALSE;
	}
    }

  return TRUE;
}
3920
/* Parameter block for sum_stack.  */
struct _sum_stack_param {
  /* Out: cumulative stack requirement of the node just visited.  */
  size_t cum_stack;
  /* Out: maximum cumulative stack seen over all root nodes.  */
  size_t overall_stack;
  /* In: whether to define __stack_* absolute symbols per function.  */
  bfd_boolean emit_stack_syms;
};
3926
3927 /* Descend the call graph for FUN, accumulating total stack required. */
3928
static bfd_boolean
sum_stack (struct function_info *fun,
	   struct bfd_link_info *info,
	   void *param)
{
  struct call_info *call;
  struct function_info *max;	/* Callee on the worst-case stack path.  */
  size_t stack, cum_stack;
  const char *f1;
  bfd_boolean has_call;
  struct _sum_stack_param *sum_stack_param = param;
  struct spu_link_hash_table *htab;

  cum_stack = fun->stack;
  sum_stack_param->cum_stack = cum_stack;
  /* Already summed: fun->stack is already cumulative, and cum_stack
     has been reported via the param above.  */
  if (fun->visit3)
    return TRUE;

  has_call = FALSE;
  max = NULL;
  for (call = fun->call_list; call; call = call->next)
    {
      if (call->broken_cycle)
	continue;
      if (!call->is_pasted)
	has_call = TRUE;
      if (!sum_stack (call->fun, info, sum_stack_param))
	return FALSE;
      stack = sum_stack_param->cum_stack;
      /* Include caller stack for normal calls, don't do so for
	 tail calls.  fun->stack here is local stack usage for
	 this function.  */
      if (!call->is_tail || call->is_pasted || call->fun->start != NULL)
	stack += fun->stack;
      if (cum_stack < stack)
	{
	  cum_stack = stack;
	  max = call->fun;
	}
    }

  sum_stack_param->cum_stack = cum_stack;
  stack = fun->stack;
  /* Now fun->stack holds cumulative stack.  */
  fun->stack = cum_stack;
  fun->visit3 = TRUE;

  /* Root nodes contribute to the overall (program-wide) figure.  */
  if (!fun->non_root
      && sum_stack_param->overall_stack < cum_stack)
    sum_stack_param->overall_stack = cum_stack;

  htab = spu_hash_table (info);
  /* When auto-overlaying, only the sums are wanted; skip reporting
     and symbol emission.  */
  if (htab->params->auto_overlay)
    return TRUE;

  f1 = func_name (fun);
  if (htab->params->stack_analysis)
    {
      if (!fun->non_root)
	info->callbacks->info (_(" %s: 0x%v\n"), f1, (bfd_vma) cum_stack);
      info->callbacks->minfo (_("%s: 0x%v 0x%v\n"),
			      f1, (bfd_vma) stack, (bfd_vma) cum_stack);

      if (has_call)
	{
	  /* List callees; "*" marks the worst-case path, "t" a tail
	     call.  */
	  info->callbacks->minfo (_(" calls:\n"));
	  for (call = fun->call_list; call; call = call->next)
	    if (!call->is_pasted && !call->broken_cycle)
	      {
		const char *f2 = func_name (call->fun);
		const char *ann1 = call->fun == max ? "*" : " ";
		const char *ann2 = call->is_tail ? "t" : " ";

		info->callbacks->minfo (_(" %s%s %s\n"), ann1, ann2, f2);
	      }
	}
    }

  if (sum_stack_param->emit_stack_syms)
    {
      /* 18 = strlen ("__stack_") + 8 hex digits + '_' + NUL, which
	 also covers the shorter "__stack_%s" form.  */
      char *name = bfd_malloc (18 + strlen (f1));
      struct elf_link_hash_entry *h;

      if (name == NULL)
	return FALSE;

      /* Local symbols get the section id folded into the name to keep
	 it unique.  */
      if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
	sprintf (name, "__stack_%s", f1);
      else
	sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);

      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
      free (name);
      /* Only define the symbol if the user hasn't already.  */
      if (h != NULL
	  && (h->root.type == bfd_link_hash_new
	      || h->root.type == bfd_link_hash_undefined
	      || h->root.type == bfd_link_hash_undefweak))
	{
	  /* Absolute symbol whose value is the cumulative stack.  */
	  h->root.type = bfd_link_hash_defined;
	  h->root.u.def.section = bfd_abs_section_ptr;
	  h->root.u.def.value = cum_stack;
	  h->size = 0;
	  h->type = 0;
	  h->ref_regular = 1;
	  h->def_regular = 1;
	  h->ref_regular_nonweak = 1;
	  h->forced_local = 1;
	  h->non_elf = 0;
	}
    }

  return TRUE;
}
4042
4043 /* SEC is part of a pasted function. Return the call_info for the
4044 next section of this function. */
4045
4046 static struct call_info *
4047 find_pasted_call (asection *sec)
4048 {
4049 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
4050 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
4051 struct call_info *call;
4052 int k;
4053
4054 for (k = 0; k < sinfo->num_fun; ++k)
4055 for (call = sinfo->fun[k].call_list; call != NULL; call = call->next)
4056 if (call->is_pasted)
4057 return call;
4058 abort ();
4059 return 0;
4060 }
4061
4062 /* qsort predicate to sort bfds by file name. */
4063
4064 static int
4065 sort_bfds (const void *a, const void *b)
4066 {
4067 bfd *const *abfd1 = a;
4068 bfd *const *abfd2 = b;
4069
4070 return strcmp ((*abfd1)->filename, (*abfd2)->filename);
4071 }
4072
/* Write the input-section statements for overlay OVLYNUM to SCRIPT,
   starting at index BASE of OVLY_SECTIONS/OVLY_MAP (COUNT entries).
   Text sections are listed first, then the paired rodata sections.
   Returns the index of the first entry belonging to the next overlay,
   or -1 (as unsigned int) on write failure.  */

static unsigned int
print_one_overlay_section (FILE *script,
			   unsigned int base,
			   unsigned int count,
			   unsigned int ovlynum,
			   unsigned int *ovly_map,
			   asection **ovly_sections,
			   struct bfd_link_info *info)
{
  unsigned int j;

  /* First pass: the text sections of this overlay.  */
  for (j = base; j < count && ovly_map[j] == ovlynum; j++)
    {
      asection *sec = ovly_sections[2 * j];

      /* Emit "archive<sep>object (section)".  */
      if (fprintf (script, " %s%c%s (%s)\n",
		   (sec->owner->my_archive != NULL
		    ? sec->owner->my_archive->filename : ""),
		   info->path_separator,
		   sec->owner->filename,
		   sec->name) <= 0)
	return -1;
      if (sec->segment_mark)
	{
	  /* Pasted functions aren't in the array; follow the pasted
	     chain and emit each continuation section too.  */
	  struct call_info *call = find_pasted_call (sec);
	  while (call != NULL)
	    {
	      struct function_info *call_fun = call->fun;
	      sec = call_fun->sec;
	      if (fprintf (script, " %s%c%s (%s)\n",
			   (sec->owner->my_archive != NULL
			    ? sec->owner->my_archive->filename : ""),
			   info->path_separator,
			   sec->owner->filename,
			   sec->name) <= 0)
		return -1;
	      for (call = call_fun->call_list; call; call = call->next)
		if (call->is_pasted)
		  break;
	    }
	}
    }

  /* Second pass: the rodata sections paired with the text above.  */
  for (j = base; j < count && ovly_map[j] == ovlynum; j++)
    {
      asection *sec = ovly_sections[2 * j + 1];
      if (sec != NULL
	  && fprintf (script, " %s%c%s (%s)\n",
		      (sec->owner->my_archive != NULL
		       ? sec->owner->my_archive->filename : ""),
		      info->path_separator,
		      sec->owner->filename,
		      sec->name) <= 0)
	return -1;

      sec = ovly_sections[2 * j];
      if (sec->segment_mark)
	{
	  /* And the rodata of any pasted continuation sections.  */
	  struct call_info *call = find_pasted_call (sec);
	  while (call != NULL)
	    {
	      struct function_info *call_fun = call->fun;
	      sec = call_fun->rodata;
	      if (sec != NULL
		  && fprintf (script, " %s%c%s (%s)\n",
			      (sec->owner->my_archive != NULL
			       ? sec->owner->my_archive->filename : ""),
			      info->path_separator,
			      sec->owner->filename,
			      sec->name) <= 0)
		return -1;
	      for (call = call_fun->call_list; call; call = call->next)
		if (call->is_pasted)
		  break;
	    }
	}
    }

  return j;
}
4153
4154 /* Handle --auto-overlay. */
4155
4156 static void
4157 spu_elf_auto_overlay (struct bfd_link_info *info)
4158 {
4159 bfd *ibfd;
4160 bfd **bfd_arr;
4161 struct elf_segment_map *m;
4162 unsigned int fixed_size, lo, hi;
4163 struct spu_link_hash_table *htab;
4164 unsigned int base, i, count, bfd_count;
4165 unsigned int region, ovlynum;
4166 asection **ovly_sections, **ovly_p;
4167 unsigned int *ovly_map;
4168 FILE *script;
4169 unsigned int total_overlay_size, overlay_size;
4170 const char *ovly_mgr_entry;
4171 struct elf_link_hash_entry *h;
4172 struct _mos_param mos_param;
4173 struct _uos_param uos_param;
4174 struct function_info dummy_caller;
4175
4176 /* Find the extents of our loadable image. */
4177 lo = (unsigned int) -1;
4178 hi = 0;
4179 for (m = elf_tdata (info->output_bfd)->segment_map; m != NULL; m = m->next)
4180 if (m->p_type == PT_LOAD)
4181 for (i = 0; i < m->count; i++)
4182 if (m->sections[i]->size != 0)
4183 {
4184 if (m->sections[i]->vma < lo)
4185 lo = m->sections[i]->vma;
4186 if (m->sections[i]->vma + m->sections[i]->size - 1 > hi)
4187 hi = m->sections[i]->vma + m->sections[i]->size - 1;
4188 }
4189 fixed_size = hi + 1 - lo;
4190
4191 if (!discover_functions (info))
4192 goto err_exit;
4193
4194 if (!build_call_tree (info))
4195 goto err_exit;
4196
4197 htab = spu_hash_table (info);
4198 if (htab->reserved == 0)
4199 {
4200 struct _sum_stack_param sum_stack_param;
4201
4202 sum_stack_param.emit_stack_syms = 0;
4203 sum_stack_param.overall_stack = 0;
4204 if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
4205 goto err_exit;
4206 htab->reserved = sum_stack_param.overall_stack + htab->extra_stack_space;
4207 }
4208
4209 /* No need for overlays if everything already fits. */
4210 if (fixed_size + htab->reserved <= htab->local_store
4211 && htab->params->ovly_flavour != ovly_soft_icache)
4212 {
4213 htab->params->auto_overlay = 0;
4214 return;
4215 }
4216
4217 uos_param.exclude_input_section = 0;
4218 uos_param.exclude_output_section
4219 = bfd_get_section_by_name (info->output_bfd, ".interrupt");
4220
4221 ovly_mgr_entry = "__ovly_load";
4222 if (htab->params->ovly_flavour == ovly_soft_icache)
4223 ovly_mgr_entry = "__icache_br_handler";
4224 h = elf_link_hash_lookup (&htab->elf, ovly_mgr_entry,
4225 FALSE, FALSE, FALSE);
4226 if (h != NULL
4227 && (h->root.type == bfd_link_hash_defined
4228 || h->root.type == bfd_link_hash_defweak)
4229 && h->def_regular)
4230 {
4231 /* We have a user supplied overlay manager. */
4232 uos_param.exclude_input_section = h->root.u.def.section;
4233 }
4234 else
4235 {
4236 /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
4237 builtin version to .text, and will adjust .text size. */
4238 fixed_size += (*htab->params->spu_elf_load_ovl_mgr) ();
4239 }
4240
4241 /* Mark overlay sections, and find max overlay section size. */
4242 mos_param.max_overlay_size = 0;
4243 if (!for_each_node (mark_overlay_section, info, &mos_param, TRUE))
4244 goto err_exit;
4245
4246 /* We can't put the overlay manager or interrupt routines in
4247 overlays. */
4248 uos_param.clearing = 0;
4249 if ((uos_param.exclude_input_section
4250 || uos_param.exclude_output_section)
4251 && !for_each_node (unmark_overlay_section, info, &uos_param, TRUE))
4252 goto err_exit;
4253
4254 bfd_count = 0;
4255 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
4256 ++bfd_count;
4257 bfd_arr = bfd_malloc (bfd_count * sizeof (*bfd_arr));
4258 if (bfd_arr == NULL)
4259 goto err_exit;
4260
4261 /* Count overlay sections, and subtract their sizes from "fixed_size". */
4262 count = 0;
4263 bfd_count = 0;
4264 total_overlay_size = 0;
4265 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
4266 {
4267 extern const bfd_target bfd_elf32_spu_vec;
4268 asection *sec;
4269 unsigned int old_count;
4270
4271 if (ibfd->xvec != &bfd_elf32_spu_vec)
4272 continue;
4273
4274 old_count = count;
4275 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
4276 if (sec->linker_mark)
4277 {
4278 if ((sec->flags & SEC_CODE) != 0)
4279 count += 1;
4280 fixed_size -= sec->size;
4281 total_overlay_size += sec->size;
4282 }
4283 else if ((sec->flags & (SEC_ALLOC | SEC_LOAD)) == (SEC_ALLOC | SEC_LOAD)
4284 && sec->output_section->owner == info->output_bfd
4285 && strncmp (sec->output_section->name, ".ovl.init", 9) == 0)
4286 fixed_size -= sec->size;
4287 if (count != old_count)
4288 bfd_arr[bfd_count++] = ibfd;
4289 }
4290
4291 /* Since the overlay link script selects sections by file name and
4292 section name, ensure that file names are unique. */
4293 if (bfd_count > 1)
4294 {
4295 bfd_boolean ok = TRUE;
4296
4297 qsort (bfd_arr, bfd_count, sizeof (*bfd_arr), sort_bfds);
4298 for (i = 1; i < bfd_count; ++i)
4299 if (strcmp (bfd_arr[i - 1]->filename, bfd_arr[i]->filename) == 0)
4300 {
4301 if (bfd_arr[i - 1]->my_archive == bfd_arr[i]->my_archive)
4302 {
4303 if (bfd_arr[i - 1]->my_archive && bfd_arr[i]->my_archive)
4304 info->callbacks->einfo (_("%s duplicated in %s\n"),
4305 bfd_arr[i]->filename,
4306 bfd_arr[i]->my_archive->filename);
4307 else
4308 info->callbacks->einfo (_("%s duplicated\n"),
4309 bfd_arr[i]->filename);
4310 ok = FALSE;
4311 }
4312 }
4313 if (!ok)
4314 {
4315 info->callbacks->einfo (_("sorry, no support for duplicate "
4316 "object files in auto-overlay script\n"));
4317 bfd_set_error (bfd_error_bad_value);
4318 goto err_exit;
4319 }
4320 }
4321 free (bfd_arr);
4322
4323 fixed_size += htab->reserved;
4324 fixed_size += htab->non_ovly_stub * ovl_stub_size (htab->params);
4325 if (fixed_size + mos_param.max_overlay_size <= htab->local_store)
4326 {
4327 if (htab->params->ovly_flavour == ovly_soft_icache)
4328 {
4329 /* Stubs in the non-icache area are bigger. */
4330 fixed_size += htab->non_ovly_stub * 16;
4331 /* Space for icache manager tables.
4332 a) Tag array, one quadword per cache line.
4333 - word 0: ia address of present line, init to zero. */
4334 fixed_size += 16 << htab->num_lines_log2;
4335 /* b) Rewrite "to" list, one quadword per cache line. */
4336 fixed_size += 16 << htab->num_lines_log2;
4337 /* c) Rewrite "from" list, one byte per outgoing branch (rounded up
4338 to a power-of-two number of full quadwords) per cache line. */
4339 fixed_size += 16 << (htab->fromelem_size_log2
4340 + htab->num_lines_log2);
4341 /* d) Pointer to __ea backing store (toe), 1 quadword. */
4342 fixed_size += 16;
4343 }
4344 else
4345 {
4346 /* Guess number of overlays. Assuming overlay buffer is on
4347 average only half full should be conservative. */
4348 ovlynum = (total_overlay_size * 2 * htab->params->num_lines
4349 / (htab->local_store - fixed_size));
4350 /* Space for _ovly_table[], _ovly_buf_table[] and toe. */
4351 fixed_size += ovlynum * 16 + 16 + 4 + 16;
4352 }
4353 }
4354
4355 if (fixed_size + mos_param.max_overlay_size > htab->local_store)
4356 info->callbacks->einfo (_("non-overlay size of 0x%v plus maximum overlay "
4357 "size of 0x%v exceeds local store\n"),
4358 (bfd_vma) fixed_size,
4359 (bfd_vma) mos_param.max_overlay_size);
4360
4361 /* Now see if we should put some functions in the non-overlay area. */
4362 else if (fixed_size < htab->overlay_fixed)
4363 {
4364 unsigned int max_fixed, lib_size;
4365
4366 max_fixed = htab->local_store - mos_param.max_overlay_size;
4367 if (max_fixed > htab->overlay_fixed)
4368 max_fixed = htab->overlay_fixed;
4369 lib_size = max_fixed - fixed_size;
4370 lib_size = auto_ovl_lib_functions (info, lib_size);
4371 if (lib_size == (unsigned int) -1)
4372 goto err_exit;
4373 fixed_size = max_fixed - lib_size;
4374 }
4375
4376 /* Build an array of sections, suitably sorted to place into
4377 overlays. */
4378 ovly_sections = bfd_malloc (2 * count * sizeof (*ovly_sections));
4379 if (ovly_sections == NULL)
4380 goto err_exit;
4381 ovly_p = ovly_sections;
4382 if (!for_each_node (collect_overlays, info, &ovly_p, TRUE))
4383 goto err_exit;
4384 count = (size_t) (ovly_p - ovly_sections) / 2;
4385 ovly_map = bfd_malloc (count * sizeof (*ovly_map));
4386 if (ovly_map == NULL)
4387 goto err_exit;
4388
4389 memset (&dummy_caller, 0, sizeof (dummy_caller));
4390 overlay_size = (htab->local_store - fixed_size) / htab->params->num_lines;
4391 if (htab->params->line_size != 0)
4392 overlay_size = htab->params->line_size;
4393 base = 0;
4394 ovlynum = 0;
4395 while (base < count)
4396 {
4397 unsigned int size = 0, rosize = 0, roalign = 0;
4398
4399 for (i = base; i < count; i++)
4400 {
4401 asection *sec, *rosec;
4402 unsigned int tmp, rotmp;
4403 unsigned int num_stubs;
4404 struct call_info *call, *pasty;
4405 struct _spu_elf_section_data *sec_data;
4406 struct spu_elf_stack_info *sinfo;
4407 int k;
4408
4409 /* See whether we can add this section to the current
4410 overlay without overflowing our overlay buffer. */
4411 sec = ovly_sections[2 * i];
4412 tmp = align_power (size, sec->alignment_power) + sec->size;
4413 rotmp = rosize;
4414 rosec = ovly_sections[2 * i + 1];
4415 if (rosec != NULL)
4416 {
4417 rotmp = align_power (rotmp, rosec->alignment_power) + rosec->size;
4418 if (roalign < rosec->alignment_power)
4419 roalign = rosec->alignment_power;
4420 }
4421 if (align_power (tmp, roalign) + rotmp > overlay_size)
4422 break;
4423 if (sec->segment_mark)
4424 {
4425 /* Pasted sections must stay together, so add their
4426 sizes too. */
4427 struct call_info *pasty = find_pasted_call (sec);
4428 while (pasty != NULL)
4429 {
4430 struct function_info *call_fun = pasty->fun;
4431 tmp = (align_power (tmp, call_fun->sec->alignment_power)
4432 + call_fun->sec->size);
4433 if (call_fun->rodata)
4434 {
4435 rotmp = (align_power (rotmp,
4436 call_fun->rodata->alignment_power)
4437 + call_fun->rodata->size);
4438 if (roalign < rosec->alignment_power)
4439 roalign = rosec->alignment_power;
4440 }
4441 for (pasty = call_fun->call_list; pasty; pasty = pasty->next)
4442 if (pasty->is_pasted)
4443 break;
4444 }
4445 }
4446 if (align_power (tmp, roalign) + rotmp > overlay_size)
4447 break;
4448
4449 /* If we add this section, we might need new overlay call
4450 stubs. Add any overlay section calls to dummy_call. */
4451 pasty = NULL;
4452 sec_data = spu_elf_section_data (sec);
4453 sinfo = sec_data->u.i.stack_info;
4454 for (k = 0; k < sinfo->num_fun; ++k)
4455 for (call = sinfo->fun[k].call_list; call; call = call->next)
4456 if (call->is_pasted)
4457 {
4458 BFD_ASSERT (pasty == NULL);
4459 pasty = call;
4460 }
4461 else if (call->fun->sec->linker_mark)
4462 {
4463 if (!copy_callee (&dummy_caller, call))
4464 goto err_exit;
4465 }
4466 while (pasty != NULL)
4467 {
4468 struct function_info *call_fun = pasty->fun;
4469 pasty = NULL;
4470 for (call = call_fun->call_list; call; call = call->next)
4471 if (call->is_pasted)
4472 {
4473 BFD_ASSERT (pasty == NULL);
4474 pasty = call;
4475 }
4476 else if (!copy_callee (&dummy_caller, call))
4477 goto err_exit;
4478 }
4479
4480 /* Calculate call stub size. */
4481 num_stubs = 0;
4482 for (call = dummy_caller.call_list; call; call = call->next)
4483 {
4484 unsigned int k;
4485 unsigned int stub_delta = 1;
4486
4487 if (htab->params->ovly_flavour == ovly_soft_icache)
4488 stub_delta = call->count;
4489 num_stubs += stub_delta;
4490
4491 /* If the call is within this overlay, we won't need a
4492 stub. */
4493 for (k = base; k < i + 1; k++)
4494 if (call->fun->sec == ovly_sections[2 * k])
4495 {
4496 num_stubs -= stub_delta;
4497 break;
4498 }
4499 }
4500 if (htab->params->ovly_flavour == ovly_soft_icache
4501 && num_stubs > htab->params->max_branch)
4502 break;
4503 if (align_power (tmp, roalign) + rotmp
4504 + num_stubs * ovl_stub_size (htab->params) > overlay_size)
4505 break;
4506 size = tmp;
4507 rosize = rotmp;
4508 }
4509
4510 if (i == base)
4511 {
4512 info->callbacks->einfo (_("%B:%A%s exceeds overlay size\n"),
4513 ovly_sections[2 * i]->owner,
4514 ovly_sections[2 * i],
4515 ovly_sections[2 * i + 1] ? " + rodata" : "");
4516 bfd_set_error (bfd_error_bad_value);
4517 goto err_exit;
4518 }
4519
4520 while (dummy_caller.call_list != NULL)
4521 {
4522 struct call_info *call = dummy_caller.call_list;
4523 dummy_caller.call_list = call->next;
4524 free (call);
4525 }
4526
4527 ++ovlynum;
4528 while (base < i)
4529 ovly_map[base++] = ovlynum;
4530 }
4531
4532 script = htab->params->spu_elf_open_overlay_script ();
4533
4534 if (fprintf (script, "SECTIONS\n{\n") <= 0)
4535 goto file_err;
4536
4537 if (htab->params->ovly_flavour == ovly_soft_icache)
4538 {
4539 if (fprintf (script,
4540 " .data.icache ALIGN (16) : { *(.ovtab) *(.data.icache) }\n"
4541 " . = ALIGN (%u);\n"
4542 " .ovl.init : { *(.ovl.init) }\n"
4543 " . = ABSOLUTE (ADDR (.ovl.init));\n",
4544 htab->params->line_size) <= 0)
4545 goto file_err;
4546
4547 base = 0;
4548 ovlynum = 1;
4549 while (base < count)
4550 {
4551 unsigned int indx = ovlynum - 1;
4552 unsigned int vma, lma;
4553
4554 vma = (indx & (htab->params->num_lines - 1)) << htab->line_size_log2;
4555 lma = indx << htab->line_size_log2;
4556
4557 if (fprintf (script, " .ovly%u ABSOLUTE (ADDR (.ovl.init)) + %u "
4558 ": AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16) + %u) {\n",
4559 ovlynum, vma, lma) <= 0)
4560 goto file_err;
4561
4562 base = print_one_overlay_section (script, base, count, ovlynum,
4563 ovly_map, ovly_sections, info);
4564 if (base == (unsigned) -1)
4565 goto file_err;
4566
4567 if (fprintf (script, " }\n") <= 0)
4568 goto file_err;
4569
4570 ovlynum++;
4571 }
4572
4573 if (fprintf (script, " . = ABSOLUTE (ADDR (.ovl.init)) + %u;\n",
4574 1 << (htab->num_lines_log2 + htab->line_size_log2)) <= 0)
4575 goto file_err;
4576 }
4577 else
4578 {
4579 if (fprintf (script,
4580 " . = ALIGN (16);\n"
4581 " .ovl.init : { *(.ovl.init) }\n"
4582 " . = ABSOLUTE (ADDR (.ovl.init));\n") <= 0)
4583 goto file_err;
4584
4585 for (region = 1; region <= htab->params->num_lines; region++)
4586 {
4587 ovlynum = region;
4588 base = 0;
4589 while (base < count && ovly_map[base] < ovlynum)
4590 base++;
4591
4592 if (base == count)
4593 break;
4594
4595 if (region == 1)
4596 {
4597 /* We need to set lma since we are overlaying .ovl.init. */
4598 if (fprintf (script,
4599 " OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))\n {\n") <= 0)
4600 goto file_err;
4601 }
4602 else
4603 {
4604 if (fprintf (script, " OVERLAY :\n {\n") <= 0)
4605 goto file_err;
4606 }
4607
4608 while (base < count)
4609 {
4610 if (fprintf (script, " .ovly%u {\n", ovlynum) <= 0)
4611 goto file_err;
4612
4613 base = print_one_overlay_section (script, base, count, ovlynum,
4614 ovly_map, ovly_sections, info);
4615 if (base == (unsigned) -1)
4616 goto file_err;
4617
4618 if (fprintf (script, " }\n") <= 0)
4619 goto file_err;
4620
4621 ovlynum += htab->params->num_lines;
4622 while (base < count && ovly_map[base] < ovlynum)
4623 base++;
4624 }
4625
4626 if (fprintf (script, " }\n") <= 0)
4627 goto file_err;
4628 }
4629
4630 }
4631
4632 free (ovly_map);
4633 free (ovly_sections);
4634
4635 if (fprintf (script, "}\nINSERT BEFORE .text;\n") <= 0)
4636 goto file_err;
4637 if (fclose (script) != 0)
4638 goto file_err;
4639
4640 if (htab->params->auto_overlay & AUTO_RELINK)
4641 (*htab->params->spu_elf_relink) ();
4642
4643 xexit (0);
4644
4645 file_err:
4646 bfd_set_error (bfd_error_system_call);
4647 err_exit:
4648 info->callbacks->einfo ("%F%P: auto overlay error: %E\n");
4649 xexit (1);
4650 }
4651
4652 /* Provide an estimate of total stack required. */
4653
4654 static bfd_boolean
4655 spu_elf_stack_analysis (struct bfd_link_info *info)
4656 {
4657 struct spu_link_hash_table *htab;
4658 struct _sum_stack_param sum_stack_param;
4659
4660 if (!discover_functions (info))
4661 return FALSE;
4662
4663 if (!build_call_tree (info))
4664 return FALSE;
4665
4666 htab = spu_hash_table (info);
4667 if (htab->params->stack_analysis)
4668 {
4669 info->callbacks->info (_("Stack size for call graph root nodes.\n"));
4670 info->callbacks->minfo (_("\nStack size for functions. "
4671 "Annotations: '*' max stack, 't' tail call\n"));
4672 }
4673
4674 sum_stack_param.emit_stack_syms = htab->params->emit_stack_syms;
4675 sum_stack_param.overall_stack = 0;
4676 if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
4677 return FALSE;
4678
4679 if (htab->params->stack_analysis)
4680 info->callbacks->info (_("Maximum stack required is 0x%v\n"),
4681 (bfd_vma) sum_stack_param.overall_stack);
4682 return TRUE;
4683 }
4684
4685 /* Perform a final link. */
4686
4687 static bfd_boolean
4688 spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
4689 {
4690 struct spu_link_hash_table *htab = spu_hash_table (info);
4691
4692 if (htab->params->auto_overlay)
4693 spu_elf_auto_overlay (info);
4694
4695 if ((htab->params->stack_analysis
4696 || (htab->params->ovly_flavour == ovly_soft_icache
4697 && htab->params->lrlive_analysis))
4698 && !spu_elf_stack_analysis (info))
4699 info->callbacks->einfo ("%X%P: stack/lrlive analysis error: %E\n");
4700
4701 if (!spu_elf_build_stubs (info))
4702 info->callbacks->einfo ("%F%P: can not build overlay stubs: %E\n");
4703
4704 return bfd_elf_final_link (output_bfd, info);
4705 }
4706
4707 /* Called when not normally emitting relocs, ie. !info->relocatable
4708 and !info->emitrelocations. Returns a count of special relocs
4709 that need to be emitted. */
4710
4711 static unsigned int
4712 spu_elf_count_relocs (struct bfd_link_info *info, asection *sec)
4713 {
4714 Elf_Internal_Rela *relocs;
4715 unsigned int count = 0;
4716
4717 relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
4718 info->keep_memory);
4719 if (relocs != NULL)
4720 {
4721 Elf_Internal_Rela *rel;
4722 Elf_Internal_Rela *relend = relocs + sec->reloc_count;
4723
4724 for (rel = relocs; rel < relend; rel++)
4725 {
4726 int r_type = ELF32_R_TYPE (rel->r_info);
4727 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
4728 ++count;
4729 }
4730
4731 if (elf_section_data (sec)->relocs != relocs)
4732 free (relocs);
4733 }
4734
4735 return count;
4736 }
4737
/* Functions for adding fixup records to .fixup */

/* Each fixup record is a single 32-bit word.  */
#define FIXUP_RECORD_SIZE 4

/* Store 32-bit value ADDR into fixup record INDEX of .fixup.  */
#define FIXUP_PUT(output_bfd,htab,index,addr) \
  bfd_put_32 (output_bfd, addr, \
	      htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
/* Read back fixup record INDEX from .fixup.  */
#define FIXUP_GET(output_bfd,htab,index) \
  bfd_get_32 (output_bfd, \
	      htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
4748
4749 /* Store OFFSET in .fixup. This assumes it will be called with an
4750 increasing OFFSET. When this OFFSET fits with the last base offset,
4751 it just sets a bit, otherwise it adds a new fixup record. */
4752 static void
4753 spu_elf_emit_fixup (bfd * output_bfd, struct bfd_link_info *info,
4754 bfd_vma offset)
4755 {
4756 struct spu_link_hash_table *htab = spu_hash_table (info);
4757 asection *sfixup = htab->sfixup;
4758 bfd_vma qaddr = offset & ~(bfd_vma) 15;
4759 bfd_vma bit = ((bfd_vma) 8) >> ((offset & 15) >> 2);
4760 if (sfixup->reloc_count == 0)
4761 {
4762 FIXUP_PUT (output_bfd, htab, 0, qaddr | bit);
4763 sfixup->reloc_count++;
4764 }
4765 else
4766 {
4767 bfd_vma base = FIXUP_GET (output_bfd, htab, sfixup->reloc_count - 1);
4768 if (qaddr != (base & ~(bfd_vma) 15))
4769 {
4770 if ((sfixup->reloc_count + 1) * FIXUP_RECORD_SIZE > sfixup->size)
4771 (*_bfd_error_handler) (_("fatal error while creating .fixup"));
4772 FIXUP_PUT (output_bfd, htab, sfixup->reloc_count, qaddr | bit);
4773 sfixup->reloc_count++;
4774 }
4775 else
4776 FIXUP_PUT (output_bfd, htab, sfixup->reloc_count - 1, base | bit);
4777 }
4778 }
4779
/* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD.
   Returns FALSE on error, TRUE on success, and 2 when R_SPU_PPU*
   relocs were squeezed out of the reloc array for later emission
   (NOTE(review): the value 2 appears to signal that adjustment to
   _bfd_elf_link_output_relocs — confirm against the generic ELF
   linker's relocate_section contract).  */

static int
spu_elf_relocate_section (bfd *output_bfd,
			  struct bfd_link_info *info,
			  bfd *input_bfd,
			  asection *input_section,
			  bfd_byte *contents,
			  Elf_Internal_Rela *relocs,
			  Elf_Internal_Sym *local_syms,
			  asection **local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel, *relend;
  struct spu_link_hash_table *htab;
  asection *ea;
  int ret = TRUE;
  bfd_boolean emit_these_relocs = FALSE;
  bfd_boolean is_ea_sym;
  bfd_boolean stubs;
  unsigned int iovl = 0;

  htab = spu_hash_table (info);
  /* Only consider stub redirection when stub sections exist and this
     input section could plausibly need them.  */
  stubs = (htab->stub_sec != NULL
	   && maybe_needs_stubs (input_section));
  iovl = overlay_index (input_section);
  ea = bfd_get_section_by_name (output_bfd, "._ea");
  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
  sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int r_type;
      reloc_howto_type *howto;
      unsigned int r_symndx;
      Elf_Internal_Sym *sym;
      asection *sec;
      struct elf_link_hash_entry *h;
      const char *sym_name;
      bfd_vma relocation;
      bfd_vma addend;
      bfd_reloc_status_type r;
      bfd_boolean unresolved_reloc;
      bfd_boolean warned;
      enum _stub_type stub_type;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      howto = elf_howto_table + r_type;
      unresolved_reloc = FALSE;
      warned = FALSE;
      h = NULL;
      sym = NULL;
      sec = NULL;
      if (r_symndx < symtab_hdr->sh_info)
	{
	  /* Local symbol: resolve directly from the symbol table.  */
	  sym = local_syms + r_symndx;
	  sec = local_sections[r_symndx];
	  sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
	  relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
	}
      else
	{
	  /* Global symbol: look up the hash entry, following any
	     indirect/warning links to the real symbol.  */
	  if (sym_hashes == NULL)
	    return FALSE;

	  h = sym_hashes[r_symndx - symtab_hdr->sh_info];

	  while (h->root.type == bfd_link_hash_indirect
		 || h->root.type == bfd_link_hash_warning)
	    h = (struct elf_link_hash_entry *) h->root.u.i.link;

	  relocation = 0;
	  if (h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	    {
	      sec = h->root.u.def.section;
	      if (sec == NULL
		  || sec->output_section == NULL)
		/* Set a flag that will be cleared later if we find a
		   relocation value for this symbol.  output_section
		   is typically NULL for symbols satisfied by a shared
		   library.  */
		unresolved_reloc = TRUE;
	      else
		relocation = (h->root.u.def.value
			      + sec->output_section->vma
			      + sec->output_offset);
	    }
	  else if (h->root.type == bfd_link_hash_undefweak)
	    ;
	  else if (info->unresolved_syms_in_objects == RM_IGNORE
		   && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
	    ;
	  else if (!info->relocatable
		   && !(r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64))
	    {
	      /* Undefined symbol: report it, unless this is a PPU
		 reloc (handled by the PPU side of an embedded link).  */
	      bfd_boolean err;
	      err = (info->unresolved_syms_in_objects == RM_GENERATE_ERROR
		     || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT);
	      if (!info->callbacks->undefined_symbol (info,
						      h->root.root.string,
						      input_bfd,
						      input_section,
						      rel->r_offset, err))
		return FALSE;
	      warned = TRUE;
	    }
	  sym_name = h->root.root.string;
	}

      if (sec != NULL && elf_discarded_section (sec))
	{
	  /* For relocs against symbols from removed linkonce sections,
	     or sections discarded by a linker script, we just want the
	     section contents zeroed.  Avoid any special processing.  */
	  _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
	  rel->r_info = 0;
	  rel->r_addend = 0;
	  continue;
	}

      if (info->relocatable)
	continue;

      /* Change "a rt,ra,rb" to "ai rt,ra,0".  */
      if (r_type == R_SPU_ADD_PIC && h != NULL
	  && (h->def_regular || ELF_COMMON_DEF_P (h)))
	{
	  bfd_byte *loc = contents + rel->r_offset;
	  loc[0] = 0x1c;
	  loc[1] = 0x00;
	  loc[2] &= 0x3f;
	}

      /* Relocs against ._ea symbols are resolved by the PPU-side
	 image, not by SPU local-store addresses.  */
      is_ea_sym = (ea != NULL
		   && sec != NULL
		   && sec->output_section == ea);

      /* If this symbol is in an overlay area, we may need to relocate
	 to the overlay stub.  */
      addend = rel->r_addend;
      if (stubs
	  && !is_ea_sym
	  && (stub_type = needs_ovl_stub (h, sym, sec, input_section, rel,
					  contents, info)) != no_stub)
	{
	  unsigned int ovl = 0;
	  struct got_entry *g, **head;

	  if (stub_type != nonovl_stub)
	    ovl = iovl;

	  if (h != NULL)
	    head = &h->got.glist;
	  else
	    head = elf_local_got_ents (input_bfd) + r_symndx;

	  /* Find the got entry created earlier for this reloc; for
	     soft-icache it is matched by branch address, otherwise by
	     addend and overlay index.  */
	  for (g = *head; g != NULL; g = g->next)
	    if (htab->params->ovly_flavour == ovly_soft_icache
		? (g->ovl == ovl
		   && g->br_addr == (rel->r_offset
				     + input_section->output_offset
				     + input_section->output_section->vma))
		: g->addend == addend && (g->ovl == ovl || g->ovl == 0))
	      break;
	  if (g == NULL)
	    abort ();

	  /* Redirect the reloc to the stub.  */
	  relocation = g->stub_addr;
	  addend = 0;
	}
      else
	{
	  /* For soft icache, encode the overlay index into addresses.  */
	  if (htab->params->ovly_flavour == ovly_soft_icache
	      && (r_type == R_SPU_ADDR16_HI
		  || r_type == R_SPU_ADDR32 || r_type == R_SPU_REL32)
	      && !is_ea_sym)
	    {
	      unsigned int ovl = overlay_index (sec);
	      if (ovl != 0)
		{
		  unsigned int set_id = ((ovl - 1) >> htab->num_lines_log2) + 1;
		  relocation += set_id << 18;
		}
	    }
	}

      /* Record the location of each allocated 32-bit address word so
	 the loader can relocate the image.  */
      if (htab->params->emit_fixups && !info->relocatable
	  && (input_section->flags & SEC_ALLOC) != 0
	  && r_type == R_SPU_ADDR32)
	{
	  bfd_vma offset;
	  offset = rel->r_offset + input_section->output_section->vma
		   + input_section->output_offset;
	  spu_elf_emit_fixup (output_bfd, info, offset);
	}

      if (unresolved_reloc)
	;
      else if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	{
	  if (is_ea_sym)
	    {
	      /* ._ea is a special section that isn't allocated in SPU
		 memory, but rather occupies space in PPU memory as
		 part of an embedded ELF image.  If this reloc is
		 against a symbol defined in ._ea, then transform the
		 reloc into an equivalent one without a symbol
		 relative to the start of the ELF image.  */
	      rel->r_addend += (relocation
				- ea->vma
				+ elf_section_data (ea)->this_hdr.sh_offset);
	      rel->r_info = ELF32_R_INFO (0, r_type);
	    }
	  /* PPU relocs are not applied here; they are kept for output
	     (see the squeeze loop after the main loop).  */
	  emit_these_relocs = TRUE;
	  continue;
	}
      else if (is_ea_sym)
	unresolved_reloc = TRUE;

      if (unresolved_reloc)
	{
	  (*_bfd_error_handler)
	    (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
	     input_bfd,
	     bfd_get_section_name (input_bfd, input_section),
	     (long) rel->r_offset,
	     howto->name,
	     sym_name);
	  ret = FALSE;
	}

      r = _bfd_final_link_relocate (howto,
				    input_bfd,
				    input_section,
				    contents,
				    rel->r_offset, relocation, addend);

      if (r != bfd_reloc_ok)
	{
	  const char *msg = (const char *) 0;

	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      if (!((*info->callbacks->reloc_overflow)
		    (info, (h ? &h->root : NULL), sym_name, howto->name,
		     (bfd_vma) 0, input_bfd, input_section, rel->r_offset)))
		return FALSE;
	      break;

	    case bfd_reloc_undefined:
	      if (!((*info->callbacks->undefined_symbol)
		    (info, sym_name, input_bfd, input_section,
		     rel->r_offset, TRUE)))
		return FALSE;
	      break;

	    case bfd_reloc_outofrange:
	      msg = _("internal error: out of range error");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      msg = _("internal error: unsupported relocation error");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      msg = _("internal error: dangerous error");
	      goto common_error;

	    default:
	      msg = _("internal error: unknown error");
	      /* fall through */

	    common_error:
	      ret = FALSE;
	      if (!((*info->callbacks->warning)
		    (info, msg, sym_name, input_bfd, input_section,
		     rel->r_offset)))
		return FALSE;
	      break;
	    }
	}
    }

  if (ret
      && emit_these_relocs
      && !info->emitrelocations)
    {
      /* Compact the reloc array in place, keeping only the PPU relocs
	 that must appear in the output file.  */
      Elf_Internal_Rela *wrel;
      Elf_Internal_Shdr *rel_hdr;

      wrel = rel = relocs;
      relend = relocs + input_section->reloc_count;
      for (; rel < relend; rel++)
	{
	  int r_type;

	  r_type = ELF32_R_TYPE (rel->r_info);
	  if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	    *wrel++ = *rel;
	}
      input_section->reloc_count = wrel - relocs;
      /* Backflips for _bfd_elf_link_output_relocs.  */
      rel_hdr = &elf_section_data (input_section)->rel_hdr;
      rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
      ret = 2;
    }

  return ret;
}
5096
5097 /* Adjust _SPUEAR_ syms to point at their overlay stubs. */
5098
5099 static int
5100 spu_elf_output_symbol_hook (struct bfd_link_info *info,
5101 const char *sym_name ATTRIBUTE_UNUSED,
5102 Elf_Internal_Sym *sym,
5103 asection *sym_sec ATTRIBUTE_UNUSED,
5104 struct elf_link_hash_entry *h)
5105 {
5106 struct spu_link_hash_table *htab = spu_hash_table (info);
5107
5108 if (!info->relocatable
5109 && htab->stub_sec != NULL
5110 && h != NULL
5111 && (h->root.type == bfd_link_hash_defined
5112 || h->root.type == bfd_link_hash_defweak)
5113 && h->def_regular
5114 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
5115 {
5116 struct got_entry *g;
5117
5118 for (g = h->got.glist; g != NULL; g = g->next)
5119 if (htab->params->ovly_flavour == ovly_soft_icache
5120 ? g->br_addr == g->stub_addr
5121 : g->addend == 0 && g->ovl == 0)
5122 {
5123 sym->st_shndx = (_bfd_elf_section_from_bfd_section
5124 (htab->stub_sec[0]->output_section->owner,
5125 htab->stub_sec[0]->output_section));
5126 sym->st_value = g->stub_addr;
5127 break;
5128 }
5129 }
5130
5131 return 1;
5132 }
5133
/* Nonzero when we are producing a plugin; consulted by
   spu_elf_post_process_headers to set e_type to ET_DYN.  */
static int spu_plugin = 0;

/* Set or clear the plugin flag.  */
void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}
5141
5142 /* Set ELF header e_type for plugins. */
5143
5144 static void
5145 spu_elf_post_process_headers (bfd *abfd,
5146 struct bfd_link_info *info ATTRIBUTE_UNUSED)
5147 {
5148 if (spu_plugin)
5149 {
5150 Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);
5151
5152 i_ehdrp->e_type = ET_DYN;
5153 }
5154 }
5155
5156 /* We may add an extra PT_LOAD segment for .toe. We also need extra
5157 segments for overlays. */
5158
5159 static int
5160 spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
5161 {
5162 int extra = 0;
5163 asection *sec;
5164
5165 if (info != NULL)
5166 {
5167 struct spu_link_hash_table *htab = spu_hash_table (info);
5168 extra = htab->num_overlays;
5169 }
5170
5171 if (extra)
5172 ++extra;
5173
5174 sec = bfd_get_section_by_name (abfd, ".toe");
5175 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
5176 ++extra;
5177
5178 return extra;
5179 }
5180
/* Remove .toe section from other PT_LOAD segments and put it in
   a segment of its own.  Put overlays in separate segments too.  */

static bfd_boolean
spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
{
  asection *toe, *s;
  struct elf_segment_map *m, *m_overlay;
  struct elf_segment_map **p, **p_overlay;
  unsigned int i;

  if (info == NULL)
    return TRUE;

  /* Split any multi-section PT_LOAD segment at the first section that
     is either .toe or part of an overlay, so that section ends up in
     a single-section segment of its own.  */
  toe = bfd_get_section_by_name (abfd, ".toe");
  for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
    if (m->p_type == PT_LOAD && m->count > 1)
      for (i = 0; i < m->count; i++)
	if ((s = m->sections[i]) == toe
	    || spu_elf_section_data (s)->u.o.ovl_index != 0)
	  {
	    struct elf_segment_map *m2;
	    bfd_vma amt;

	    if (i + 1 < m->count)
	      {
		/* Move the sections after S into a new PT_LOAD
		   segment inserted after M.  */
		amt = sizeof (struct elf_segment_map);
		amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->count = m->count - (i + 1);
		memcpy (m2->sections, m->sections + i + 1,
			m2->count * sizeof (m->sections[0]));
		m2->p_type = PT_LOAD;
		m2->next = m->next;
		m->next = m2;
	      }
	    m->count = 1;
	    if (i != 0)
	      {
		/* S was not first in M: keep the sections before it in
		   M and give S its own segment after M.  */
		m->count = i;
		amt = sizeof (struct elf_segment_map);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->p_type = PT_LOAD;
		m2->count = 1;
		m2->sections[0] = s;
		m2->next = m->next;
		m->next = m2;
	      }
	    break;
	  }


  /* Some SPU ELF loaders ignore the PF_OVERLAY flag and just load all
     PT_LOAD segments.  This can cause the .ovl.init section to be
     overwritten with the contents of some overlay segment.  To work
     around this issue, we ensure that all PF_OVERLAY segments are
     sorted first amongst the program headers; this ensures that even
     with a broken loader, the .ovl.init section (which is not marked
     as PF_OVERLAY) will be placed into SPU local store on startup.  */

  /* Move all overlay segments onto a separate list.  */
  p = &elf_tdata (abfd)->segment_map;
  p_overlay = &m_overlay;
  while (*p != NULL)
    {
      if ((*p)->p_type == PT_LOAD && (*p)->count == 1
	  && spu_elf_section_data ((*p)->sections[0])->u.o.ovl_index != 0)
	{
	  /* Unlink from the main list and append to the overlay list.  */
	  struct elf_segment_map *m = *p;
	  *p = m->next;
	  *p_overlay = m;
	  p_overlay = &m->next;
	  continue;
	}

      p = &((*p)->next);
    }

  /* Re-insert overlay segments at the head of the segment map.  */
  *p_overlay = elf_tdata (abfd)->segment_map;
  elf_tdata (abfd)->segment_map = m_overlay;

  return TRUE;
}
5269
5270 /* Tweak the section type of .note.spu_name. */
5271
5272 static bfd_boolean
5273 spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
5274 Elf_Internal_Shdr *hdr,
5275 asection *sec)
5276 {
5277 if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
5278 hdr->sh_type = SHT_NOTE;
5279 return TRUE;
5280 }
5281
/* Tweak phdrs before writing them out.  Flags overlay segments with
   PF_OVERLAY, records overlay file offsets in _ovly_table or
   .ovl.init, and pads PT_LOAD sizes to multiples of 16 when that
   cannot create overlapping segments.  */

static int
spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
{
  const struct elf_backend_data *bed;
  struct elf_obj_tdata *tdata;
  Elf_Internal_Phdr *phdr, *last;
  struct spu_link_hash_table *htab;
  unsigned int count;
  unsigned int i;

  if (info == NULL)
    return TRUE;

  bed = get_elf_backend_data (abfd);
  tdata = elf_tdata (abfd);
  phdr = tdata->phdr;
  count = tdata->program_header_size / bed->s->sizeof_phdr;
  htab = spu_hash_table (info);
  if (htab->num_overlays != 0)
    {
      struct elf_segment_map *m;
      unsigned int o;

      /* Walk segment_map in step with phdr[]; I is the phdr index.  */
      for (i = 0, m = elf_tdata (abfd)->segment_map; m; ++i, m = m->next)
	if (m->count != 0
	    && (o = spu_elf_section_data (m->sections[0])->u.o.ovl_index) != 0)
	  {
	    /* Mark this as an overlay header.  */
	    phdr[i].p_flags |= PF_OVERLAY;

	    if (htab->ovtab != NULL && htab->ovtab->size != 0
		&& htab->params->ovly_flavour != ovly_soft_icache)
	      {
		bfd_byte *p = htab->ovtab->contents;
		/* Entry O of _ovly_table; file_off is at offset 8 of
		   each 16-byte entry.  */
		unsigned int off = o * 16 + 8;

		/* Write file_off into _ovly_table.  */
		bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
	      }
	  }
      /* Soft-icache has its file offset put in .ovl.init.  */
      if (htab->init != NULL && htab->init->size != 0)
	{
	  bfd_vma val = elf_section_data (htab->ovl_sec[0])->this_hdr.sh_offset;

	  bfd_put_32 (htab->init->owner, val, htab->init->contents + 4);
	}
    }

  /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
     of 16.  This should always be possible when using the standard
     linker scripts, but don't create overlapping segments if
     someone is playing games with linker scripts.  */
  last = NULL;
  for (i = count; i-- != 0; )
    if (phdr[i].p_type == PT_LOAD)
      {
	unsigned adjust;

	/* Rounding p_filesz must not run into the next-higher
	   segment's file offsets.  */
	adjust = -phdr[i].p_filesz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
	  break;

	/* Likewise p_memsz must not run into the next-higher
	   segment's address range.  */
	adjust = -phdr[i].p_memsz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_filesz != 0
	    && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
	    && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
	  break;

	if (phdr[i].p_filesz != 0)
	  last = &phdr[i];
      }

  /* Only apply the padding if the check loop above ran to completion
     (I wrapped to (unsigned int) -1) without finding a conflict.  */
  if (i == (unsigned int) -1)
    for (i = count; i-- != 0; )
      if (phdr[i].p_type == PT_LOAD)
	{
	  unsigned adjust;

	  adjust = -phdr[i].p_filesz & 15;
	  phdr[i].p_filesz += adjust;

	  adjust = -phdr[i].p_memsz & 15;
	  phdr[i].p_memsz += adjust;
	}

  return TRUE;
}
5376
5377 bfd_boolean
5378 spu_elf_size_sections (bfd * output_bfd, struct bfd_link_info *info)
5379 {
5380 struct spu_link_hash_table *htab = spu_hash_table (info);
5381 if (htab->params->emit_fixups)
5382 {
5383 asection *sfixup = htab->sfixup;
5384 int fixup_count = 0;
5385 bfd *ibfd;
5386 size_t size;
5387
5388 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
5389 {
5390 asection *isec;
5391
5392 if (bfd_get_flavour (ibfd) != bfd_target_elf_flavour)
5393 continue;
5394
5395 /* Walk over each section attached to the input bfd. */
5396 for (isec = ibfd->sections; isec != NULL; isec = isec->next)
5397 {
5398 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
5399 bfd_vma base_end;
5400
5401 /* If there aren't any relocs, then there's nothing more
5402 to do. */
5403 if ((isec->flags & SEC_RELOC) == 0
5404 || isec->reloc_count == 0)
5405 continue;
5406
5407 /* Get the relocs. */
5408 internal_relocs =
5409 _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
5410 info->keep_memory);
5411 if (internal_relocs == NULL)
5412 return FALSE;
5413
5414 /* 1 quadword can contain up to 4 R_SPU_ADDR32
5415 relocations. They are stored in a single word by
5416 saving the upper 28 bits of the address and setting the
5417 lower 4 bits to a bit mask of the words that have the
5418 relocation. BASE_END keeps track of the next quadword. */
5419 irela = internal_relocs;
5420 irelaend = irela + isec->reloc_count;
5421 base_end = 0;
5422 for (; irela < irelaend; irela++)
5423 if (ELF32_R_TYPE (irela->r_info) == R_SPU_ADDR32
5424 && irela->r_offset >= base_end)
5425 {
5426 base_end = (irela->r_offset & ~(bfd_vma) 15) + 16;
5427 fixup_count++;
5428 }
5429 }
5430 }
5431
5432 /* We always have a NULL fixup as a sentinel */
5433 size = (fixup_count + 1) * FIXUP_RECORD_SIZE;
5434 if (!bfd_set_section_size (output_bfd, sfixup, size))
5435 return FALSE;
5436 sfixup->contents = (bfd_byte *) bfd_zalloc (info->input_bfds, size);
5437 if (sfixup->contents == NULL)
5438 return FALSE;
5439 }
5440 return TRUE;
5441 }
5442
/* Target vector definition: hook the SPU-specific routines above into
   the generic 32-bit big-endian ELF backend.  */

#define TARGET_BIG_SYM bfd_elf32_spu_vec
#define TARGET_BIG_NAME "elf32-spu"
#define ELF_ARCH bfd_arch_spu
#define ELF_MACHINE_CODE EM_SPU
/* This matches the alignment need for DMA.  */
#define ELF_MAXPAGESIZE 0x80
#define elf_backend_rela_normal 1
#define elf_backend_can_gc_sections 1

#define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
#define elf_info_to_howto spu_elf_info_to_howto
#define elf_backend_count_relocs spu_elf_count_relocs
#define elf_backend_relocate_section spu_elf_relocate_section
#define elf_backend_symbol_processing spu_elf_backend_symbol_processing
#define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
#define elf_backend_object_p spu_elf_object_p
#define bfd_elf32_new_section_hook spu_elf_new_section_hook
#define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create

#define elf_backend_additional_program_headers spu_elf_additional_program_headers
#define elf_backend_modify_segment_map spu_elf_modify_segment_map
#define elf_backend_modify_program_headers spu_elf_modify_program_headers
#define elf_backend_post_process_headers spu_elf_post_process_headers
#define elf_backend_fake_sections spu_elf_fake_sections
#define elf_backend_special_sections spu_elf_special_sections
#define bfd_elf32_bfd_final_link spu_elf_final_link

#include "elf32-target.h"