1/* SPU specific support for 32-bit ELF
2
 3 Copyright 2006, 2007 Free Software Foundation, Inc.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
20
 21#include "sysdep.h"
 22#include "bfd.h"
23#include "bfdlink.h"
24#include "libbfd.h"
25#include "elf-bfd.h"
26#include "elf/spu.h"
27#include "elf32-spu.h"
28
29/* We use RELA style relocs. Don't define USE_REL. */
30
31static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
32 void *, asection *,
33 bfd *, char **);
34
35/* Values of type 'enum elf_spu_reloc_type' are used to index this
36 array, so it must be declared in the order of that type. */
37
38static reloc_howto_type elf_howto_table[] = {
39 HOWTO (R_SPU_NONE, 0, 0, 0, FALSE, 0, complain_overflow_dont,
40 bfd_elf_generic_reloc, "SPU_NONE",
41 FALSE, 0, 0x00000000, FALSE),
42 HOWTO (R_SPU_ADDR10, 4, 2, 10, FALSE, 14, complain_overflow_bitfield,
43 bfd_elf_generic_reloc, "SPU_ADDR10",
44 FALSE, 0, 0x00ffc000, FALSE),
45 HOWTO (R_SPU_ADDR16, 2, 2, 16, FALSE, 7, complain_overflow_bitfield,
46 bfd_elf_generic_reloc, "SPU_ADDR16",
47 FALSE, 0, 0x007fff80, FALSE),
48 HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE, 7, complain_overflow_bitfield,
49 bfd_elf_generic_reloc, "SPU_ADDR16_HI",
50 FALSE, 0, 0x007fff80, FALSE),
51 HOWTO (R_SPU_ADDR16_LO, 0, 2, 16, FALSE, 7, complain_overflow_dont,
52 bfd_elf_generic_reloc, "SPU_ADDR16_LO",
53 FALSE, 0, 0x007fff80, FALSE),
54 HOWTO (R_SPU_ADDR18, 0, 2, 18, FALSE, 7, complain_overflow_bitfield,
55 bfd_elf_generic_reloc, "SPU_ADDR18",
56 FALSE, 0, 0x01ffff80, FALSE),
57 HOWTO (R_SPU_ADDR32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
58 bfd_elf_generic_reloc, "SPU_ADDR32",
59 FALSE, 0, 0xffffffff, FALSE),
60 HOWTO (R_SPU_REL16, 2, 2, 16, TRUE, 7, complain_overflow_bitfield,
61 bfd_elf_generic_reloc, "SPU_REL16",
62 FALSE, 0, 0x007fff80, TRUE),
63 HOWTO (R_SPU_ADDR7, 0, 2, 7, FALSE, 14, complain_overflow_dont,
64 bfd_elf_generic_reloc, "SPU_ADDR7",
65 FALSE, 0, 0x001fc000, FALSE),
66 HOWTO (R_SPU_REL9, 2, 2, 9, TRUE, 0, complain_overflow_signed,
67 spu_elf_rel9, "SPU_REL9",
68 FALSE, 0, 0x0180007f, TRUE),
69 HOWTO (R_SPU_REL9I, 2, 2, 9, TRUE, 0, complain_overflow_signed,
70 spu_elf_rel9, "SPU_REL9I",
71 FALSE, 0, 0x0000c07f, TRUE),
72 HOWTO (R_SPU_ADDR10I, 0, 2, 10, FALSE, 14, complain_overflow_signed,
73 bfd_elf_generic_reloc, "SPU_ADDR10I",
74 FALSE, 0, 0x00ffc000, FALSE),
75 HOWTO (R_SPU_ADDR16I, 0, 2, 16, FALSE, 7, complain_overflow_signed,
76 bfd_elf_generic_reloc, "SPU_ADDR16I",
77 FALSE, 0, 0x007fff80, FALSE),
78 HOWTO (R_SPU_REL32, 0, 2, 32, TRUE, 0, complain_overflow_dont,
79 bfd_elf_generic_reloc, "SPU_REL32",
80 FALSE, 0, 0xffffffff, TRUE),
81};
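/* A note on reading the table above (informal, derived from the HOWTO
   argument order: type, rightshift, size, bitsize, pc_rel, bitpos,
   overflow check, special function, name, partial_inplace, src_mask,
   dst_mask, pcrel_offset).  For example R_SPU_ADDR16 uses rightshift 2
   with a 16-bit field at bit position 7 and dst_mask 0x007fff80, so the
   relocated value is converted to a word address and placed in bits
   7..22 of the instruction word.  */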
82
83static struct bfd_elf_special_section const spu_elf_special_sections[] = {
84 { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
85 { NULL, 0, 0, 0, 0 }
86};
87
88static enum elf_spu_reloc_type
89spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
90{
91 switch (code)
92 {
93 default:
94 return R_SPU_NONE;
95 case BFD_RELOC_SPU_IMM10W:
96 return R_SPU_ADDR10;
97 case BFD_RELOC_SPU_IMM16W:
98 return R_SPU_ADDR16;
99 case BFD_RELOC_SPU_LO16:
100 return R_SPU_ADDR16_LO;
101 case BFD_RELOC_SPU_HI16:
102 return R_SPU_ADDR16_HI;
103 case BFD_RELOC_SPU_IMM18:
104 return R_SPU_ADDR18;
105 case BFD_RELOC_SPU_PCREL16:
106 return R_SPU_REL16;
107 case BFD_RELOC_SPU_IMM7:
108 return R_SPU_ADDR7;
109 case BFD_RELOC_SPU_IMM8:
110 return R_SPU_NONE;
111 case BFD_RELOC_SPU_PCREL9a:
112 return R_SPU_REL9;
113 case BFD_RELOC_SPU_PCREL9b:
114 return R_SPU_REL9I;
115 case BFD_RELOC_SPU_IMM10:
116 return R_SPU_ADDR10I;
117 case BFD_RELOC_SPU_IMM16:
118 return R_SPU_ADDR16I;
119 case BFD_RELOC_32:
120 return R_SPU_ADDR32;
121 case BFD_RELOC_32_PCREL:
122 return R_SPU_REL32;
123 }
124}
125
126static void
127spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
128 arelent *cache_ptr,
129 Elf_Internal_Rela *dst)
130{
131 enum elf_spu_reloc_type r_type;
132
133 r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
134 BFD_ASSERT (r_type < R_SPU_max);
135 cache_ptr->howto = &elf_howto_table[(int) r_type];
136}
137
138static reloc_howto_type *
139spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
140 bfd_reloc_code_real_type code)
141{
142 return elf_howto_table + spu_elf_bfd_to_reloc_type (code);
143}
144
145static reloc_howto_type *
146spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
147 const char *r_name)
148{
149 unsigned int i;
150
151 for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
152 if (elf_howto_table[i].name != NULL
153 && strcasecmp (elf_howto_table[i].name, r_name) == 0)
154 return &elf_howto_table[i];
155
156 return NULL;
157}
158
159/* Apply R_SPU_REL9 and R_SPU_REL9I relocs. */
160
161static bfd_reloc_status_type
162spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
163 void *data, asection *input_section,
164 bfd *output_bfd, char **error_message)
165{
166 bfd_size_type octets;
167 bfd_vma val;
168 long insn;
169
170 /* If this is a relocatable link (output_bfd test tells us), just
171 call the generic function. Any adjustment will be done at final
172 link time. */
173 if (output_bfd != NULL)
174 return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
175 input_section, output_bfd, error_message);
176
177 if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
178 return bfd_reloc_outofrange;
179 octets = reloc_entry->address * bfd_octets_per_byte (abfd);
180
181 /* Get symbol value. */
182 val = 0;
183 if (!bfd_is_com_section (symbol->section))
184 val = symbol->value;
185 if (symbol->section->output_section)
186 val += symbol->section->output_section->vma;
187
188 val += reloc_entry->addend;
189
190 /* Make it pc-relative. */
191 val -= input_section->output_section->vma + input_section->output_offset;
192
193 val >>= 2;
194 if (val + 256 >= 512)
195 return bfd_reloc_overflow;
196
197 insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);
198
199 /* Move two high bits of value to REL9I and REL9 position.
200 The mask will take care of selecting the right field. */
201 val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
202 insn &= ~reloc_entry->howto->dst_mask;
203 insn |= val & reloc_entry->howto->dst_mask;
204 bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
205 return bfd_reloc_ok;
206}
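/* An informal worked example of the bit scattering above: a
   pc-relative offset of -16 bytes gives val = -4 after the shift,
   which passes the signed 9-bit range check.  Then (val & 0x7f) = 0x7c
   and (val & 0x180) = 0x180, so the scattered value is
   0x7c | 0xc000 | 0x1800000 = 0x0180c07c; the R_SPU_REL9 dst_mask
   0x0180007f keeps 0x0180007c while the R_SPU_REL9I dst_mask
   0x0000c07f keeps 0x0000c07c, so one scatter serves both forms.  */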
207
208static bfd_boolean
209spu_elf_new_section_hook (bfd *abfd, asection *sec)
210{
211 if (!sec->used_by_bfd)
212 {
213 struct _spu_elf_section_data *sdata;
214
215 sdata = bfd_zalloc (abfd, sizeof (*sdata));
216 if (sdata == NULL)
217 return FALSE;
218 sec->used_by_bfd = sdata;
219 }
220
221 return _bfd_elf_new_section_hook (abfd, sec);
222}
223
224/* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
225 strip --strip-unneeded will not remove them. */
226
227static void
228spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
229{
230 if (sym->name != NULL
231 && sym->section != bfd_abs_section_ptr
232 && strncmp (sym->name, "_EAR_", 5) == 0)
233 sym->flags |= BSF_KEEP;
234}
235
236/* SPU ELF linker hash table. */
237
238struct spu_link_hash_table
239{
240 struct elf_link_hash_table elf;
241
242 /* The stub hash table. */
243 struct bfd_hash_table stub_hash_table;
244
245 /* Shortcuts to overlay sections. */
246 asection *stub;
247 asection *ovtab;
248
249 struct elf_link_hash_entry *ovly_load;
250
251 /* An array of two output sections per overlay region, chosen such that
252 the first section vma is the overlay buffer vma (ie. the section has
 253 the lowest vma in the group that occupies the region), and the second
254 section vma+size specifies the end of the region. We keep pointers
255 to sections like this because section vmas may change when laying
256 them out. */
257 asection **ovl_region;
258
259 /* Number of overlay buffers. */
260 unsigned int num_buf;
261
262 /* Total number of overlays. */
263 unsigned int num_overlays;
264
265 /* Set if we should emit symbols for stubs. */
266 unsigned int emit_stub_syms:1;
267
268 /* Set if we want stubs on calls out of overlay regions to
269 non-overlay regions. */
270 unsigned int non_overlay_stubs : 1;
271
272 /* Set on error. */
273 unsigned int stub_overflow : 1;
274
275 /* Set if stack size analysis should be done. */
276 unsigned int stack_analysis : 1;
277
278 /* Set if __stack_* syms will be emitted. */
279 unsigned int emit_stack_syms : 1;
280};
281
282#define spu_hash_table(p) \
283 ((struct spu_link_hash_table *) ((p)->hash))
284
285struct spu_stub_hash_entry
286{
287 struct bfd_hash_entry root;
288
289 /* Destination of this stub. */
290 asection *target_section;
291 bfd_vma target_off;
292
293 /* Offset of entry in stub section. */
294 bfd_vma off;
295
296 /* Offset from this stub to stub that loads the overlay index. */
297 bfd_vma delta;
298};
299
300/* Create an entry in a spu stub hash table. */
301
302static struct bfd_hash_entry *
303stub_hash_newfunc (struct bfd_hash_entry *entry,
304 struct bfd_hash_table *table,
305 const char *string)
306{
307 /* Allocate the structure if it has not already been allocated by a
308 subclass. */
309 if (entry == NULL)
310 {
311 entry = bfd_hash_allocate (table, sizeof (struct spu_stub_hash_entry));
312 if (entry == NULL)
313 return entry;
314 }
315
316 /* Call the allocation method of the superclass. */
317 entry = bfd_hash_newfunc (entry, table, string);
318 if (entry != NULL)
319 {
320 struct spu_stub_hash_entry *sh = (struct spu_stub_hash_entry *) entry;
321
322 sh->target_section = NULL;
323 sh->target_off = 0;
324 sh->off = 0;
325 sh->delta = 0;
326 }
327
328 return entry;
329}
330
331/* Create a spu ELF linker hash table. */
332
333static struct bfd_link_hash_table *
334spu_elf_link_hash_table_create (bfd *abfd)
335{
336 struct spu_link_hash_table *htab;
337
338 htab = bfd_malloc (sizeof (*htab));
339 if (htab == NULL)
340 return NULL;
341
342 if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
343 _bfd_elf_link_hash_newfunc,
344 sizeof (struct elf_link_hash_entry)))
345 {
346 free (htab);
347 return NULL;
348 }
349
350 /* Init the stub hash table too. */
351 if (!bfd_hash_table_init (&htab->stub_hash_table, stub_hash_newfunc,
352 sizeof (struct spu_stub_hash_entry)))
353 return NULL;
354
355 memset (&htab->stub, 0,
356 sizeof (*htab) - offsetof (struct spu_link_hash_table, stub));
357
358 return &htab->elf.root;
359}
360
361/* Free the derived linker hash table. */
362
363static void
364spu_elf_link_hash_table_free (struct bfd_link_hash_table *hash)
365{
366 struct spu_link_hash_table *ret = (struct spu_link_hash_table *) hash;
367
368 bfd_hash_table_free (&ret->stub_hash_table);
369 _bfd_generic_link_hash_table_free (hash);
370}
371
372/* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
373 to (hash, NULL) for global symbols, and (NULL, sym) for locals. Set
374 *SYMSECP to the symbol's section. *LOCSYMSP caches local syms. */
375
376static bfd_boolean
377get_sym_h (struct elf_link_hash_entry **hp,
378 Elf_Internal_Sym **symp,
379 asection **symsecp,
380 Elf_Internal_Sym **locsymsp,
381 unsigned long r_symndx,
382 bfd *ibfd)
383{
384 Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
385
386 if (r_symndx >= symtab_hdr->sh_info)
387 {
388 struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
389 struct elf_link_hash_entry *h;
390
391 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
392 while (h->root.type == bfd_link_hash_indirect
393 || h->root.type == bfd_link_hash_warning)
394 h = (struct elf_link_hash_entry *) h->root.u.i.link;
395
396 if (hp != NULL)
397 *hp = h;
398
399 if (symp != NULL)
400 *symp = NULL;
401
402 if (symsecp != NULL)
403 {
404 asection *symsec = NULL;
405 if (h->root.type == bfd_link_hash_defined
406 || h->root.type == bfd_link_hash_defweak)
407 symsec = h->root.u.def.section;
408 *symsecp = symsec;
409 }
410 }
411 else
412 {
413 Elf_Internal_Sym *sym;
414 Elf_Internal_Sym *locsyms = *locsymsp;
415
416 if (locsyms == NULL)
417 {
418 locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
419 if (locsyms == NULL)
420 {
421 size_t symcount = symtab_hdr->sh_info;
422
423 /* If we are reading symbols into the contents, then
424 read the global syms too. This is done to cache
425 syms for later stack analysis. */
426 if ((unsigned char **) locsymsp == &symtab_hdr->contents)
427 symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
428 locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
429 NULL, NULL, NULL);
430 }
431 if (locsyms == NULL)
432 return FALSE;
433 *locsymsp = locsyms;
434 }
435 sym = locsyms + r_symndx;
436
437 if (hp != NULL)
438 *hp = NULL;
439
440 if (symp != NULL)
441 *symp = sym;
442
443 if (symsecp != NULL)
444 {
445 asection *symsec = NULL;
446 if ((sym->st_shndx != SHN_UNDEF
447 && sym->st_shndx < SHN_LORESERVE)
448 || sym->st_shndx > SHN_HIRESERVE)
449 symsec = bfd_section_from_elf_index (ibfd, sym->st_shndx);
450 *symsecp = symsec;
451 }
452 }
 453
454 return TRUE;
455}
456
457/* Build a name for an entry in the stub hash table. We can't use a
458 local symbol name because ld -r might generate duplicate local symbols. */
459
460static char *
 461spu_stub_name (const asection *sym_sec,
462 const struct elf_link_hash_entry *h,
463 const Elf_Internal_Rela *rel)
464{
465 char *stub_name;
466 bfd_size_type len;
467
468 if (h)
469 {
 470 len = strlen (h->root.root.string) + 1 + 8 + 1;
471 stub_name = bfd_malloc (len);
472 if (stub_name == NULL)
473 return stub_name;
474
 475 sprintf (stub_name, "%s+%x",
476 h->root.root.string,
477 (int) rel->r_addend & 0xffffffff);
478 len -= 8;
479 }
480 else
481 {
 482 len = 8 + 1 + 8 + 1 + 8 + 1;
483 stub_name = bfd_malloc (len);
484 if (stub_name == NULL)
485 return stub_name;
486
 487 sprintf (stub_name, "%x:%x+%x",
488 sym_sec->id & 0xffffffff,
489 (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
490 (int) rel->r_addend & 0xffffffff);
491 len = strlen (stub_name);
492 }
493
494 if (stub_name[len - 2] == '+'
495 && stub_name[len - 1] == '0'
496 && stub_name[len] == 0)
497 stub_name[len - 2] = 0;
498
499 return stub_name;
500}
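/* Illustrative stub names (values invented): a call to global "foo"
   with addend 0 becomes just "foo" once the trailing "+0" is trimmed
   above, while a call relocated against local symbol index 0x24 in the
   section with id 0x1a and addend 8 becomes "1a:24+8".  */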
501
502/* Create the note section if not already present. This is done early so
503 that the linker maps the sections to the right place in the output. */
504
505bfd_boolean
506spu_elf_create_sections (bfd *output_bfd,
507 struct bfd_link_info *info,
508 int stack_analysis,
509 int emit_stack_syms)
510{
511 bfd *ibfd;
512 struct spu_link_hash_table *htab = spu_hash_table (info);
513
514 /* Stash some options away where we can get at them later. */
515 htab->stack_analysis = stack_analysis;
516 htab->emit_stack_syms = emit_stack_syms;
517
518 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->next)
519 if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
520 break;
521
522 if (ibfd == NULL)
523 {
524 /* Make SPU_PTNOTE_SPUNAME section. */
525 asection *s;
526 size_t name_len;
527 size_t size;
528 bfd_byte *data;
529 flagword flags;
530
531 ibfd = info->input_bfds;
532 flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
533 s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
534 if (s == NULL
535 || !bfd_set_section_alignment (ibfd, s, 4))
536 return FALSE;
537
538 name_len = strlen (bfd_get_filename (output_bfd)) + 1;
539 size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
540 size += (name_len + 3) & -4;
541
542 if (!bfd_set_section_size (ibfd, s, size))
543 return FALSE;
544
545 data = bfd_zalloc (ibfd, size);
546 if (data == NULL)
547 return FALSE;
548
549 bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
550 bfd_put_32 (ibfd, name_len, data + 4);
551 bfd_put_32 (ibfd, 1, data + 8);
552 memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
553 memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
554 bfd_get_filename (output_bfd), name_len);
555 s->contents = data;
556 }
557
558 return TRUE;
559}
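/* A sketch of the note contents built above, assuming SPU_PLUGIN_NAME
   is the string "SPUNAME" and the output file is named "a.out":
     +0x00 namesz = 8 (sizeof SPU_PLUGIN_NAME)
     +0x04 descsz = 6 (strlen ("a.out") + 1)
     +0x08 type   = 1
     +0x0c "SPUNAME\0" padded to a 4-byte boundary
     +0x14 "a.out\0"   padded to a 4-byte boundary
   for a total of 28 bytes, matching the size computation above.  */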
560
561/* Return the section that should be marked against GC for a given
562 relocation. */
563
564static asection *
565spu_elf_gc_mark_hook (asection *sec,
566 struct bfd_link_info *info ATTRIBUTE_UNUSED,
567 Elf_Internal_Rela *rel ATTRIBUTE_UNUSED,
568 struct elf_link_hash_entry *h,
569 Elf_Internal_Sym *sym)
570{
571 if (h != NULL)
572 {
573 switch (h->root.type)
574 {
575 case bfd_link_hash_defined:
576 case bfd_link_hash_defweak:
577 return h->root.u.def.section;
578
579 case bfd_link_hash_common:
580 return h->root.u.c.p->section;
581
582 default:
583 break;
584 }
585 }
586 else
587 return bfd_section_from_elf_index (sec->owner, sym->st_shndx);
588
589 return NULL;
590}
591
592/* qsort predicate to sort sections by vma. */
593
594static int
595sort_sections (const void *a, const void *b)
596{
597 const asection *const *s1 = a;
598 const asection *const *s2 = b;
599 bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;
600
601 if (delta != 0)
602 return delta < 0 ? -1 : 1;
603
604 return (*s1)->index - (*s2)->index;
605}
606
607/* Identify overlays in the output bfd, and number them. */
608
609bfd_boolean
610spu_elf_find_overlays (bfd *output_bfd, struct bfd_link_info *info)
611{
612 struct spu_link_hash_table *htab = spu_hash_table (info);
613 asection **alloc_sec;
614 unsigned int i, n, ovl_index, num_buf;
615 asection *s;
616 bfd_vma ovl_end;
617
618 if (output_bfd->section_count < 2)
619 return FALSE;
620
621 alloc_sec = bfd_malloc (output_bfd->section_count * sizeof (*alloc_sec));
622 if (alloc_sec == NULL)
623 return FALSE;
624
625 /* Pick out all the alloced sections. */
626 for (n = 0, s = output_bfd->sections; s != NULL; s = s->next)
627 if ((s->flags & SEC_ALLOC) != 0
628 && (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
629 && s->size != 0)
630 alloc_sec[n++] = s;
631
632 if (n == 0)
633 {
634 free (alloc_sec);
635 return FALSE;
636 }
637
638 /* Sort them by vma. */
639 qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);
640
641 /* Look for overlapping vmas. Any with overlap must be overlays.
642 Count them. Also count the number of overlay regions and for
643 each region save a section from that region with the lowest vma
644 and another section with the highest end vma. */
645 ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
646 for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
647 {
648 s = alloc_sec[i];
649 if (s->vma < ovl_end)
650 {
651 asection *s0 = alloc_sec[i - 1];
652
653 if (spu_elf_section_data (s0)->ovl_index == 0)
654 {
655 spu_elf_section_data (s0)->ovl_index = ++ovl_index;
656 alloc_sec[num_buf * 2] = s0;
657 alloc_sec[num_buf * 2 + 1] = s0;
658 num_buf++;
659 }
660 spu_elf_section_data (s)->ovl_index = ++ovl_index;
661 if (ovl_end < s->vma + s->size)
662 {
663 ovl_end = s->vma + s->size;
664 alloc_sec[num_buf * 2 - 1] = s;
665 }
666 }
667 else
668 ovl_end = s->vma + s->size;
669 }
670
671 htab->num_overlays = ovl_index;
672 htab->num_buf = num_buf;
673 if (ovl_index == 0)
674 {
675 free (alloc_sec);
676 return FALSE;
677 }
678
679 alloc_sec = bfd_realloc (alloc_sec, num_buf * 2 * sizeof (*alloc_sec));
680 if (alloc_sec == NULL)
681 return FALSE;
682
683 htab->ovl_region = alloc_sec;
684 return TRUE;
685}
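/* An informal example of the scan above (addresses invented): with
   alloc sections at 0x0-0x3000 (.text), 0x3000-0x4000 (.ov1) and
   0x3000-0x3800 (.ov2), the sorted walk finds .ov2 starting below the
   running ovl_end of 0x4000, so .ov1 is numbered overlay 1 and .ov2
   overlay 2, both sharing buffer 1, while .text stays ordinary.
   ovl_region then holds .ov1 both as the lowest-vma section and as the
   section whose vma+size ends that region.  */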
686
687/* One of these per stub. */
688#define SIZEOF_STUB1 8
689#define ILA_79 0x4200004f /* ila $79,function_address */
690#define BR 0x32000000 /* br stub2 */
691
692/* One of these per overlay. */
693#define SIZEOF_STUB2 8
694#define ILA_78 0x4200004e /* ila $78,overlay_number */
695 /* br __ovly_load */
696#define NOP 0x40200000
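/* How the stub instructions are completed (informal, matching the
   masks used in write_one_stub below): ila carries an 18-bit immediate
   in bits 7..24, so the function address is added in as
   (addr << 7) & 0x01ffff80; br carries a 16-bit word offset in bits
   7..22, and since the byte offset is a multiple of 4 the code adds
   (bytes << 5) & 0x007fff80, i.e. (bytes >> 2) << 7.  */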
697
 698/* Return true for all relative and absolute branch instructions.
699 bra 00110000 0..
700 brasl 00110001 0..
701 br 00110010 0..
702 brsl 00110011 0..
703 brz 00100000 0..
704 brnz 00100001 0..
705 brhz 00100010 0..
706 brhnz 00100011 0.. */
707
708static bfd_boolean
709is_branch (const unsigned char *insn)
710{
711 return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
712}
713
714/* Return true for branch hint instructions.
715 hbra 0001000..
716 hbrr 0001001.. */
717
718static bfd_boolean
 719is_hint (const unsigned char *insn)
 720{
 721 return (insn[0] & 0xfc) == 0x10;
722}
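/* For example, brsl has the opcode byte 0x33 (00110011), and
   (0x33 & 0xec) == 0x20 with the ninth opcode bit (top bit of insn[1])
   clear, so is_branch accepts it; hbrr starts with 0001001, so insn[0]
   is 0x12 or 0x13 and (insn[0] & 0xfc) == 0x10 makes is_hint accept
   it.  */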
723
724/* Return TRUE if this reloc symbol should possibly go via an overlay stub. */
725
726static bfd_boolean
727needs_ovl_stub (const char *sym_name,
728 asection *sym_sec,
729 asection *input_section,
730 struct spu_link_hash_table *htab,
731 bfd_boolean is_branch)
732{
733 if (htab->num_overlays == 0)
734 return FALSE;
735
736 if (sym_sec == NULL
737 || sym_sec->output_section == NULL
738 || spu_elf_section_data (sym_sec->output_section) == NULL)
739 return FALSE;
740
741 /* setjmp always goes via an overlay stub, because then the return
742 and hence the longjmp goes via __ovly_return. That magically
743 makes setjmp/longjmp between overlays work. */
744 if (strncmp (sym_name, "setjmp", 6) == 0
745 && (sym_name[6] == '\0' || sym_name[6] == '@'))
746 return TRUE;
747
748 /* Usually, symbols in non-overlay sections don't need stubs. */
749 if (spu_elf_section_data (sym_sec->output_section)->ovl_index == 0
750 && !htab->non_overlay_stubs)
751 return FALSE;
752
753 /* A reference from some other section to a symbol in an overlay
754 section needs a stub. */
755 if (spu_elf_section_data (sym_sec->output_section)->ovl_index
756 != spu_elf_section_data (input_section->output_section)->ovl_index)
757 return TRUE;
758
759 /* If this insn isn't a branch then we are possibly taking the
760 address of a function and passing it out somehow. */
761 return !is_branch;
762}
763
 764struct stubarr {
 765 struct bfd_hash_table *stub_hash_table;
766 struct spu_stub_hash_entry **sh;
767 unsigned int count;
 768 int err;
769};
770
771/* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
772 symbols. */
773
774static bfd_boolean
775allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
776{
777 /* Symbols starting with _SPUEAR_ need a stub because they may be
778 invoked by the PPU. */
779 if ((h->root.type == bfd_link_hash_defined
780 || h->root.type == bfd_link_hash_defweak)
781 && h->def_regular
782 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
783 {
784 struct stubarr *stubs = inf;
785 static Elf_Internal_Rela zero_rel;
786 char *stub_name = spu_stub_name (h->root.u.def.section, h, &zero_rel);
787 struct spu_stub_hash_entry *sh;
788
789 if (stub_name == NULL)
790 {
791 stubs->err = 1;
792 return FALSE;
793 }
794
795 sh = (struct spu_stub_hash_entry *)
796 bfd_hash_lookup (stubs->stub_hash_table, stub_name, TRUE, FALSE);
797 if (sh == NULL)
798 {
799 free (stub_name);
800 return FALSE;
801 }
802
803 /* If this entry isn't new, we already have a stub. */
804 if (sh->target_section != NULL)
805 {
806 free (stub_name);
807 return TRUE;
808 }
809
810 sh->target_section = h->root.u.def.section;
811 sh->target_off = h->root.u.def.value;
812 stubs->count += 1;
813 }
814
815 return TRUE;
816}
817
818/* Called via bfd_hash_traverse to set up pointers to all symbols
819 in the stub hash table. */
820
821static bfd_boolean
822populate_stubs (struct bfd_hash_entry *bh, void *inf)
823{
824 struct stubarr *stubs = inf;
825
826 stubs->sh[--stubs->count] = (struct spu_stub_hash_entry *) bh;
827 return TRUE;
828}
829
830/* qsort predicate to sort stubs by overlay number. */
831
832static int
833sort_stubs (const void *a, const void *b)
834{
835 const struct spu_stub_hash_entry *const *sa = a;
836 const struct spu_stub_hash_entry *const *sb = b;
837 int i;
838 bfd_signed_vma d;
839
840 i = spu_elf_section_data ((*sa)->target_section->output_section)->ovl_index;
841 i -= spu_elf_section_data ((*sb)->target_section->output_section)->ovl_index;
842 if (i != 0)
843 return i;
844
845 d = ((*sa)->target_section->output_section->vma
846 + (*sa)->target_section->output_offset
847 + (*sa)->target_off
848 - (*sb)->target_section->output_section->vma
849 - (*sb)->target_section->output_offset
850 - (*sb)->target_off);
851 if (d != 0)
852 return d < 0 ? -1 : 1;
853
854 /* Two functions at the same address. Aliases perhaps. */
855 i = strcmp ((*sb)->root.string, (*sa)->root.string);
856 BFD_ASSERT (i != 0);
857 return i;
858}
859
860/* Allocate space for overlay call and return stubs. */
861
862bfd_boolean
863spu_elf_size_stubs (bfd *output_bfd,
864 struct bfd_link_info *info,
865 int non_overlay_stubs,
 866 int stack_analysis,
867 asection **stub,
868 asection **ovtab,
869 asection **toe)
870{
871 struct spu_link_hash_table *htab = spu_hash_table (info);
872 bfd *ibfd;
873 struct stubarr stubs;
874 unsigned i, group;
875 flagword flags;
876
877 htab->non_overlay_stubs = non_overlay_stubs;
 878 stubs.stub_hash_table = &htab->stub_hash_table;
 879 stubs.count = 0;
 880 stubs.err = 0;
881 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
882 {
883 extern const bfd_target bfd_elf32_spu_vec;
884 Elf_Internal_Shdr *symtab_hdr;
885 asection *section;
886 Elf_Internal_Sym *local_syms = NULL;
 887 Elf_Internal_Sym **psyms;
888
889 if (ibfd->xvec != &bfd_elf32_spu_vec)
890 continue;
891
892 /* We'll need the symbol table in a second. */
893 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
894 if (symtab_hdr->sh_info == 0)
895 continue;
896
897 /* Arrange to read and keep global syms for later stack analysis. */
898 psyms = &local_syms;
899 if (stack_analysis)
900 psyms = (Elf_Internal_Sym **) &symtab_hdr->contents;
901
902 /* Walk over each section attached to the input bfd. */
903 for (section = ibfd->sections; section != NULL; section = section->next)
904 {
905 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
906
907 /* If there aren't any relocs, then there's nothing more to do. */
908 if ((section->flags & SEC_RELOC) == 0
909 || (section->flags & SEC_ALLOC) == 0
910 || (section->flags & SEC_LOAD) == 0
911 || section->reloc_count == 0)
912 continue;
913
914 /* If this section is a link-once section that will be
915 discarded, then don't create any stubs. */
916 if (section->output_section == NULL
917 || section->output_section->owner != output_bfd)
918 continue;
919
920 /* Get the relocs. */
921 internal_relocs
922 = _bfd_elf_link_read_relocs (ibfd, section, NULL, NULL,
923 info->keep_memory);
924 if (internal_relocs == NULL)
925 goto error_ret_free_local;
926
927 /* Now examine each relocation. */
928 irela = internal_relocs;
929 irelaend = irela + section->reloc_count;
930 for (; irela < irelaend; irela++)
931 {
932 enum elf_spu_reloc_type r_type;
933 unsigned int r_indx;
934 asection *sym_sec;
935 Elf_Internal_Sym *sym;
936 struct elf_link_hash_entry *h;
 937 const char *sym_name;
938 char *stub_name;
939 struct spu_stub_hash_entry *sh;
940 unsigned int sym_type;
941 enum _insn_type { non_branch, branch, call } insn_type;
942
943 r_type = ELF32_R_TYPE (irela->r_info);
944 r_indx = ELF32_R_SYM (irela->r_info);
945
946 if (r_type >= R_SPU_max)
947 {
948 bfd_set_error (bfd_error_bad_value);
949 goto error_ret_free_internal;
950 }
951
952 /* Determine the reloc target section. */
 953 if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, ibfd))
954 goto error_ret_free_internal;
955
956 if (sym_sec == NULL
957 || sym_sec->output_section == NULL
958 || sym_sec->output_section->owner != output_bfd)
959 continue;
960
961 /* Ensure no stubs for user supplied overlay manager syms. */
962 if (h != NULL
963 && (strcmp (h->root.root.string, "__ovly_load") == 0
964 || strcmp (h->root.root.string, "__ovly_return") == 0))
965 continue;
966
967 insn_type = non_branch;
968 if (r_type == R_SPU_REL16
969 || r_type == R_SPU_ADDR16)
970 {
971 unsigned char insn[4];
972
973 if (!bfd_get_section_contents (ibfd, section, insn,
974 irela->r_offset, 4))
975 goto error_ret_free_internal;
976
 977 if (is_branch (insn) || is_hint (insn))
978 {
979 insn_type = branch;
980 if ((insn[0] & 0xfd) == 0x31)
981 insn_type = call;
982 }
983 }
984
985 /* We are only interested in function symbols. */
986 if (h != NULL)
987 {
988 sym_type = h->type;
989 sym_name = h->root.root.string;
990 }
 991 else
992 {
993 sym_type = ELF_ST_TYPE (sym->st_info);
994 sym_name = bfd_elf_sym_name (sym_sec->owner,
995 symtab_hdr,
996 sym,
997 sym_sec);
998 }
999 if (sym_type != STT_FUNC)
1000 {
1001 /* It's common for people to write assembly and forget
1002 to give function symbols the right type. Handle
1003 calls to such symbols, but warn so that (hopefully)
1004 people will fix their code. We need the symbol
1005 type to be correct to distinguish function pointer
1006 initialisation from other pointer initialisation. */
1007 if (insn_type == call)
1008 (*_bfd_error_handler) (_("warning: call to non-function"
1009 " symbol %s defined in %B"),
1010 sym_sec->owner, sym_name);
1011 else
1012 continue;
1013 }
1014
1015 if (!needs_ovl_stub (sym_name, sym_sec, section, htab,
1016 insn_type != non_branch))
1017 continue;
1018
 1019 stub_name = spu_stub_name (sym_sec, h, irela);
1020 if (stub_name == NULL)
1021 goto error_ret_free_internal;
1022
1023 sh = (struct spu_stub_hash_entry *)
1024 bfd_hash_lookup (&htab->stub_hash_table, stub_name,
1025 TRUE, FALSE);
1026 if (sh == NULL)
1027 {
1028 free (stub_name);
1029 error_ret_free_internal:
1030 if (elf_section_data (section)->relocs != internal_relocs)
1031 free (internal_relocs);
1032 error_ret_free_local:
1033 if (local_syms != NULL
1034 && (symtab_hdr->contents
1035 != (unsigned char *) local_syms))
1036 free (local_syms);
1037 return FALSE;
1038 }
1039
1040 /* If this entry isn't new, we already have a stub. */
1041 if (sh->target_section != NULL)
1042 {
1043 free (stub_name);
1044 continue;
1045 }
1046
1047 sh->target_section = sym_sec;
1048 if (h != NULL)
1049 sh->target_off = h->root.u.def.value;
1050 else
1051 sh->target_off = sym->st_value;
1052 sh->target_off += irela->r_addend;
1053
1054 stubs.count += 1;
1055 }
1056
1057 /* We're done with the internal relocs, free them. */
1058 if (elf_section_data (section)->relocs != internal_relocs)
1059 free (internal_relocs);
1060 }
1061
1062 if (local_syms != NULL
1063 && symtab_hdr->contents != (unsigned char *) local_syms)
1064 {
1065 if (!info->keep_memory)
1066 free (local_syms);
1067 else
1068 symtab_hdr->contents = (unsigned char *) local_syms;
1069 }
1070 }
1071
1072 elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, &stubs);
1073 if (stubs.err)
1074 return FALSE;
1075
1076 *stub = NULL;
1077 if (stubs.count == 0)
1078 return TRUE;
1079
1080 ibfd = info->input_bfds;
1081 flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
1082 | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
1083 htab->stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
1084 *stub = htab->stub;
1085 if (htab->stub == NULL
1086 || !bfd_set_section_alignment (ibfd, htab->stub, 2))
1087 return FALSE;
1088
1089 flags = (SEC_ALLOC | SEC_LOAD
1090 | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
1091 htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
1092 *ovtab = htab->ovtab;
1093 if (htab->ovtab == NULL
 1094 || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
1095 return FALSE;
1096
1097 *toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
1098 if (*toe == NULL
1099 || !bfd_set_section_alignment (ibfd, *toe, 4))
1100 return FALSE;
1101 (*toe)->size = 16;
1102
1103 /* Retrieve all the stubs and sort. */
1104 stubs.sh = bfd_malloc (stubs.count * sizeof (*stubs.sh));
1105 if (stubs.sh == NULL)
1106 return FALSE;
1107 i = stubs.count;
1108 bfd_hash_traverse (&htab->stub_hash_table, populate_stubs, &stubs);
1109 BFD_ASSERT (stubs.count == 0);
1110
1111 stubs.count = i;
1112 qsort (stubs.sh, stubs.count, sizeof (*stubs.sh), sort_stubs);
1113
1114 /* Now that the stubs are sorted, place them in the stub section.
1115 Stubs are grouped per overlay
1116 . ila $79,func1
1117 . br 1f
1118 . ila $79,func2
1119 . br 1f
1120 .
1121 .
1122 . ila $79,funcn
1123 . nop
1124 . 1:
1125 . ila $78,ovl_index
1126 . br __ovly_load */
1127
1128 group = 0;
1129 for (i = 0; i < stubs.count; i++)
1130 {
1131 if (spu_elf_section_data (stubs.sh[group]->target_section
1132 ->output_section)->ovl_index
1133 != spu_elf_section_data (stubs.sh[i]->target_section
1134 ->output_section)->ovl_index)
1135 {
1136 htab->stub->size += SIZEOF_STUB2;
1137 for (; group != i; group++)
1138 stubs.sh[group]->delta
1139 = stubs.sh[i - 1]->off - stubs.sh[group]->off;
1140 }
1141 if (group == i
1142 || ((stubs.sh[i - 1]->target_section->output_section->vma
1143 + stubs.sh[i - 1]->target_section->output_offset
1144 + stubs.sh[i - 1]->target_off)
1145 != (stubs.sh[i]->target_section->output_section->vma
1146 + stubs.sh[i]->target_section->output_offset
1147 + stubs.sh[i]->target_off)))
1148 {
1149 stubs.sh[i]->off = htab->stub->size;
1150 htab->stub->size += SIZEOF_STUB1;
1151 }
1152 else
1153 stubs.sh[i]->off = stubs.sh[i - 1]->off;
1154 }
1155 if (group != i)
1156 htab->stub->size += SIZEOF_STUB2;
1157 for (; group != i; group++)
1158 stubs.sh[group]->delta = stubs.sh[i - 1]->off - stubs.sh[group]->off;
1159
1160 /* htab->ovtab consists of two arrays.
1161 . struct {
1162 . u32 vma;
1163 . u32 size;
1164 . u32 file_off;
1165 . u32 buf;
1166 . } _ovly_table[];
1167 .
1168 . struct {
1169 . u32 mapped;
1170 . } _ovly_buf_table[]; */
1171
1172 htab->ovtab->alignment_power = 4;
1173 htab->ovtab->size = htab->num_overlays * 16 + htab->num_buf * 4;
1174
1175 return TRUE;
1176}
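/* A rough sizing example for the above (counts invented): 10 call
   stubs targeting 3 overlays that live in 2 buffers need
   10 * SIZEOF_STUB1 + 3 * SIZEOF_STUB2 = 104 bytes of .stub, and
   .ovtab is 3 * 16 + 2 * 4 = 56 bytes for _ovly_table plus
   _ovly_buf_table.  */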
1177
1178/* Functions to handle embedded spu_ovl.o object. */
1179
1180static void *
1181ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
1182{
1183 return stream;
1184}
1185
1186static file_ptr
1187ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
1188 void *stream,
1189 void *buf,
1190 file_ptr nbytes,
1191 file_ptr offset)
1192{
1193 struct _ovl_stream *os;
1194 size_t count;
1195 size_t max;
1196
1197 os = (struct _ovl_stream *) stream;
 1198 max = (const char *) os->end - (const char *) os->start;
1199
1200 if ((ufile_ptr) offset >= max)
1201 return 0;
1202
1203 count = nbytes;
1204 if (count > max - offset)
1205 count = max - offset;
1206
 1207 memcpy (buf, (const char *) os->start + offset, count);
1208 return count;
1209}
1210
1211bfd_boolean
1212spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
1213{
1214 *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
1215 "elf32-spu",
1216 ovl_mgr_open,
1217 (void *) stream,
1218 ovl_mgr_pread,
 1219 NULL,
1220 NULL);
1221 return *ovl_bfd != NULL;
1222}
1223
1224/* Fill in the ila and br for a stub. On the last stub for a group,
1225 write the stub that sets the overlay number too. */
1226
1227static bfd_boolean
1228write_one_stub (struct bfd_hash_entry *bh, void *inf)
1229{
1230 struct spu_stub_hash_entry *ent = (struct spu_stub_hash_entry *) bh;
1231 struct spu_link_hash_table *htab = inf;
1232 asection *sec = htab->stub;
1233 asection *s = ent->target_section;
1234 unsigned int ovl;
1235 bfd_vma val;
1236
1237 val = ent->target_off + s->output_offset + s->output_section->vma;
1238 bfd_put_32 (sec->owner, ILA_79 + ((val << 7) & 0x01ffff80),
1239 sec->contents + ent->off);
1240 val = ent->delta + 4;
1241 bfd_put_32 (sec->owner, BR + ((val << 5) & 0x007fff80),
1242 sec->contents + ent->off + 4);
1243
1244 /* If this is the last stub of this group, write stub2. */
1245 if (ent->delta == 0)
1246 {
1247 bfd_put_32 (sec->owner, NOP,
1248 sec->contents + ent->off + 4);
1249
1250 ovl = spu_elf_section_data (s->output_section)->ovl_index;
1251 bfd_put_32 (sec->owner, ILA_78 + ((ovl << 7) & 0x01ffff80),
1252 sec->contents + ent->off + 8);
1253
1254 val = (htab->ovly_load->root.u.def.section->output_section->vma
1255 + htab->ovly_load->root.u.def.section->output_offset
1256 + htab->ovly_load->root.u.def.value
1257 - (sec->output_section->vma
1258 + sec->output_offset
1259 + ent->off + 12));
1260
1261 if (val + 0x20000 >= 0x40000)
1262 htab->stub_overflow = TRUE;
1263
1264 bfd_put_32 (sec->owner, BR + ((val << 5) & 0x007fff80),
1265 sec->contents + ent->off + 12);
1266 }
1267
1268 if (htab->emit_stub_syms)
1269 {
1270 struct elf_link_hash_entry *h;
1271 size_t len1, len2;
1272 char *name;
1273
 1274 len1 = sizeof ("00000000.ovl_call.") - 1;
1275 len2 = strlen (ent->root.string);
1276 name = bfd_malloc (len1 + len2 + 1);
1277 if (name == NULL)
1278 return FALSE;
1279 memcpy (name, "00000000.ovl_call.", len1);
1280 memcpy (name + len1, ent->root.string, len2 + 1);
1281 h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
1282 free (name);
1283 if (h == NULL)
1284 return FALSE;
1285 if (h->root.type == bfd_link_hash_new)
1286 {
1287 h->root.type = bfd_link_hash_defined;
1288 h->root.u.def.section = sec;
1289 h->root.u.def.value = ent->off;
1290 h->size = (ent->delta == 0
1291 ? SIZEOF_STUB1 + SIZEOF_STUB2 : SIZEOF_STUB1);
1292 h->type = STT_FUNC;
1293 h->ref_regular = 1;
1294 h->def_regular = 1;
1295 h->ref_regular_nonweak = 1;
1296 h->forced_local = 1;
1297 h->non_elf = 0;
1298 }
1299 }
1300
1301 return TRUE;
1302}
1303
1304/* Define an STT_OBJECT symbol. */
1305
1306static struct elf_link_hash_entry *
1307define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
1308{
1309 struct elf_link_hash_entry *h;
1310
1311 h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
1312 if (h == NULL)
1313 return NULL;
1314
1315 if (h->root.type != bfd_link_hash_defined
1316 || !h->def_regular)
1317 {
1318 h->root.type = bfd_link_hash_defined;
1319 h->root.u.def.section = htab->ovtab;
1320 h->type = STT_OBJECT;
1321 h->ref_regular = 1;
1322 h->def_regular = 1;
1323 h->ref_regular_nonweak = 1;
1324 h->non_elf = 0;
1325 }
1326 else
1327 {
1328 (*_bfd_error_handler) (_("%B is not allowed to define %s"),
1329 h->root.u.def.section->owner,
1330 h->root.root.string);
1331 bfd_set_error (bfd_error_bad_value);
1332 return NULL;
1333 }
1334
1335 return h;
1336}
1337
1338/* Fill in all stubs and the overlay tables. */
1339
1340bfd_boolean
1341spu_elf_build_stubs (struct bfd_link_info *info, int emit_syms, asection *toe)
1342{
1343 struct spu_link_hash_table *htab = spu_hash_table (info);
1344 struct elf_link_hash_entry *h;
1345 bfd_byte *p;
1346 asection *s;
1347 bfd *obfd;
1348 unsigned int i;
1349
1350 htab->emit_stub_syms = emit_syms;
1351 htab->stub->contents = bfd_zalloc (htab->stub->owner, htab->stub->size);
1352 if (htab->stub->contents == NULL)
1353 return FALSE;
1354
1355 h = elf_link_hash_lookup (&htab->elf, "__ovly_load", FALSE, FALSE, FALSE);
1356 htab->ovly_load = h;
1357 BFD_ASSERT (h != NULL
1358 && (h->root.type == bfd_link_hash_defined
1359 || h->root.type == bfd_link_hash_defweak)
1360 && h->def_regular);
1361
1362 s = h->root.u.def.section->output_section;
1363 if (spu_elf_section_data (s)->ovl_index)
1364 {
1365 (*_bfd_error_handler) (_("%s in overlay section"),
 1366 h->root.root.string);
1367 bfd_set_error (bfd_error_bad_value);
1368 return FALSE;
1369 }
1370
1371 /* Write out all the stubs. */
1372 bfd_hash_traverse (&htab->stub_hash_table, write_one_stub, htab);
1373
1374 if (htab->stub_overflow)
1375 {
1376 (*_bfd_error_handler) (_("overlay stub relocation overflow"));
1377 bfd_set_error (bfd_error_bad_value);
1378 return FALSE;
1379 }
1380
1381 htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
1382 if (htab->ovtab->contents == NULL)
1383 return FALSE;
1384
1385 /* Write out _ovly_table. */
1386 p = htab->ovtab->contents;
1387 obfd = htab->ovtab->output_section->owner;
1388 for (s = obfd->sections; s != NULL; s = s->next)
1389 {
1390 unsigned int ovl_index = spu_elf_section_data (s)->ovl_index;
1391
1392 if (ovl_index != 0)
1393 {
1394 unsigned int lo, hi, mid;
1395 unsigned long off = (ovl_index - 1) * 16;
1396 bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
1397 bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16, p + off + 4);
1398 /* file_off written later in spu_elf_modify_program_headers. */
1399
1400 lo = 0;
1401 hi = htab->num_buf;
1402 while (lo < hi)
1403 {
1404 mid = (lo + hi) >> 1;
1405 if (htab->ovl_region[2 * mid + 1]->vma
1406 + htab->ovl_region[2 * mid + 1]->size <= s->vma)
1407 lo = mid + 1;
1408 else if (htab->ovl_region[2 * mid]->vma > s->vma)
1409 hi = mid;
1410 else
1411 {
1412 bfd_put_32 (htab->ovtab->owner, mid + 1, p + off + 12);
1413 break;
1414 }
1415 }
1416 BFD_ASSERT (lo < hi);
1417 }
1418 }
1419
1420 /* Write out _ovly_buf_table. */
1421 p = htab->ovtab->contents + htab->num_overlays * 16;
1422 for (i = 0; i < htab->num_buf; i++)
1423 {
1424 bfd_put_32 (htab->ovtab->owner, 0, p);
1425 p += 4;
1426 }
1427
1428 h = define_ovtab_symbol (htab, "_ovly_table");
1429 if (h == NULL)
1430 return FALSE;
1431 h->root.u.def.value = 0;
1432 h->size = htab->num_overlays * 16;
1433
1434 h = define_ovtab_symbol (htab, "_ovly_table_end");
1435 if (h == NULL)
1436 return FALSE;
1437 h->root.u.def.value = htab->num_overlays * 16;
1438 h->size = 0;
1439
1440 h = define_ovtab_symbol (htab, "_ovly_buf_table");
1441 if (h == NULL)
1442 return FALSE;
1443 h->root.u.def.value = htab->num_overlays * 16;
1444 h->size = htab->num_buf * 4;
1445
1446 h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
1447 if (h == NULL)
1448 return FALSE;
1449 h->root.u.def.value = htab->num_overlays * 16 + htab->num_buf * 4;
1450 h->size = 0;
1451
1452 h = define_ovtab_symbol (htab, "_EAR_");
1453 if (h == NULL)
1454 return FALSE;
1455 h->root.u.def.section = toe;
1456 h->root.u.def.value = 0;
1457 h->size = 16;
1458
1459 return TRUE;
1460}
1461
1462/* OFFSET in SEC (presumably) is the beginning of a function prologue.
1463 Search for stack adjusting insns, and return the sp delta. */
1464
1465static int
1466find_function_stack_adjust (asection *sec, bfd_vma offset)
1467{
1468 int unrecog;
1469 int reg[128];
1470
1471 memset (reg, 0, sizeof (reg));
1472 for (unrecog = 0; offset + 4 <= sec->size && unrecog < 32; offset += 4)
1473 {
1474 unsigned char buf[4];
1475 int rt, ra;
1476 int imm;
1477
 1478 /* Assume no relocs on stack adjusting insns. */
1479 if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
1480 break;
1481
1482 if (buf[0] == 0x24 /* stqd */)
1483 continue;
1484
1485 rt = buf[3] & 0x7f;
1486 ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);
1487 /* Partly decoded immediate field. */
1488 imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);
1489
1490 if (buf[0] == 0x1c /* ai */)
1491 {
1492 imm >>= 7;
1493 imm = (imm ^ 0x200) - 0x200;
1494 reg[rt] = reg[ra] + imm;
1495
1496 if (rt == 1 /* sp */)
1497 {
1498 if (imm > 0)
1499 break;
1500 return reg[rt];
1501 }
1502 }
1503 else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
1504 {
1505 int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);
1506
1507 reg[rt] = reg[ra] + reg[rb];
1508 if (rt == 1)
1509 return reg[rt];
1510 }
1511 else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
1512 {
1513 if (buf[0] >= 0x42 /* ila */)
1514 imm |= (buf[0] & 1) << 17;
1515 else
1516 {
1517 imm &= 0xffff;
1518
1519 if (buf[0] == 0x40 /* il */)
1520 {
1521 if ((buf[1] & 0x80) == 0)
1522 goto unknown_insn;
1523 imm = (imm ^ 0x8000) - 0x8000;
1524 }
1525 else if ((buf[1] & 0x80) == 0 /* ilhu */)
1526 imm <<= 16;
1527 }
1528 reg[rt] = imm;
1529 continue;
1530 }
1531 else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
1532 {
1533 reg[rt] |= imm & 0xffff;
1534 continue;
1535 }
1536 else if (buf[0] == 0x04 /* ori */)
1537 {
1538 imm >>= 7;
1539 imm = (imm ^ 0x200) - 0x200;
1540 reg[rt] = reg[ra] | imm;
1541 continue;
1542 }
1543 else if ((buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
1544 || (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */))
1545 {
1546 /* Used in pic reg load. Say rt is trashed. */
1547 reg[rt] = 0;
1548 continue;
1549 }
1550 else if (is_branch (buf))
1551 /* If we hit a branch then we must be out of the prologue. */
1552 break;
1553 unknown_insn:
1554 ++unrecog;
1555 }
1556
1557 return 0;
1558}
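/* An informal example of what the scan above recognizes: a small
   prologue such as
     stqd $0,16($1)
     stqd $1,-48($1)
     ai   $1,$1,-48
   leaves reg[1] = -48 at the "ai", so the caller records 48 bytes of
   stack for the function.  Larger frames loaded with il/ilhu+iohl and
   then "a $1,$1,$n" are tracked the same way through reg[].  The
   mnemonics here are only illustrative; the code above keys off the
   opcode bytes.  */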
1559
1560/* qsort predicate to sort symbols by section and value. */
1561
1562static Elf_Internal_Sym *sort_syms_syms;
1563static asection **sort_syms_psecs;
1564
1565static int
1566sort_syms (const void *a, const void *b)
1567{
1568 Elf_Internal_Sym *const *s1 = a;
1569 Elf_Internal_Sym *const *s2 = b;
1570 asection *sec1,*sec2;
1571 bfd_signed_vma delta;
1572
1573 sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
1574 sec2 = sort_syms_psecs[*s2 - sort_syms_syms];
1575
1576 if (sec1 != sec2)
1577 return sec1->index - sec2->index;
1578
1579 delta = (*s1)->st_value - (*s2)->st_value;
1580 if (delta != 0)
1581 return delta < 0 ? -1 : 1;
1582
1583 delta = (*s2)->st_size - (*s1)->st_size;
1584 if (delta != 0)
1585 return delta < 0 ? -1 : 1;
1586
1587 return *s1 < *s2 ? -1 : 1;
1588}
1589
1590struct call_info
1591{
1592 struct function_info *fun;
1593 struct call_info *next;
1594 int is_tail;
1595};
1596
1597struct function_info
1598{
1599 /* List of functions called. Also branches to hot/cold part of
1600 function. */
1601 struct call_info *call_list;
1602 /* For hot/cold part of function, point to owner. */
1603 struct function_info *start;
1604 /* Symbol at start of function. */
1605 union {
1606 Elf_Internal_Sym *sym;
1607 struct elf_link_hash_entry *h;
1608 } u;
1609 /* Function section. */
1610 asection *sec;
1611 /* Address range of (this part of) function. */
1612 bfd_vma lo, hi;
1613 /* Stack usage. */
1614 int stack;
1615 /* Set if global symbol. */
1616 unsigned int global : 1;
1617 /* Set if known to be start of function (as distinct from a hunk
 1618 in hot/cold section). */
1619 unsigned int is_func : 1;
1620 /* Flags used during call tree traversal. */
1621 unsigned int visit1 : 1;
1622 unsigned int non_root : 1;
1623 unsigned int visit2 : 1;
1624 unsigned int marking : 1;
1625 unsigned int visit3 : 1;
1626};
1627
1628struct spu_elf_stack_info
1629{
1630 int num_fun;
1631 int max_fun;
1632 /* Variable size array describing functions, one per contiguous
1633 address range belonging to a function. */
1634 struct function_info fun[1];
1635};
1636
1637/* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
1638 entries for section SEC. */
1639
1640static struct spu_elf_stack_info *
1641alloc_stack_info (asection *sec, int max_fun)
1642{
1643 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1644 bfd_size_type amt;
1645
1646 amt = sizeof (struct spu_elf_stack_info);
1647 amt += (max_fun - 1) * sizeof (struct function_info);
1648 sec_data->stack_info = bfd_zmalloc (amt);
1649 if (sec_data->stack_info != NULL)
1650 sec_data->stack_info->max_fun = max_fun;
1651 return sec_data->stack_info;
1652}
1653
1654/* Add a new struct function_info describing a (part of a) function
1655 starting at SYM_H. Keep the array sorted by address. */
1656
1657static struct function_info *
1658maybe_insert_function (asection *sec,
1659 void *sym_h,
1660 bfd_boolean global,
1661 bfd_boolean is_func)
1662{
1663 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1664 struct spu_elf_stack_info *sinfo = sec_data->stack_info;
1665 int i;
1666 bfd_vma off, size;
1667
1668 if (sinfo == NULL)
1669 {
1670 sinfo = alloc_stack_info (sec, 20);
1671 if (sinfo == NULL)
1672 return NULL;
1673 }
1674
1675 if (!global)
1676 {
1677 Elf_Internal_Sym *sym = sym_h;
1678 off = sym->st_value;
1679 size = sym->st_size;
1680 }
1681 else
1682 {
1683 struct elf_link_hash_entry *h = sym_h;
1684 off = h->root.u.def.value;
1685 size = h->size;
1686 }
1687
1688 for (i = sinfo->num_fun; --i >= 0; )
1689 if (sinfo->fun[i].lo <= off)
1690 break;
1691
1692 if (i >= 0)
1693 {
1694 /* Don't add another entry for an alias, but do update some
1695 info. */
1696 if (sinfo->fun[i].lo == off)
1697 {
1698 /* Prefer globals over local syms. */
1699 if (global && !sinfo->fun[i].global)
1700 {
1701 sinfo->fun[i].global = TRUE;
1702 sinfo->fun[i].u.h = sym_h;
1703 }
1704 if (is_func)
1705 sinfo->fun[i].is_func = TRUE;
1706 return &sinfo->fun[i];
1707 }
1708 /* Ignore a zero-size symbol inside an existing function. */
1709 else if (sinfo->fun[i].hi > off && size == 0)
1710 return &sinfo->fun[i];
1711 }
1712
1713 if (++i < sinfo->num_fun)
1714 memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
1715 (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
1716 else if (i >= sinfo->max_fun)
1717 {
1718 bfd_size_type amt = sizeof (struct spu_elf_stack_info);
1719 bfd_size_type old = amt;
1720
1721 old += (sinfo->max_fun - 1) * sizeof (struct function_info);
1722 sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
1723 amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
1724 sinfo = bfd_realloc (sinfo, amt);
1725 if (sinfo == NULL)
1726 return NULL;
1727 memset ((char *) sinfo + old, 0, amt - old);
1728 sec_data->stack_info = sinfo;
1729 }
1730 sinfo->fun[i].is_func = is_func;
1731 sinfo->fun[i].global = global;
1732 sinfo->fun[i].sec = sec;
1733 if (global)
1734 sinfo->fun[i].u.h = sym_h;
1735 else
1736 sinfo->fun[i].u.sym = sym_h;
1737 sinfo->fun[i].lo = off;
1738 sinfo->fun[i].hi = off + size;
1739 sinfo->fun[i].stack = -find_function_stack_adjust (sec, off);
1740 sinfo->num_fun += 1;
1741 return &sinfo->fun[i];
1742}
1743
1744/* Return the name of FUN. */
1745
1746static const char *
1747func_name (struct function_info *fun)
1748{
1749 asection *sec;
1750 bfd *ibfd;
1751 Elf_Internal_Shdr *symtab_hdr;
1752
1753 while (fun->start != NULL)
1754 fun = fun->start;
1755
1756 if (fun->global)
1757 return fun->u.h->root.root.string;
1758
1759 sec = fun->sec;
1760 if (fun->u.sym->st_name == 0)
1761 {
1762 size_t len = strlen (sec->name);
1763 char *name = bfd_malloc (len + 10);
1764 if (name == NULL)
1765 return "(null)";
1766 sprintf (name, "%s+%lx", sec->name,
1767 (unsigned long) fun->u.sym->st_value & 0xffffffff);
1768 return name;
1769 }
1770 ibfd = sec->owner;
1771 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
1772 return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
1773}
1774
1775/* Read the instruction at OFF in SEC. Return true iff the instruction
1776 is a nop, lnop, or stop 0 (all zero insn). */
1777
1778static bfd_boolean
1779is_nop (asection *sec, bfd_vma off)
1780{
1781 unsigned char insn[4];
1782
1783 if (off + 4 > sec->size
1784 || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
1785 return FALSE;
1786 if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
1787 return TRUE;
1788 if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
1789 return TRUE;
1790 return FALSE;
1791}
1792
1793/* Extend the range of FUN to cover nop padding up to LIMIT.
1794 Return TRUE iff some instruction other than a NOP was found. */
1795
1796static bfd_boolean
1797insns_at_end (struct function_info *fun, bfd_vma limit)
1798{
1799 bfd_vma off = (fun->hi + 3) & -4;
1800
1801 while (off < limit && is_nop (fun->sec, off))
1802 off += 4;
1803 if (off < limit)
1804 {
1805 fun->hi = off;
1806 return TRUE;
1807 }
1808 fun->hi = limit;
1809 return FALSE;
1810}
1811
1812/* Check and fix overlapping function ranges. Return TRUE iff there
1813 are gaps in the current info we have about functions in SEC. */
1814
1815static bfd_boolean
1816check_function_ranges (asection *sec, struct bfd_link_info *info)
1817{
1818 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1819 struct spu_elf_stack_info *sinfo = sec_data->stack_info;
1820 int i;
1821 bfd_boolean gaps = FALSE;
1822
1823 if (sinfo == NULL)
1824 return FALSE;
1825
1826 for (i = 1; i < sinfo->num_fun; i++)
1827 if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
1828 {
1829 /* Fix overlapping symbols. */
1830 const char *f1 = func_name (&sinfo->fun[i - 1]);
1831 const char *f2 = func_name (&sinfo->fun[i]);
1832
1833 info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
1834 sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
1835 }
1836 else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
1837 gaps = TRUE;
1838
1839 if (sinfo->num_fun == 0)
1840 gaps = TRUE;
1841 else
1842 {
1843 if (sinfo->fun[0].lo != 0)
1844 gaps = TRUE;
1845 if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
1846 {
1847 const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);
1848
1849 info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
1850 sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
1851 }
1852 else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
1853 gaps = TRUE;
1854 }
1855 return gaps;
1856}
1857
1858/* Search current function info for a function that contains address
1859 OFFSET in section SEC. */
1860
1861static struct function_info *
1862find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
1863{
1864 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1865 struct spu_elf_stack_info *sinfo = sec_data->stack_info;
1866 int lo, hi, mid;
1867
1868 lo = 0;
1869 hi = sinfo->num_fun;
1870 while (lo < hi)
1871 {
1872 mid = (lo + hi) / 2;
1873 if (offset < sinfo->fun[mid].lo)
1874 hi = mid;
1875 else if (offset >= sinfo->fun[mid].hi)
1876 lo = mid + 1;
1877 else
1878 return &sinfo->fun[mid];
1879 }
1880 info->callbacks->einfo (_("%A:0x%v not found in function table\n"),
1881 sec, offset);
1882 return NULL;
1883}
1884
1885/* Add CALLEE to CALLER call list if not already present. */
1886
1887static bfd_boolean
1888insert_callee (struct function_info *caller, struct call_info *callee)
1889{
1890 struct call_info *p;
1891 for (p = caller->call_list; p != NULL; p = p->next)
1892 if (p->fun == callee->fun)
1893 {
1894 /* Tail calls use less stack than normal calls. Retain entry
1895 for normal call over one for tail call. */
1896 if (p->is_tail > callee->is_tail)
1897 p->is_tail = callee->is_tail;
1898 return FALSE;
1899 }
1900 callee->next = caller->call_list;
1901 caller->call_list = callee;
1902 return TRUE;
1903}
1904
1905/* Rummage through the relocs for SEC, looking for function calls.
1906 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
1907 mark destination symbols on calls as being functions. Also
1908 look at branches, which may be tail calls or go to hot/cold
1909 section part of same function. */
1910
1911static bfd_boolean
1912mark_functions_via_relocs (asection *sec,
1913 struct bfd_link_info *info,
1914 int call_tree)
1915{
1916 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
1917 Elf_Internal_Shdr *symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
1918 Elf_Internal_Sym *syms, **psyms;
1919 static bfd_boolean warned;
1920
1921 internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
1922 info->keep_memory);
1923 if (internal_relocs == NULL)
1924 return FALSE;
1925
1926 symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
1927 psyms = (Elf_Internal_Sym **) &symtab_hdr->contents;
1928 syms = *psyms;
1929 irela = internal_relocs;
1930 irelaend = irela + sec->reloc_count;
1931 for (; irela < irelaend; irela++)
1932 {
1933 enum elf_spu_reloc_type r_type;
1934 unsigned int r_indx;
1935 asection *sym_sec;
1936 Elf_Internal_Sym *sym;
1937 struct elf_link_hash_entry *h;
1938 bfd_vma val;
1939 unsigned char insn[4];
1940 bfd_boolean is_call;
1941 struct function_info *caller;
1942 struct call_info *callee;
1943
1944 r_type = ELF32_R_TYPE (irela->r_info);
1945 if (r_type != R_SPU_REL16
1946 && r_type != R_SPU_ADDR16)
1947 continue;
1948
1949 r_indx = ELF32_R_SYM (irela->r_info);
1950 if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
1951 return FALSE;
1952
1953 if (sym_sec == NULL
1954 || sym_sec->output_section == NULL
1955 || sym_sec->output_section->owner != sec->output_section->owner)
1956 continue;
1957
1958 if (!bfd_get_section_contents (sec->owner, sec, insn,
1959 irela->r_offset, 4))
1960 return FALSE;
1961 if (!is_branch (insn))
1962 continue;
1963
1964 if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
1965 != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
1966 {
1967 if (!call_tree)
1968 warned = TRUE;
1969 if (!call_tree || !warned)
1970 info->callbacks->einfo (_("%B(%A+0x%v): call to non-code section"
1971 " %B(%A), stack analysis incomplete\n"),
1972 sec->owner, sec, irela->r_offset,
1973 sym_sec->owner, sym_sec);
1974 continue;
1975 }
1976
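	  /* The 0xfd mask accepts top opcode bytes 0x31 and 0x33, the
	     branch-and-set-link (brasl/brsl) forms; any other branch
	     that gets this far is treated as a possible tail call.  */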
1977 is_call = (insn[0] & 0xfd) == 0x31;
1978
1979 if (h)
1980 val = h->root.u.def.value;
1981 else
1982 val = sym->st_value;
1983 val += irela->r_addend;
1984
1985 if (!call_tree)
1986 {
1987 struct function_info *fun;
1988
1989 if (irela->r_addend != 0)
1990 {
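	      /* A non-zero addend means the branch does not target the
		 symbol itself, so fake up a symbol at the target address
		 for insertion into the function table.  */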
1991 Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
1992 if (fake == NULL)
1993 return FALSE;
1994 fake->st_value = val;
1995 fake->st_shndx
1996 = _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
1997 sym = fake;
1998 }
1999 if (sym)
2000 fun = maybe_insert_function (sym_sec, sym, FALSE, is_call);
2001 else
2002 fun = maybe_insert_function (sym_sec, h, TRUE, is_call);
2003 if (fun == NULL)
2004 return FALSE;
2005 if (irela->r_addend != 0
2006 && fun->u.sym != sym)
2007 free (sym);
2008 continue;
2009 }
2010
2011 caller = find_function (sec, irela->r_offset, info);
2012 if (caller == NULL)
2013 return FALSE;
2014 callee = bfd_malloc (sizeof *callee);
2015 if (callee == NULL)
2016 return FALSE;
2017
2018 callee->fun = find_function (sym_sec, val, info);
2019 if (callee->fun == NULL)
2020 return FALSE;
2021 callee->is_tail = !is_call;
2022 if (!insert_callee (caller, callee))
2023 free (callee);
2024 else if (!is_call
2025 && !callee->fun->is_func
2026 && callee->fun->stack == 0)
2027 {
2028 /* This is either a tail call or a branch from one part of
2029 the function to another, i.e. hot/cold section. If the
2030 destination has been called by some other function then
2031 it is a separate function. We also assume that functions
2032 are not split across input files. */
2033 if (callee->fun->start != NULL
2034 || sec->owner != sym_sec->owner)
2035 {
2036 callee->fun->start = NULL;
2037 callee->fun->is_func = TRUE;
2038 }
2039 else
2040 callee->fun->start = caller;
2041 }
2042 }
2043
2044 return TRUE;
2045}
2046
2047/* Handle something like .init or .fini, which has a piece of a function.
2048 These sections are pasted together to form a single function. */
2049
2050static bfd_boolean
2051pasted_function (asection *sec, struct bfd_link_info *info)
2052{
2053 struct bfd_link_order *l;
2054 struct _spu_elf_section_data *sec_data;
2055 struct spu_elf_stack_info *sinfo;
2056 Elf_Internal_Sym *fake;
2057 struct function_info *fun, *fun_start;
2058
2059 fake = bfd_zmalloc (sizeof (*fake));
2060 if (fake == NULL)
2061 return FALSE;
2062 fake->st_value = 0;
2063 fake->st_size = sec->size;
2064 fake->st_shndx
2065 = _bfd_elf_section_from_bfd_section (sec->owner, sec);
2066 fun = maybe_insert_function (sec, fake, FALSE, FALSE);
2067 if (!fun)
2068 return FALSE;
2069
2070 /* Find a function immediately preceding this section. */
2071 fun_start = NULL;
2072 for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
2073 {
2074 if (l->u.indirect.section == sec)
2075 {
2076 if (fun_start != NULL)
2077 {
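	      /* If the preceding piece is itself a continuation, chain
		 back to the root piece so every fragment of the pasted
		 function shares the same start.  */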
2078 if (fun_start->start)
2079 fun_start = fun_start->start;
2080 fun->start = fun_start;
2081 }
2082 return TRUE;
2083 }
2084 if (l->type == bfd_indirect_link_order
2085 && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
2086 && (sinfo = sec_data->stack_info) != NULL
2087 && sinfo->num_fun != 0)
2088 fun_start = &sinfo->fun[sinfo->num_fun - 1];
2089 }
2090
2091 info->callbacks->einfo (_("%A link_order not found\n"), sec);
2092 return FALSE;
2093}
2094
2095/* We're only interested in code sections. */
2096
2097static bfd_boolean
2098interesting_section (asection *s, bfd *obfd, struct spu_link_hash_table *htab)
2099{
2100 return (s != htab->stub
2101 && s->output_section != NULL
2102 && s->output_section->owner == obfd
2103 && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2104 == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2105 && s->size != 0);
2106}
2107
2108/* Map address ranges in code sections to functions. */
2109
2110static bfd_boolean
2111discover_functions (bfd *output_bfd, struct bfd_link_info *info)
2112{
2113 struct spu_link_hash_table *htab = spu_hash_table (info);
2114 bfd *ibfd;
2115 int bfd_idx;
2116 Elf_Internal_Sym ***psym_arr;
2117 asection ***sec_arr;
2118 bfd_boolean gaps = FALSE;
2119
2120 bfd_idx = 0;
2121 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2122 bfd_idx++;
2123
2124 psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
2125 if (psym_arr == NULL)
2126 return FALSE;
2127 sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
2128 if (sec_arr == NULL)
2129 return FALSE;
2130
2131
2132 for (ibfd = info->input_bfds, bfd_idx = 0;
2133 ibfd != NULL;
2134 ibfd = ibfd->link_next, bfd_idx++)
2135 {
2136 extern const bfd_target bfd_elf32_spu_vec;
2137 Elf_Internal_Shdr *symtab_hdr;
2138 asection *sec;
2139 size_t symcount;
2140 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2141 asection **psecs, **p;
2142
2143 if (ibfd->xvec != &bfd_elf32_spu_vec)
2144 continue;
2145
2146 /* Read all the symbols. */
2147 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2148 symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
2149 if (symcount == 0)
2150 continue;
2151
2152 syms = (Elf_Internal_Sym *) symtab_hdr->contents;
2153 if (syms == NULL)
2154 {
2155 syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
2156 NULL, NULL, NULL);
2157 symtab_hdr->contents = (void *) syms;
2158 if (syms == NULL)
2159 return FALSE;
2160 }
2161
2162 /* Select defined function symbols that are going to be output. */
2163 psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
2164 if (psyms == NULL)
2165 return FALSE;
2166 psym_arr[bfd_idx] = psyms;
2167 psecs = bfd_malloc (symcount * sizeof (*psecs));
2168 if (psecs == NULL)
2169 return FALSE;
2170 sec_arr[bfd_idx] = psecs;
2171 for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
2172 if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
2173 || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
2174 {
2175 asection *s;
2176
2177 *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
2178 if (s != NULL && interesting_section (s, output_bfd, htab))
2179 *psy++ = sy;
2180 }
2181 symcount = psy - psyms;
2182 *psy = NULL;
2183
2184 /* Sort them by section and offset within section. */
2185 sort_syms_syms = syms;
2186 sort_syms_psecs = psecs;
2187 qsort (psyms, symcount, sizeof (*psyms), sort_syms);
2188
2189 /* Now inspect the function symbols. */
2190 for (psy = psyms; psy < psyms + symcount; )
2191 {
2192 asection *s = psecs[*psy - syms];
2193 Elf_Internal_Sym **psy2;
2194
2195 for (psy2 = psy; ++psy2 < psyms + symcount; )
2196 if (psecs[*psy2 - syms] != s)
2197 break;
2198
2199 if (!alloc_stack_info (s, psy2 - psy))
2200 return FALSE;
2201 psy = psy2;
2202 }
2203
2204 /* First install info about properly typed and sized functions.
2205 In an ideal world this will cover all code sections, except
2206 when partitioning functions into hot and cold sections,
2207 and the horrible pasted together .init and .fini functions. */
2208 for (psy = psyms; psy < psyms + symcount; ++psy)
2209 {
2210 sy = *psy;
2211 if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
2212 {
2213 asection *s = psecs[sy - syms];
2214 if (!maybe_insert_function (s, sy, FALSE, TRUE))
2215 return FALSE;
2216 }
2217 }
2218
2219 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2220 if (interesting_section (sec, output_bfd, htab))
2221 gaps |= check_function_ranges (sec, info);
2222 }
2223
2224 if (gaps)
2225 {
2226 /* See if we can discover more function symbols by looking at
2227 relocations. */
2228 for (ibfd = info->input_bfds, bfd_idx = 0;
2229 ibfd != NULL;
2230 ibfd = ibfd->link_next, bfd_idx++)
2231 {
2232 asection *sec;
2233
2234 if (psym_arr[bfd_idx] == NULL)
2235 continue;
2236
2237 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2238 if (interesting_section (sec, output_bfd, htab)
2239 && sec->reloc_count != 0)
2240 {
2241 if (!mark_functions_via_relocs (sec, info, FALSE))
2242 return FALSE;
2243 }
2244 }
2245
2246 for (ibfd = info->input_bfds, bfd_idx = 0;
2247 ibfd != NULL;
2248 ibfd = ibfd->link_next, bfd_idx++)
2249 {
2250 Elf_Internal_Shdr *symtab_hdr;
2251 asection *sec;
2252 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2253 asection **psecs;
2254
2255 if ((psyms = psym_arr[bfd_idx]) == NULL)
2256 continue;
2257
2258 psecs = sec_arr[bfd_idx];
2259
2260 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2261 syms = (Elf_Internal_Sym *) symtab_hdr->contents;
2262
2263 gaps = FALSE;
2264 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2265 if (interesting_section (sec, output_bfd, htab))
2266 gaps |= check_function_ranges (sec, info);
2267 if (!gaps)
2268 continue;
2269
2270 /* Finally, install all globals. */
2271 for (psy = psyms; (sy = *psy) != NULL; ++psy)
2272 {
2273 asection *s;
2274
2275 s = psecs[sy - syms];
2276
2277 /* Global syms might be improperly typed functions. */
2278 if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
2279 && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
2280 {
2281 if (!maybe_insert_function (s, sy, FALSE, FALSE))
2282 return FALSE;
2283 }
2284 }
2285
2286 /* Some of the symbols we've installed as marking the
2287 beginning of functions may have a size of zero. Extend
2288 the range of such functions to the beginning of the
2289 next symbol of interest. */
2290 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2291 if (interesting_section (sec, output_bfd, htab))
2292 {
2293 struct _spu_elf_section_data *sec_data;
2294 struct spu_elf_stack_info *sinfo;
2295
2296 sec_data = spu_elf_section_data (sec);
2297 sinfo = sec_data->stack_info;
2298 if (sinfo != NULL)
2299 {
2300 int fun_idx;
2301 bfd_vma hi = sec->size;
2302
2303 for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
2304 {
2305 sinfo->fun[fun_idx].hi = hi;
2306 hi = sinfo->fun[fun_idx].lo;
2307 }
2308 }
2309 /* No symbols in this section. Must be .init or .fini
2310 or something similar. */
2311 else if (!pasted_function (sec, info))
2312 return FALSE;
2313 }
2314 }
2315 }
2316
2317 for (ibfd = info->input_bfds, bfd_idx = 0;
2318 ibfd != NULL;
2319 ibfd = ibfd->link_next, bfd_idx++)
2320 {
2321 if (psym_arr[bfd_idx] == NULL)
2322 continue;
2323
2324 free (psym_arr[bfd_idx]);
2325 free (sec_arr[bfd_idx]);
2326 }
2327
2328 free (psym_arr);
2329 free (sec_arr);
2330
2331 return TRUE;
2332}
2333
2334/* Mark nodes in the call graph that are called by some other node. */
2335
2336static void
2337mark_non_root (struct function_info *fun)
2338{
2339 struct call_info *call;
2340
2341 fun->visit1 = TRUE;
2342 for (call = fun->call_list; call; call = call->next)
2343 {
2344 call->fun->non_root = TRUE;
2345 if (!call->fun->visit1)
2346 mark_non_root (call->fun);
2347 }
2348}
2349
2350/* Remove cycles from the call graph. */
2351
2352static void
2353call_graph_traverse (struct function_info *fun, struct bfd_link_info *info)
2354{
2355 struct call_info **callp, *call;
2356
2357 fun->visit2 = TRUE;
2358 fun->marking = TRUE;
2359
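  /* MARKING is set while a node is on the current traversal path;
     reaching a marked node again means this edge closes a cycle.  */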
2360 callp = &fun->call_list;
2361 while ((call = *callp) != NULL)
2362 {
2363 if (!call->fun->visit2)
2364 call_graph_traverse (call->fun, info);
2365 else if (call->fun->marking)
2366 {
2367 const char *f1 = func_name (fun);
2368 const char *f2 = func_name (call->fun);
2369
2370 info->callbacks->info (_("Stack analysis will ignore the call "
2371 "from %s to %s\n"),
2372 f1, f2);
2373 *callp = call->next;
2374 continue;
2375 }
2376 callp = &call->next;
2377 }
2378 fun->marking = FALSE;
2379}
2380
2381/* Populate call_list for each function. */
2382
2383static bfd_boolean
2384build_call_tree (bfd *output_bfd, struct bfd_link_info *info)
2385{
2386 struct spu_link_hash_table *htab = spu_hash_table (info);
2387 bfd *ibfd;
2388
2389 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2390 {
2391 extern const bfd_target bfd_elf32_spu_vec;
2392 asection *sec;
2393
2394 if (ibfd->xvec != &bfd_elf32_spu_vec)
2395 continue;
2396
2397 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2398 {
2399 if (!interesting_section (sec, output_bfd, htab)
2400 || sec->reloc_count == 0)
2401 continue;
2402
2403 if (!mark_functions_via_relocs (sec, info, TRUE))
2404 return FALSE;
2405 }
2406
2407 /* Transfer call info from hot/cold section part of function
2408 to main entry. */
2409 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2410 {
2411 struct _spu_elf_section_data *sec_data;
2412 struct spu_elf_stack_info *sinfo;
2413
2414 if ((sec_data = spu_elf_section_data (sec)) != NULL
2415 && (sinfo = sec_data->stack_info) != NULL)
2416 {
2417 int i;
2418 for (i = 0; i < sinfo->num_fun; ++i)
2419 {
2420 if (sinfo->fun[i].start != NULL)
2421 {
2422 struct call_info *call = sinfo->fun[i].call_list;
2423
2424 while (call != NULL)
2425 {
2426 struct call_info *call_next = call->next;
2427 if (!insert_callee (sinfo->fun[i].start, call))
2428 free (call);
2429 call = call_next;
2430 }
2431 sinfo->fun[i].call_list = NULL;
2432 sinfo->fun[i].non_root = TRUE;
2433 }
2434 }
2435 }
2436 }
2437 }
2438
2439 /* Find the call graph root(s). */
2440 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2441 {
2442 extern const bfd_target bfd_elf32_spu_vec;
2443 asection *sec;
2444
2445 if (ibfd->xvec != &bfd_elf32_spu_vec)
2446 continue;
2447
2448 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2449 {
2450 struct _spu_elf_section_data *sec_data;
2451 struct spu_elf_stack_info *sinfo;
2452
2453 if ((sec_data = spu_elf_section_data (sec)) != NULL
2454 && (sinfo = sec_data->stack_info) != NULL)
2455 {
2456 int i;
2457 for (i = 0; i < sinfo->num_fun; ++i)
2458 if (!sinfo->fun[i].visit1)
2459 mark_non_root (&sinfo->fun[i]);
2460 }
2461 }
2462 }
2463
2464 /* Remove cycles from the call graph. We start from the root node(s)
2465 so that we break cycles in a reasonable place. */
2466 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2467 {
2468 extern const bfd_target bfd_elf32_spu_vec;
2469 asection *sec;
2470
2471 if (ibfd->xvec != &bfd_elf32_spu_vec)
2472 continue;
2473
2474 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2475 {
2476 struct _spu_elf_section_data *sec_data;
2477 struct spu_elf_stack_info *sinfo;
2478
2479 if ((sec_data = spu_elf_section_data (sec)) != NULL
2480 && (sinfo = sec_data->stack_info) != NULL)
2481 {
2482 int i;
2483 for (i = 0; i < sinfo->num_fun; ++i)
2484 if (!sinfo->fun[i].non_root)
2485 call_graph_traverse (&sinfo->fun[i], info);
2486 }
2487 }
2488 }
2489
2490 return TRUE;
2491}
2492
2493/* Descend the call graph for FUN, accumulating total stack required. */
2494
2495static bfd_vma
2496sum_stack (struct function_info *fun,
2497 struct bfd_link_info *info,
2498 int emit_stack_syms)
2499{
2500 struct call_info *call;
2501 struct function_info *max = NULL;
2502 bfd_vma max_stack = fun->stack;
2503 bfd_vma stack;
2504 const char *f1;
2505
2506 if (fun->visit3)
2507 return max_stack;
2508
2509 for (call = fun->call_list; call; call = call->next)
2510 {
2511 stack = sum_stack (call->fun, info, emit_stack_syms);
2512 /* Include caller stack for normal calls, don't do so for
2513 tail calls. fun->stack here is local stack usage for
2514 this function. */
2515 if (!call->is_tail)
2516 stack += fun->stack;
2517 if (max_stack < stack)
2518 {
2519 max_stack = stack;
2520 max = call->fun;
2521 }
2522 }
2523
2524 f1 = func_name (fun);
2525 info->callbacks->minfo (_("%s: 0x%v 0x%v\n"), f1, fun->stack, max_stack);
2526
2527 if (fun->call_list)
2528 {
2529 info->callbacks->minfo (_(" calls:\n"));
2530 for (call = fun->call_list; call; call = call->next)
2531 {
2532 const char *f2 = func_name (call->fun);
2533 const char *ann1 = call->fun == max ? "*" : " ";
2534 const char *ann2 = call->is_tail ? "t" : " ";
2535
2536 info->callbacks->minfo (_(" %s%s %s\n"), ann1, ann2, f2);
2537 }
2538 }
2539
2540 /* Now fun->stack holds cumulative stack. */
2541 fun->stack = max_stack;
2542 fun->visit3 = TRUE;
2543
2544 if (emit_stack_syms)
2545 {
2546 struct spu_link_hash_table *htab = spu_hash_table (info);
2547 char *name = bfd_malloc (18 + strlen (f1));
2548 struct elf_link_hash_entry *h;
2549
2550 if (name != NULL)
2551 {
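	  /* Local functions get the owning section id folded into the
	     symbol name, so identically named statics from different
	     files produce distinct stack symbols.  */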
2552 if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
2553 sprintf (name, "__stack_%s", f1);
2554 else
2555 sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);
2556
2557 h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
2558 free (name);
2559 if (h != NULL
2560 && (h->root.type == bfd_link_hash_new
2561 || h->root.type == bfd_link_hash_undefined
2562 || h->root.type == bfd_link_hash_undefweak))
2563 {
2564 h->root.type = bfd_link_hash_defined;
2565 h->root.u.def.section = bfd_abs_section_ptr;
2566 h->root.u.def.value = max_stack;
2567 h->size = 0;
2568 h->type = 0;
2569 h->ref_regular = 1;
2570 h->def_regular = 1;
2571 h->ref_regular_nonweak = 1;
2572 h->forced_local = 1;
2573 h->non_elf = 0;
2574 }
2575 }
2576 }
2577
2578 return max_stack;
2579}
2580
2581/* Provide an estimate of total stack required. */
2582
2583static bfd_boolean
2584spu_elf_stack_analysis (bfd *output_bfd,
2585 struct bfd_link_info *info,
2586 int emit_stack_syms)
2587{
2588 bfd *ibfd;
2589 bfd_vma max_stack = 0;
2590
2591 if (!discover_functions (output_bfd, info))
2592 return FALSE;
2593
2594 if (!build_call_tree (output_bfd, info))
2595 return FALSE;
2596
2597 info->callbacks->info (_("Stack size for call graph root nodes.\n"));
2598 info->callbacks->minfo (_("\nStack size for functions. "
2599 "Annotations: '*' max stack, 't' tail call\n"));
2600 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2601 {
2602 extern const bfd_target bfd_elf32_spu_vec;
2603 asection *sec;
2604
2605 if (ibfd->xvec != &bfd_elf32_spu_vec)
2606 continue;
2607
2608 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2609 {
2610 struct _spu_elf_section_data *sec_data;
2611 struct spu_elf_stack_info *sinfo;
2612
2613 if ((sec_data = spu_elf_section_data (sec)) != NULL
2614 && (sinfo = sec_data->stack_info) != NULL)
2615 {
2616 int i;
2617 for (i = 0; i < sinfo->num_fun; ++i)
2618 {
2619 if (!sinfo->fun[i].non_root)
2620 {
2621 bfd_vma stack;
2622 const char *f1;
2623
2624 stack = sum_stack (&sinfo->fun[i], info,
2625 emit_stack_syms);
2626 f1 = func_name (&sinfo->fun[i]);
2627 info->callbacks->info (_(" %s: 0x%v\n"),
2628 f1, stack);
2629 if (max_stack < stack)
2630 max_stack = stack;
2631 }
2632 }
2633 }
2634 }
2635 }
2636
2637 info->callbacks->info (_("Maximum stack required is 0x%v\n"), max_stack);
2638 return TRUE;
2639}
2640
2641/* Perform a final link. */
2642
2643static bfd_boolean
2644spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
2645{
2646 struct spu_link_hash_table *htab = spu_hash_table (info);
2647
2648 if (htab->stack_analysis
2649 && !spu_elf_stack_analysis (output_bfd, info, htab->emit_stack_syms))
2650 info->callbacks->einfo ("%X%P: stack analysis error: %E\n");
2651
2652 return bfd_elf_final_link (output_bfd, info);
2653}
2654
2655/* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
2656
2657static bfd_boolean
2658spu_elf_relocate_section (bfd *output_bfd,
2659 struct bfd_link_info *info,
2660 bfd *input_bfd,
2661 asection *input_section,
2662 bfd_byte *contents,
2663 Elf_Internal_Rela *relocs,
2664 Elf_Internal_Sym *local_syms,
2665 asection **local_sections)
2666{
2667 Elf_Internal_Shdr *symtab_hdr;
2668 struct elf_link_hash_entry **sym_hashes;
2669 Elf_Internal_Rela *rel, *relend;
2670 struct spu_link_hash_table *htab;
2671 bfd_boolean ret = TRUE;
2672
2673 htab = spu_hash_table (info);
2674 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
2675 sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));
2676
2677 rel = relocs;
2678 relend = relocs + input_section->reloc_count;
2679 for (; rel < relend; rel++)
2680 {
2681 int r_type;
2682 reloc_howto_type *howto;
2683 unsigned long r_symndx;
2684 Elf_Internal_Sym *sym;
2685 asection *sec;
2686 struct elf_link_hash_entry *h;
2687 const char *sym_name;
2688 bfd_vma relocation;
2689 bfd_vma addend;
2690 bfd_reloc_status_type r;
2691 bfd_boolean unresolved_reloc;
2692 bfd_boolean warned;
 2693	      bfd_boolean branch;
2694
2695 r_symndx = ELF32_R_SYM (rel->r_info);
2696 r_type = ELF32_R_TYPE (rel->r_info);
2697 howto = elf_howto_table + r_type;
2698 unresolved_reloc = FALSE;
2699 warned = FALSE;
2700
2701 h = NULL;
2702 sym = NULL;
2703 sec = NULL;
2704 if (r_symndx < symtab_hdr->sh_info)
2705 {
2706 sym = local_syms + r_symndx;
2707 sec = local_sections[r_symndx];
2708 sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
2709 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
2710 }
2711 else
2712 {
2713 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2714 r_symndx, symtab_hdr, sym_hashes,
2715 h, sec, relocation,
2716 unresolved_reloc, warned);
2717 sym_name = h->root.root.string;
2718 }
2719
2720 if (sec != NULL && elf_discarded_section (sec))
2721 {
2722 /* For relocs against symbols from removed linkonce sections,
2723 or sections discarded by a linker script, we just want the
2724 section contents zeroed. Avoid any special processing. */
2725 _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
2726 rel->r_info = 0;
2727 rel->r_addend = 0;
2728 continue;
2729 }
2730
2731 if (info->relocatable)
2732 continue;
2733
2734 if (unresolved_reloc)
2735 {
2736 (*_bfd_error_handler)
2737 (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
2738 input_bfd,
2739 bfd_get_section_name (input_bfd, input_section),
2740 (long) rel->r_offset,
2741 howto->name,
2742 sym_name);
2743 ret = FALSE;
2744 }
2745
2746 /* If this symbol is in an overlay area, we may need to relocate
2747 to the overlay stub. */
2748 addend = rel->r_addend;
2749 branch = (is_branch (contents + rel->r_offset)
2750 || is_hint (contents + rel->r_offset));
2751 if (needs_ovl_stub (sym_name, sec, input_section, htab, branch))
2752 {
2753 char *stub_name;
2754 struct spu_stub_hash_entry *sh;
2755
 2756	  stub_name = spu_stub_name (sec, h, rel);
2757 if (stub_name == NULL)
2758 return FALSE;
2759
2760 sh = (struct spu_stub_hash_entry *)
2761 bfd_hash_lookup (&htab->stub_hash_table, stub_name, FALSE, FALSE);
2762 if (sh != NULL)
2763 {
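	      /* Redirect the relocation to the overlay stub.  The stub
		 reaches the real target itself, so drop the addend.  */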
2764 relocation = (htab->stub->output_section->vma
2765 + htab->stub->output_offset
2766 + sh->off);
2767 addend = 0;
2768 }
2769 free (stub_name);
2770 }
2771
2772 r = _bfd_final_link_relocate (howto,
2773 input_bfd,
2774 input_section,
2775 contents,
2776 rel->r_offset, relocation, addend);
2777
2778 if (r != bfd_reloc_ok)
2779 {
2780 const char *msg = (const char *) 0;
2781
2782 switch (r)
2783 {
2784 case bfd_reloc_overflow:
2785 if (!((*info->callbacks->reloc_overflow)
2786 (info, (h ? &h->root : NULL), sym_name, howto->name,
2787 (bfd_vma) 0, input_bfd, input_section, rel->r_offset)))
2788 return FALSE;
2789 break;
2790
2791 case bfd_reloc_undefined:
2792 if (!((*info->callbacks->undefined_symbol)
2793 (info, sym_name, input_bfd, input_section,
2794 rel->r_offset, TRUE)))
2795 return FALSE;
2796 break;
2797
2798 case bfd_reloc_outofrange:
2799 msg = _("internal error: out of range error");
2800 goto common_error;
2801
2802 case bfd_reloc_notsupported:
2803 msg = _("internal error: unsupported relocation error");
2804 goto common_error;
2805
2806 case bfd_reloc_dangerous:
2807 msg = _("internal error: dangerous error");
2808 goto common_error;
2809
2810 default:
2811 msg = _("internal error: unknown error");
2812 /* fall through */
2813
2814 common_error:
2815 if (!((*info->callbacks->warning)
2816 (info, msg, sym_name, input_bfd, input_section,
2817 rel->r_offset)))
2818 return FALSE;
2819 break;
2820 }
2821 }
2822 }
2823
2824 return ret;
2825}
2826
2827/* Adjust _SPUEAR_ syms to point at their overlay stubs. */
2828
2829static bfd_boolean
2830spu_elf_output_symbol_hook (struct bfd_link_info *info,
2831 const char *sym_name ATTRIBUTE_UNUSED,
2832 Elf_Internal_Sym *sym,
2833 asection *sym_sec ATTRIBUTE_UNUSED,
2834 struct elf_link_hash_entry *h)
2835{
2836 struct spu_link_hash_table *htab = spu_hash_table (info);
2837
2838 if (!info->relocatable
2839 && htab->num_overlays != 0
2840 && h != NULL
2841 && (h->root.type == bfd_link_hash_defined
2842 || h->root.type == bfd_link_hash_defweak)
2843 && h->def_regular
2844 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
2845 {
2846 static Elf_Internal_Rela zero_rel;
2847 char *stub_name = spu_stub_name (h->root.u.def.section, h, &zero_rel);
2848 struct spu_stub_hash_entry *sh;
2849
2850 if (stub_name == NULL)
2851 return FALSE;
2852 sh = (struct spu_stub_hash_entry *)
2853 bfd_hash_lookup (&htab->stub_hash_table, stub_name, FALSE, FALSE);
2854 free (stub_name);
2855 if (sh == NULL)
2856 return TRUE;
2857 sym->st_shndx
2858 = _bfd_elf_section_from_bfd_section (htab->stub->output_section->owner,
2859 htab->stub->output_section);
2860 sym->st_value = (htab->stub->output_section->vma
2861 + htab->stub->output_offset
2862 + sh->off);
2863 }
2864
2865 return TRUE;
2866}
2867
2868static int spu_plugin = 0;
2869
2870void
2871spu_elf_plugin (int val)
2872{
2873 spu_plugin = val;
2874}
2875
2876/* Set ELF header e_type for plugins. */
2877
2878static void
2879spu_elf_post_process_headers (bfd *abfd,
2880 struct bfd_link_info *info ATTRIBUTE_UNUSED)
2881{
2882 if (spu_plugin)
2883 {
2884 Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);
2885
2886 i_ehdrp->e_type = ET_DYN;
2887 }
2888}
2889
2890/* We may add an extra PT_LOAD segment for .toe. We also need extra
2891 segments for overlays. */
2892
2893static int
2894spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
2895{
2896 struct spu_link_hash_table *htab = spu_hash_table (info);
2897 int extra = htab->num_overlays;
2898 asection *sec;
2899
2900 if (extra)
2901 ++extra;
2902
2903 sec = bfd_get_section_by_name (abfd, ".toe");
2904 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
2905 ++extra;
2906
2907 return extra;
2908}
2909
2910/* Remove .toe section from other PT_LOAD segments and put it in
2911 a segment of its own. Put overlays in separate segments too. */
2912
2913static bfd_boolean
2914spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
2915{
2916 asection *toe, *s;
2917 struct elf_segment_map *m;
2918 unsigned int i;
2919
2920 if (info == NULL)
2921 return TRUE;
2922
2923 toe = bfd_get_section_by_name (abfd, ".toe");
2924 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
2925 if (m->p_type == PT_LOAD && m->count > 1)
2926 for (i = 0; i < m->count; i++)
2927 if ((s = m->sections[i]) == toe
2928 || spu_elf_section_data (s)->ovl_index != 0)
2929 {
2930 struct elf_segment_map *m2;
2931 bfd_vma amt;
2932
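		/* Split the mapping: trailing sections get a new PT_LOAD,
		   then this section is left in (or moved to) a load
		   segment of its own.  */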
2933 if (i + 1 < m->count)
2934 {
2935 amt = sizeof (struct elf_segment_map);
2936 amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
2937 m2 = bfd_zalloc (abfd, amt);
2938 if (m2 == NULL)
2939 return FALSE;
2940 m2->count = m->count - (i + 1);
2941 memcpy (m2->sections, m->sections + i + 1,
2942 m2->count * sizeof (m->sections[0]));
2943 m2->p_type = PT_LOAD;
2944 m2->next = m->next;
2945 m->next = m2;
2946 }
2947 m->count = 1;
2948 if (i != 0)
2949 {
2950 m->count = i;
2951 amt = sizeof (struct elf_segment_map);
2952 m2 = bfd_zalloc (abfd, amt);
2953 if (m2 == NULL)
2954 return FALSE;
2955 m2->p_type = PT_LOAD;
2956 m2->count = 1;
2957 m2->sections[0] = s;
2958 m2->next = m->next;
2959 m->next = m2;
2960 }
2961 break;
2962 }
2963
2964 return TRUE;
2965}
2966
2967/* Check that all loadable section VMAs lie in the range
2968 LO .. HI inclusive. */
2969
2970asection *
2971spu_elf_check_vma (bfd *abfd, bfd_vma lo, bfd_vma hi)
2972{
2973 struct elf_segment_map *m;
2974 unsigned int i;
2975
2976 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
2977 if (m->p_type == PT_LOAD)
2978 for (i = 0; i < m->count; i++)
2979 if (m->sections[i]->size != 0
2980 && (m->sections[i]->vma < lo
2981 || m->sections[i]->vma > hi
2982 || m->sections[i]->vma + m->sections[i]->size - 1 > hi))
2983 return m->sections[i];
2984
2985 return NULL;
2986}
2987
2988/* Tweak phdrs before writing them out. */
2989
2990static int
2991spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
2992{
2993 const struct elf_backend_data *bed;
2994 struct elf_obj_tdata *tdata;
2995 Elf_Internal_Phdr *phdr, *last;
2996 struct spu_link_hash_table *htab;
2997 unsigned int count;
2998 unsigned int i;
2999
3000 if (info == NULL)
3001 return TRUE;
3002
3003 bed = get_elf_backend_data (abfd);
3004 tdata = elf_tdata (abfd);
3005 phdr = tdata->phdr;
3006 count = tdata->program_header_size / bed->s->sizeof_phdr;
3007 htab = spu_hash_table (info);
3008 if (htab->num_overlays != 0)
3009 {
3010 struct elf_segment_map *m;
3011 unsigned int o;
3012
3013 for (i = 0, m = elf_tdata (abfd)->segment_map; m; ++i, m = m->next)
3014 if (m->count != 0
3015 && (o = spu_elf_section_data (m->sections[0])->ovl_index) != 0)
3016 {
3017 /* Mark this as an overlay header. */
3018 phdr[i].p_flags |= PF_OVERLAY;
3019
3020 if (htab->ovtab != NULL && htab->ovtab->size != 0)
3021 {
3022 bfd_byte *p = htab->ovtab->contents;
3023 unsigned int off = (o - 1) * 16 + 8;
3024
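		  /* Each _ovly_table entry appears to occupy 16 bytes,
		     with the file offset field at byte 8, hence OFF.  */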
3025 /* Write file_off into _ovly_table. */
3026 bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
3027 }
3028 }
3029 }
3030
3031 /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
3032 of 16. This should always be possible when using the standard
3033 linker scripts, but don't create overlapping segments if
3034 someone is playing games with linker scripts. */
3035 last = NULL;
3036 for (i = count; i-- != 0; )
3037 if (phdr[i].p_type == PT_LOAD)
3038 {
3039 unsigned adjust;
3040
3041 adjust = -phdr[i].p_filesz & 15;
3042 if (adjust != 0
3043 && last != NULL
3044 && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
3045 break;
3046
3047 adjust = -phdr[i].p_memsz & 15;
3048 if (adjust != 0
3049 && last != NULL
3050 && phdr[i].p_filesz != 0
3051 && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
3052 && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
3053 break;
3054
3055 if (phdr[i].p_filesz != 0)
3056 last = &phdr[i];
3057 }
3058
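  /* The loop above only checks that padding would not collide with the
     following segment; the padding itself is applied below, and only
     when the scan completed without finding a collision.  */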
3059 if (i == (unsigned int) -1)
3060 for (i = count; i-- != 0; )
3061 if (phdr[i].p_type == PT_LOAD)
3062 {
3063 unsigned adjust;
3064
3065 adjust = -phdr[i].p_filesz & 15;
3066 phdr[i].p_filesz += adjust;
3067
3068 adjust = -phdr[i].p_memsz & 15;
3069 phdr[i].p_memsz += adjust;
3070 }
3071
3072 return TRUE;
3073}
3074
3075/* Arrange for our linker created section to be output. */
3076
3077static bfd_boolean
3078spu_elf_section_processing (bfd *abfd ATTRIBUTE_UNUSED,
3079 Elf_Internal_Shdr *i_shdrp)
3080{
3081 asection *sec;
3082
3083 sec = i_shdrp->bfd_section;
3084 if (sec != NULL
3085 && (sec->flags & SEC_LINKER_CREATED) != 0
3086 && sec->name != NULL
3087 && strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
3088 i_shdrp->contents = sec->contents;
3089
3090 return TRUE;
3091}
3092
3093#define TARGET_BIG_SYM bfd_elf32_spu_vec
3094#define TARGET_BIG_NAME "elf32-spu"
3095#define ELF_ARCH bfd_arch_spu
3096#define ELF_MACHINE_CODE EM_SPU
3097/* This matches the alignment need for DMA. */
3098#define ELF_MAXPAGESIZE 0x80
3099#define elf_backend_rela_normal 1
3100#define elf_backend_can_gc_sections 1
3101
3102#define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
 3103 #define bfd_elf32_bfd_reloc_name_lookup	spu_elf_reloc_name_lookup
3104#define elf_info_to_howto spu_elf_info_to_howto
3105#define elf_backend_gc_mark_hook spu_elf_gc_mark_hook
3106#define elf_backend_relocate_section spu_elf_relocate_section
3107#define elf_backend_symbol_processing spu_elf_backend_symbol_processing
 3108 #define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
3109#define bfd_elf32_new_section_hook spu_elf_new_section_hook
3110#define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create
3111#define bfd_elf32_bfd_link_hash_table_free spu_elf_link_hash_table_free
3112
3113#define elf_backend_additional_program_headers spu_elf_additional_program_headers
3114#define elf_backend_modify_segment_map spu_elf_modify_segment_map
3115#define elf_backend_modify_program_headers spu_elf_modify_program_headers
3116#define elf_backend_post_process_headers spu_elf_post_process_headers
3117#define elf_backend_section_processing spu_elf_section_processing
3118#define elf_backend_special_sections spu_elf_special_sections
 3119 #define bfd_elf32_bfd_final_link		spu_elf_final_link
3120
3121#include "elf32-target.h"