1 /* SPU specific support for 32-bit ELF
3 Copyright 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
22 #include "libiberty.h"
28 #include "elf32-spu.h"
30 /* We use RELA style relocs. Don't define USE_REL. */
32 static bfd_reloc_status_type
spu_elf_rel9 (bfd
*, arelent
*, asymbol
*,
36 /* Values of type 'enum elf_spu_reloc_type' are used to index this
37 array, so it must be declared in the order of that type. */
39 static reloc_howto_type elf_howto_table
[] = {
40 HOWTO (R_SPU_NONE
, 0, 0, 0, FALSE
, 0, complain_overflow_dont
,
41 bfd_elf_generic_reloc
, "SPU_NONE",
42 FALSE
, 0, 0x00000000, FALSE
),
43 HOWTO (R_SPU_ADDR10
, 4, 2, 10, FALSE
, 14, complain_overflow_bitfield
,
44 bfd_elf_generic_reloc
, "SPU_ADDR10",
45 FALSE
, 0, 0x00ffc000, FALSE
),
46 HOWTO (R_SPU_ADDR16
, 2, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
47 bfd_elf_generic_reloc
, "SPU_ADDR16",
48 FALSE
, 0, 0x007fff80, FALSE
),
49 HOWTO (R_SPU_ADDR16_HI
, 16, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
50 bfd_elf_generic_reloc
, "SPU_ADDR16_HI",
51 FALSE
, 0, 0x007fff80, FALSE
),
52 HOWTO (R_SPU_ADDR16_LO
, 0, 2, 16, FALSE
, 7, complain_overflow_dont
,
53 bfd_elf_generic_reloc
, "SPU_ADDR16_LO",
54 FALSE
, 0, 0x007fff80, FALSE
),
55 HOWTO (R_SPU_ADDR18
, 0, 2, 18, FALSE
, 7, complain_overflow_bitfield
,
56 bfd_elf_generic_reloc
, "SPU_ADDR18",
57 FALSE
, 0, 0x01ffff80, FALSE
),
58 HOWTO (R_SPU_ADDR32
, 0, 2, 32, FALSE
, 0, complain_overflow_dont
,
59 bfd_elf_generic_reloc
, "SPU_ADDR32",
60 FALSE
, 0, 0xffffffff, FALSE
),
61 HOWTO (R_SPU_REL16
, 2, 2, 16, TRUE
, 7, complain_overflow_bitfield
,
62 bfd_elf_generic_reloc
, "SPU_REL16",
63 FALSE
, 0, 0x007fff80, TRUE
),
64 HOWTO (R_SPU_ADDR7
, 0, 2, 7, FALSE
, 14, complain_overflow_dont
,
65 bfd_elf_generic_reloc
, "SPU_ADDR7",
66 FALSE
, 0, 0x001fc000, FALSE
),
67 HOWTO (R_SPU_REL9
, 2, 2, 9, TRUE
, 0, complain_overflow_signed
,
68 spu_elf_rel9
, "SPU_REL9",
69 FALSE
, 0, 0x0180007f, TRUE
),
70 HOWTO (R_SPU_REL9I
, 2, 2, 9, TRUE
, 0, complain_overflow_signed
,
71 spu_elf_rel9
, "SPU_REL9I",
72 FALSE
, 0, 0x0000c07f, TRUE
),
73 HOWTO (R_SPU_ADDR10I
, 0, 2, 10, FALSE
, 14, complain_overflow_signed
,
74 bfd_elf_generic_reloc
, "SPU_ADDR10I",
75 FALSE
, 0, 0x00ffc000, FALSE
),
76 HOWTO (R_SPU_ADDR16I
, 0, 2, 16, FALSE
, 7, complain_overflow_signed
,
77 bfd_elf_generic_reloc
, "SPU_ADDR16I",
78 FALSE
, 0, 0x007fff80, FALSE
),
79 HOWTO (R_SPU_REL32
, 0, 2, 32, TRUE
, 0, complain_overflow_dont
,
80 bfd_elf_generic_reloc
, "SPU_REL32",
81 FALSE
, 0, 0xffffffff, TRUE
),
82 HOWTO (R_SPU_ADDR16X
, 0, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
83 bfd_elf_generic_reloc
, "SPU_ADDR16X",
84 FALSE
, 0, 0x007fff80, FALSE
),
85 HOWTO (R_SPU_PPU32
, 0, 2, 32, FALSE
, 0, complain_overflow_dont
,
86 bfd_elf_generic_reloc
, "SPU_PPU32",
87 FALSE
, 0, 0xffffffff, FALSE
),
88 HOWTO (R_SPU_PPU64
, 0, 4, 64, FALSE
, 0, complain_overflow_dont
,
89 bfd_elf_generic_reloc
, "SPU_PPU64",
93 static struct bfd_elf_special_section
const spu_elf_special_sections
[] = {
94 { "._ea", 4, 0, SHT_PROGBITS
, SHF_WRITE
},
95 { ".toe", 4, 0, SHT_NOBITS
, SHF_ALLOC
},
99 static enum elf_spu_reloc_type
100 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code
)
106 case BFD_RELOC_SPU_IMM10W
:
108 case BFD_RELOC_SPU_IMM16W
:
110 case BFD_RELOC_SPU_LO16
:
111 return R_SPU_ADDR16_LO
;
112 case BFD_RELOC_SPU_HI16
:
113 return R_SPU_ADDR16_HI
;
114 case BFD_RELOC_SPU_IMM18
:
116 case BFD_RELOC_SPU_PCREL16
:
118 case BFD_RELOC_SPU_IMM7
:
120 case BFD_RELOC_SPU_IMM8
:
122 case BFD_RELOC_SPU_PCREL9a
:
124 case BFD_RELOC_SPU_PCREL9b
:
126 case BFD_RELOC_SPU_IMM10
:
127 return R_SPU_ADDR10I
;
128 case BFD_RELOC_SPU_IMM16
:
129 return R_SPU_ADDR16I
;
132 case BFD_RELOC_32_PCREL
:
134 case BFD_RELOC_SPU_PPU32
:
136 case BFD_RELOC_SPU_PPU64
:
142 spu_elf_info_to_howto (bfd
*abfd ATTRIBUTE_UNUSED
,
144 Elf_Internal_Rela
*dst
)
146 enum elf_spu_reloc_type r_type
;
148 r_type
= (enum elf_spu_reloc_type
) ELF32_R_TYPE (dst
->r_info
);
149 BFD_ASSERT (r_type
< R_SPU_max
);
150 cache_ptr
->howto
= &elf_howto_table
[(int) r_type
];
153 static reloc_howto_type
*
154 spu_elf_reloc_type_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
155 bfd_reloc_code_real_type code
)
157 enum elf_spu_reloc_type r_type
= spu_elf_bfd_to_reloc_type (code
);
159 if (r_type
== R_SPU_NONE
)
162 return elf_howto_table
+ r_type
;
165 static reloc_howto_type
*
166 spu_elf_reloc_name_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
171 for (i
= 0; i
< sizeof (elf_howto_table
) / sizeof (elf_howto_table
[0]); i
++)
172 if (elf_howto_table
[i
].name
!= NULL
173 && strcasecmp (elf_howto_table
[i
].name
, r_name
) == 0)
174 return &elf_howto_table
[i
];
179 /* Apply R_SPU_REL9 and R_SPU_REL9I relocs. */
181 static bfd_reloc_status_type
182 spu_elf_rel9 (bfd
*abfd
, arelent
*reloc_entry
, asymbol
*symbol
,
183 void *data
, asection
*input_section
,
184 bfd
*output_bfd
, char **error_message
)
186 bfd_size_type octets
;
190 /* If this is a relocatable link (output_bfd test tells us), just
191 call the generic function. Any adjustment will be done at final
193 if (output_bfd
!= NULL
)
194 return bfd_elf_generic_reloc (abfd
, reloc_entry
, symbol
, data
,
195 input_section
, output_bfd
, error_message
);
197 if (reloc_entry
->address
> bfd_get_section_limit (abfd
, input_section
))
198 return bfd_reloc_outofrange
;
199 octets
= reloc_entry
->address
* bfd_octets_per_byte (abfd
);
201 /* Get symbol value. */
203 if (!bfd_is_com_section (symbol
->section
))
205 if (symbol
->section
->output_section
)
206 val
+= symbol
->section
->output_section
->vma
;
208 val
+= reloc_entry
->addend
;
210 /* Make it pc-relative. */
211 val
-= input_section
->output_section
->vma
+ input_section
->output_offset
;
214 if (val
+ 256 >= 512)
215 return bfd_reloc_overflow
;
217 insn
= bfd_get_32 (abfd
, (bfd_byte
*) data
+ octets
);
219 /* Move two high bits of value to REL9I and REL9 position.
220 The mask will take care of selecting the right field. */
221 val
= (val
& 0x7f) | ((val
& 0x180) << 7) | ((val
& 0x180) << 16);
222 insn
&= ~reloc_entry
->howto
->dst_mask
;
223 insn
|= val
& reloc_entry
->howto
->dst_mask
;
224 bfd_put_32 (abfd
, insn
, (bfd_byte
*) data
+ octets
);
229 spu_elf_new_section_hook (bfd
*abfd
, asection
*sec
)
231 if (!sec
->used_by_bfd
)
233 struct _spu_elf_section_data
*sdata
;
235 sdata
= bfd_zalloc (abfd
, sizeof (*sdata
));
238 sec
->used_by_bfd
= sdata
;
241 return _bfd_elf_new_section_hook (abfd
, sec
);
244 /* Set up overlay info for executables. */
247 spu_elf_object_p (bfd
*abfd
)
249 if ((abfd
->flags
& (EXEC_P
| DYNAMIC
)) != 0)
251 unsigned int i
, num_ovl
, num_buf
;
252 Elf_Internal_Phdr
*phdr
= elf_tdata (abfd
)->phdr
;
253 Elf_Internal_Ehdr
*ehdr
= elf_elfheader (abfd
);
254 Elf_Internal_Phdr
*last_phdr
= NULL
;
256 for (num_buf
= 0, num_ovl
= 0, i
= 0; i
< ehdr
->e_phnum
; i
++, phdr
++)
257 if (phdr
->p_type
== PT_LOAD
&& (phdr
->p_flags
& PF_OVERLAY
) != 0)
262 if (last_phdr
== NULL
263 || ((last_phdr
->p_vaddr
^ phdr
->p_vaddr
) & 0x3ffff) != 0)
266 for (j
= 1; j
< elf_numsections (abfd
); j
++)
268 Elf_Internal_Shdr
*shdr
= elf_elfsections (abfd
)[j
];
270 if (ELF_IS_SECTION_IN_SEGMENT_MEMORY (shdr
, phdr
))
272 asection
*sec
= shdr
->bfd_section
;
273 spu_elf_section_data (sec
)->u
.o
.ovl_index
= num_ovl
;
274 spu_elf_section_data (sec
)->u
.o
.ovl_buf
= num_buf
;
282 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
283 strip --strip-unneeded will not remove them. */
286 spu_elf_backend_symbol_processing (bfd
*abfd ATTRIBUTE_UNUSED
, asymbol
*sym
)
288 if (sym
->name
!= NULL
289 && sym
->section
!= bfd_abs_section_ptr
290 && strncmp (sym
->name
, "_EAR_", 5) == 0)
291 sym
->flags
|= BSF_KEEP
;
294 /* SPU ELF linker hash table. */
296 struct spu_link_hash_table
298 struct elf_link_hash_table elf
;
300 struct spu_elf_params
*params
;
302 /* Shortcuts to overlay sections. */
308 /* Count of stubs in each overlay section. */
309 unsigned int *stub_count
;
311 /* The stub section for each overlay section. */
314 struct elf_link_hash_entry
*ovly_entry
[2];
316 /* Number of overlay buffers. */
317 unsigned int num_buf
;
319 /* Total number of overlays. */
320 unsigned int num_overlays
;
322 /* For soft icache. */
323 unsigned int line_size_log2
;
324 unsigned int num_lines_log2
;
325 unsigned int fromelem_size_log2
;
327 /* How much memory we have. */
328 unsigned int local_store
;
329 /* Local store --auto-overlay should reserve for non-overlay
330 functions and data. */
331 unsigned int overlay_fixed
;
332 /* Local store --auto-overlay should reserve for stack and heap. */
333 unsigned int reserved
;
334 /* If reserved is not specified, stack analysis will calculate a value
335 for the stack. This parameter adjusts that value to allow for
336 negative sp access (the ABI says 2000 bytes below sp are valid,
337 and the overlay manager uses some of this area). */
338 int extra_stack_space
;
339 /* Count of overlay stubs needed in non-overlay area. */
340 unsigned int non_ovly_stub
;
343 unsigned int stub_err
: 1;
346 /* Hijack the generic got fields for overlay stub accounting. */
350 struct got_entry
*next
;
359 #define spu_hash_table(p) \
360 ((struct spu_link_hash_table *) ((p)->hash))
364 struct function_info
*fun
;
365 struct call_info
*next
;
367 unsigned int max_depth
;
368 unsigned int is_tail
: 1;
369 unsigned int is_pasted
: 1;
370 unsigned int priority
: 13;
375 /* List of functions called. Also branches to hot/cold part of
377 struct call_info
*call_list
;
378 /* For hot/cold part of function, point to owner. */
379 struct function_info
*start
;
380 /* Symbol at start of function. */
382 Elf_Internal_Sym
*sym
;
383 struct elf_link_hash_entry
*h
;
385 /* Function section. */
388 /* Where last called from, and number of sections called from. */
389 asection
*last_caller
;
390 unsigned int call_count
;
391 /* Address range of (this part of) function. */
393 /* Offset where we found a store of lr, or -1 if none found. */
395 /* Offset where we found the stack adjustment insn. */
399 /* Distance from root of call tree. Tail and hot/cold branches
400 count as one deeper. We aren't counting stack frames here. */
402 /* Set if global symbol. */
403 unsigned int global
: 1;
404 /* Set if known to be start of function (as distinct from a hunk
405 in hot/cold section. */
406 unsigned int is_func
: 1;
407 /* Set if not a root node. */
408 unsigned int non_root
: 1;
409 /* Flags used during call tree traversal. It's cheaper to replicate
410 the visit flags than have one which needs clearing after a traversal. */
411 unsigned int visit1
: 1;
412 unsigned int visit2
: 1;
413 unsigned int marking
: 1;
414 unsigned int visit3
: 1;
415 unsigned int visit4
: 1;
416 unsigned int visit5
: 1;
417 unsigned int visit6
: 1;
418 unsigned int visit7
: 1;
421 struct spu_elf_stack_info
425 /* Variable size array describing functions, one per contiguous
426 address range belonging to a function. */
427 struct function_info fun
[1];
430 static struct function_info
*find_function (asection
*, bfd_vma
,
431 struct bfd_link_info
*);
433 /* Create a spu ELF linker hash table. */
435 static struct bfd_link_hash_table
*
436 spu_elf_link_hash_table_create (bfd
*abfd
)
438 struct spu_link_hash_table
*htab
;
440 htab
= bfd_malloc (sizeof (*htab
));
444 if (!_bfd_elf_link_hash_table_init (&htab
->elf
, abfd
,
445 _bfd_elf_link_hash_newfunc
,
446 sizeof (struct elf_link_hash_entry
)))
452 memset (&htab
->ovtab
, 0,
453 sizeof (*htab
) - offsetof (struct spu_link_hash_table
, ovtab
));
455 htab
->elf
.init_got_refcount
.refcount
= 0;
456 htab
->elf
.init_got_refcount
.glist
= NULL
;
457 htab
->elf
.init_got_offset
.offset
= 0;
458 htab
->elf
.init_got_offset
.glist
= NULL
;
459 return &htab
->elf
.root
;
463 spu_elf_setup (struct bfd_link_info
*info
, struct spu_elf_params
*params
)
465 bfd_vma max_branch_log2
;
467 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
468 htab
->params
= params
;
469 htab
->line_size_log2
= bfd_log2 (htab
->params
->line_size
);
470 htab
->num_lines_log2
= bfd_log2 (htab
->params
->num_lines
);
472 /* For the software i-cache, we provide a "from" list whose size
473 is a power-of-two number of quadwords, big enough to hold one
474 byte per outgoing branch. Compute this number here. */
475 max_branch_log2
= bfd_log2 (htab
->params
->max_branch
);
476 htab
->fromelem_size_log2
= max_branch_log2
> 4 ? max_branch_log2
- 4 : 0;
479 /* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
480 to (hash, NULL) for global symbols, and (NULL, sym) for locals. Set
481 *SYMSECP to the symbol's section. *LOCSYMSP caches local syms. */
484 get_sym_h (struct elf_link_hash_entry
**hp
,
485 Elf_Internal_Sym
**symp
,
487 Elf_Internal_Sym
**locsymsp
,
488 unsigned long r_symndx
,
491 Elf_Internal_Shdr
*symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
493 if (r_symndx
>= symtab_hdr
->sh_info
)
495 struct elf_link_hash_entry
**sym_hashes
= elf_sym_hashes (ibfd
);
496 struct elf_link_hash_entry
*h
;
498 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
499 while (h
->root
.type
== bfd_link_hash_indirect
500 || h
->root
.type
== bfd_link_hash_warning
)
501 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
511 asection
*symsec
= NULL
;
512 if (h
->root
.type
== bfd_link_hash_defined
513 || h
->root
.type
== bfd_link_hash_defweak
)
514 symsec
= h
->root
.u
.def
.section
;
520 Elf_Internal_Sym
*sym
;
521 Elf_Internal_Sym
*locsyms
= *locsymsp
;
525 locsyms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
527 locsyms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
,
529 0, NULL
, NULL
, NULL
);
534 sym
= locsyms
+ r_symndx
;
543 *symsecp
= bfd_section_from_elf_index (ibfd
, sym
->st_shndx
);
549 /* Create the note section if not already present. This is done early so
550 that the linker maps the sections to the right place in the output. */
553 spu_elf_create_sections (struct bfd_link_info
*info
)
557 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
558 if (bfd_get_section_by_name (ibfd
, SPU_PTNOTE_SPUNAME
) != NULL
)
563 /* Make SPU_PTNOTE_SPUNAME section. */
570 ibfd
= info
->input_bfds
;
571 flags
= SEC_LOAD
| SEC_READONLY
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
572 s
= bfd_make_section_anyway_with_flags (ibfd
, SPU_PTNOTE_SPUNAME
, flags
);
574 || !bfd_set_section_alignment (ibfd
, s
, 4))
577 name_len
= strlen (bfd_get_filename (info
->output_bfd
)) + 1;
578 size
= 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4);
579 size
+= (name_len
+ 3) & -4;
581 if (!bfd_set_section_size (ibfd
, s
, size
))
584 data
= bfd_zalloc (ibfd
, size
);
588 bfd_put_32 (ibfd
, sizeof (SPU_PLUGIN_NAME
), data
+ 0);
589 bfd_put_32 (ibfd
, name_len
, data
+ 4);
590 bfd_put_32 (ibfd
, 1, data
+ 8);
591 memcpy (data
+ 12, SPU_PLUGIN_NAME
, sizeof (SPU_PLUGIN_NAME
));
592 memcpy (data
+ 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4),
593 bfd_get_filename (info
->output_bfd
), name_len
);
600 /* qsort predicate to sort sections by vma. */
603 sort_sections (const void *a
, const void *b
)
605 const asection
*const *s1
= a
;
606 const asection
*const *s2
= b
;
607 bfd_signed_vma delta
= (*s1
)->vma
- (*s2
)->vma
;
610 return delta
< 0 ? -1 : 1;
612 return (*s1
)->index
- (*s2
)->index
;
615 /* Identify overlays in the output bfd, and number them.
616 Returns 0 on error, 1 if no overlays, 2 if overlays. */
619 spu_elf_find_overlays (struct bfd_link_info
*info
)
621 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
622 asection
**alloc_sec
;
623 unsigned int i
, n
, ovl_index
, num_buf
;
626 static const char *const entry_names
[2][2] = {
627 { "__ovly_load", "__icache_br_handler" },
628 { "__ovly_return", "__icache_call_handler" }
631 if (info
->output_bfd
->section_count
< 2)
635 = bfd_malloc (info
->output_bfd
->section_count
* sizeof (*alloc_sec
));
636 if (alloc_sec
== NULL
)
639 /* Pick out all the alloced sections. */
640 for (n
= 0, s
= info
->output_bfd
->sections
; s
!= NULL
; s
= s
->next
)
641 if ((s
->flags
& SEC_ALLOC
) != 0
642 && (s
->flags
& (SEC_LOAD
| SEC_THREAD_LOCAL
)) != SEC_THREAD_LOCAL
652 /* Sort them by vma. */
653 qsort (alloc_sec
, n
, sizeof (*alloc_sec
), sort_sections
);
655 ovl_end
= alloc_sec
[0]->vma
+ alloc_sec
[0]->size
;
656 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
658 /* Look for an overlapping vma to find the first overlay section. */
659 bfd_vma vma_start
= 0;
660 bfd_vma lma_start
= 0;
662 for (i
= 1; i
< n
; i
++)
665 if (s
->vma
< ovl_end
)
667 asection
*s0
= alloc_sec
[i
- 1];
669 if (strncmp (s0
->name
, ".ovl.init", 9) != 0)
675 << (htab
->num_lines_log2
+ htab
->line_size_log2
)));
680 ovl_end
= s
->vma
+ s
->size
;
683 /* Now find any sections within the cache area. */
684 for (ovl_index
= 0, num_buf
= 0; i
< n
; i
++)
687 if (s
->vma
>= ovl_end
)
690 /* A section in an overlay area called .ovl.init is not
691 an overlay, in the sense that it might be loaded in
692 by the overlay manager, but rather the initial
693 section contents for the overlay buffer. */
694 if (strncmp (s
->name
, ".ovl.init", 9) != 0)
696 num_buf
= ((s
->vma
- vma_start
) >> htab
->line_size_log2
) + 1;
697 if (((s
->vma
- vma_start
) & (htab
->params
->line_size
- 1))
698 || ((s
->lma
- lma_start
) & (htab
->params
->line_size
- 1)))
700 info
->callbacks
->einfo (_("%X%P: overlay section %A "
701 "does not start on a cache line.\n"),
703 bfd_set_error (bfd_error_bad_value
);
706 else if (s
->size
> htab
->params
->line_size
)
708 info
->callbacks
->einfo (_("%X%P: overlay section %A "
709 "is larger than a cache line.\n"),
711 bfd_set_error (bfd_error_bad_value
);
715 alloc_sec
[ovl_index
++] = s
;
716 spu_elf_section_data (s
)->u
.o
.ovl_index
717 = ((s
->lma
- lma_start
) >> htab
->line_size_log2
) + 1;
718 spu_elf_section_data (s
)->u
.o
.ovl_buf
= num_buf
;
722 /* Ensure there are no more overlay sections. */
726 if (s
->vma
< ovl_end
)
728 info
->callbacks
->einfo (_("%X%P: overlay section %A "
729 "is not in cache area.\n"),
731 bfd_set_error (bfd_error_bad_value
);
735 ovl_end
= s
->vma
+ s
->size
;
740 /* Look for overlapping vmas. Any with overlap must be overlays.
741 Count them. Also count the number of overlay regions. */
742 for (ovl_index
= 0, num_buf
= 0, i
= 1; i
< n
; i
++)
745 if (s
->vma
< ovl_end
)
747 asection
*s0
= alloc_sec
[i
- 1];
749 if (spu_elf_section_data (s0
)->u
.o
.ovl_index
== 0)
752 if (strncmp (s0
->name
, ".ovl.init", 9) != 0)
754 alloc_sec
[ovl_index
] = s0
;
755 spu_elf_section_data (s0
)->u
.o
.ovl_index
= ++ovl_index
;
756 spu_elf_section_data (s0
)->u
.o
.ovl_buf
= num_buf
;
759 ovl_end
= s
->vma
+ s
->size
;
761 if (strncmp (s
->name
, ".ovl.init", 9) != 0)
763 alloc_sec
[ovl_index
] = s
;
764 spu_elf_section_data (s
)->u
.o
.ovl_index
= ++ovl_index
;
765 spu_elf_section_data (s
)->u
.o
.ovl_buf
= num_buf
;
766 if (s0
->vma
!= s
->vma
)
768 info
->callbacks
->einfo (_("%X%P: overlay sections %A "
769 "and %A do not start at the "
772 bfd_set_error (bfd_error_bad_value
);
775 if (ovl_end
< s
->vma
+ s
->size
)
776 ovl_end
= s
->vma
+ s
->size
;
780 ovl_end
= s
->vma
+ s
->size
;
784 htab
->num_overlays
= ovl_index
;
785 htab
->num_buf
= num_buf
;
786 htab
->ovl_sec
= alloc_sec
;
791 for (i
= 0; i
< 2; i
++)
794 struct elf_link_hash_entry
*h
;
796 name
= entry_names
[i
][htab
->params
->ovly_flavour
];
797 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, FALSE
, FALSE
);
801 if (h
->root
.type
== bfd_link_hash_new
)
803 h
->root
.type
= bfd_link_hash_undefined
;
805 h
->ref_regular_nonweak
= 1;
808 htab
->ovly_entry
[i
] = h
;
814 /* Non-zero to use bra in overlay stubs rather than br. */
817 #define BRA 0x30000000
818 #define BRASL 0x31000000
819 #define BR 0x32000000
820 #define BRSL 0x33000000
821 #define NOP 0x40200000
822 #define LNOP 0x00200000
823 #define ILA 0x42000000
825 /* Return true for all relative and absolute branch instructions.
833 brhnz 00100011 0.. */
836 is_branch (const unsigned char *insn
)
838 return (insn
[0] & 0xec) == 0x20 && (insn
[1] & 0x80) == 0;
841 /* Return true for all indirect branch instructions.
849 bihnz 00100101 011 */
852 is_indirect_branch (const unsigned char *insn
)
854 return (insn
[0] & 0xef) == 0x25 && (insn
[1] & 0x80) == 0;
857 /* Return true for branch hint instructions.
862 is_hint (const unsigned char *insn
)
864 return (insn
[0] & 0xfc) == 0x10;
867 /* True if INPUT_SECTION might need overlay stubs. */
870 maybe_needs_stubs (asection
*input_section
)
872 /* No stubs for debug sections and suchlike. */
873 if ((input_section
->flags
& SEC_ALLOC
) == 0)
876 /* No stubs for link-once sections that will be discarded. */
877 if (input_section
->output_section
== bfd_abs_section_ptr
)
880 /* Don't create stubs for .eh_frame references. */
881 if (strcmp (input_section
->name
, ".eh_frame") == 0)
903 /* Return non-zero if this reloc symbol should go via an overlay stub.
904 Return 2 if the stub must be in non-overlay area. */
906 static enum _stub_type
907 needs_ovl_stub (struct elf_link_hash_entry
*h
,
908 Elf_Internal_Sym
*sym
,
910 asection
*input_section
,
911 Elf_Internal_Rela
*irela
,
913 struct bfd_link_info
*info
)
915 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
916 enum elf_spu_reloc_type r_type
;
917 unsigned int sym_type
;
918 bfd_boolean branch
, hint
, call
;
919 enum _stub_type ret
= no_stub
;
923 || sym_sec
->output_section
== bfd_abs_section_ptr
924 || spu_elf_section_data (sym_sec
->output_section
) == NULL
)
929 /* Ensure no stubs for user supplied overlay manager syms. */
930 if (h
== htab
->ovly_entry
[0] || h
== htab
->ovly_entry
[1])
933 /* setjmp always goes via an overlay stub, because then the return
934 and hence the longjmp goes via __ovly_return. That magically
935 makes setjmp/longjmp between overlays work. */
936 if (strncmp (h
->root
.root
.string
, "setjmp", 6) == 0
937 && (h
->root
.root
.string
[6] == '\0' || h
->root
.root
.string
[6] == '@'))
944 sym_type
= ELF_ST_TYPE (sym
->st_info
);
946 r_type
= ELF32_R_TYPE (irela
->r_info
);
950 if (r_type
== R_SPU_REL16
|| r_type
== R_SPU_ADDR16
)
952 if (contents
== NULL
)
955 if (!bfd_get_section_contents (input_section
->owner
,
962 contents
+= irela
->r_offset
;
964 branch
= is_branch (contents
);
965 hint
= is_hint (contents
);
968 call
= (contents
[0] & 0xfd) == 0x31;
970 && sym_type
!= STT_FUNC
973 /* It's common for people to write assembly and forget
974 to give function symbols the right type. Handle
975 calls to such symbols, but warn so that (hopefully)
976 people will fix their code. We need the symbol
977 type to be correct to distinguish function pointer
978 initialisation from other pointer initialisations. */
979 const char *sym_name
;
982 sym_name
= h
->root
.root
.string
;
985 Elf_Internal_Shdr
*symtab_hdr
;
986 symtab_hdr
= &elf_tdata (input_section
->owner
)->symtab_hdr
;
987 sym_name
= bfd_elf_sym_name (input_section
->owner
,
992 (*_bfd_error_handler
) (_("warning: call to non-function"
993 " symbol %s defined in %B"),
994 sym_sec
->owner
, sym_name
);
1000 if ((!branch
&& htab
->params
->ovly_flavour
== ovly_soft_icache
)
1001 || (sym_type
!= STT_FUNC
1002 && !(branch
|| hint
)
1003 && (sym_sec
->flags
& SEC_CODE
) == 0))
1006 /* Usually, symbols in non-overlay sections don't need stubs. */
1007 if (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
== 0
1008 && !htab
->params
->non_overlay_stubs
)
1011 /* A reference from some other section to a symbol in an overlay
1012 section needs a stub. */
1013 if (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
1014 != spu_elf_section_data (input_section
->output_section
)->u
.o
.ovl_index
)
1016 if (call
|| sym_type
== STT_FUNC
)
1017 ret
= call_ovl_stub
;
1020 ret
= br000_ovl_stub
;
1024 unsigned int lrlive
= (contents
[1] & 0x70) >> 4;
1030 /* If this insn isn't a branch then we are possibly taking the
1031 address of a function and passing it out somehow. Soft-icache code
1032 always generates inline code to do indirect branches. */
1033 if (!(branch
|| hint
)
1034 && sym_type
== STT_FUNC
1035 && htab
->params
->ovly_flavour
!= ovly_soft_icache
)
1042 count_stub (struct spu_link_hash_table
*htab
,
1045 enum _stub_type stub_type
,
1046 struct elf_link_hash_entry
*h
,
1047 const Elf_Internal_Rela
*irela
)
1049 unsigned int ovl
= 0;
1050 struct got_entry
*g
, **head
;
1053 /* If this instruction is a branch or call, we need a stub
1054 for it. One stub per function per overlay.
1055 If it isn't a branch, then we are taking the address of
1056 this function so need a stub in the non-overlay area
1057 for it. One stub per function. */
1058 if (stub_type
!= nonovl_stub
)
1059 ovl
= spu_elf_section_data (isec
->output_section
)->u
.o
.ovl_index
;
1062 head
= &h
->got
.glist
;
1065 if (elf_local_got_ents (ibfd
) == NULL
)
1067 bfd_size_type amt
= (elf_tdata (ibfd
)->symtab_hdr
.sh_info
1068 * sizeof (*elf_local_got_ents (ibfd
)));
1069 elf_local_got_ents (ibfd
) = bfd_zmalloc (amt
);
1070 if (elf_local_got_ents (ibfd
) == NULL
)
1073 head
= elf_local_got_ents (ibfd
) + ELF32_R_SYM (irela
->r_info
);
1076 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1078 htab
->stub_count
[ovl
] += 1;
1084 addend
= irela
->r_addend
;
1088 struct got_entry
*gnext
;
1090 for (g
= *head
; g
!= NULL
; g
= g
->next
)
1091 if (g
->addend
== addend
&& g
->ovl
== 0)
1096 /* Need a new non-overlay area stub. Zap other stubs. */
1097 for (g
= *head
; g
!= NULL
; g
= gnext
)
1100 if (g
->addend
== addend
)
1102 htab
->stub_count
[g
->ovl
] -= 1;
1110 for (g
= *head
; g
!= NULL
; g
= g
->next
)
1111 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
1117 g
= bfd_malloc (sizeof *g
);
1122 g
->stub_addr
= (bfd_vma
) -1;
1126 htab
->stub_count
[ovl
] += 1;
1132 /* Support two sizes of overlay stubs, a slower more compact stub of two
1133 intructions, and a faster stub of four instructions.
1134 Soft-icache stubs are four or eight words. */
1137 ovl_stub_size (struct spu_elf_params
*params
)
1139 return 16 << params
->ovly_flavour
>> params
->compact_stub
;
1143 ovl_stub_size_log2 (struct spu_elf_params
*params
)
1145 return 4 + params
->ovly_flavour
- params
->compact_stub
;
1148 /* Two instruction overlay stubs look like:
1150 brsl $75,__ovly_load
1151 .word target_ovl_and_address
1153 ovl_and_address is a word with the overlay number in the top 14 bits
1154 and local store address in the bottom 18 bits.
1156 Four instruction overlay stubs look like:
1160 ila $79,target_address
1163 Software icache stubs are:
1167 .word lrlive_branchlocalstoreaddr;
1168 brasl $75,__icache_br_handler
1173 build_stub (struct bfd_link_info
*info
,
1176 enum _stub_type stub_type
,
1177 struct elf_link_hash_entry
*h
,
1178 const Elf_Internal_Rela
*irela
,
1182 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1183 unsigned int ovl
, dest_ovl
, set_id
;
1184 struct got_entry
*g
, **head
;
1186 bfd_vma addend
, from
, to
, br_dest
, patt
;
1187 unsigned int lrlive
;
1190 if (stub_type
!= nonovl_stub
)
1191 ovl
= spu_elf_section_data (isec
->output_section
)->u
.o
.ovl_index
;
1194 head
= &h
->got
.glist
;
1196 head
= elf_local_got_ents (ibfd
) + ELF32_R_SYM (irela
->r_info
);
1200 addend
= irela
->r_addend
;
1202 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1204 g
= bfd_malloc (sizeof *g
);
1210 g
->br_addr
= (irela
->r_offset
1211 + isec
->output_offset
1212 + isec
->output_section
->vma
);
1218 for (g
= *head
; g
!= NULL
; g
= g
->next
)
1219 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
1224 if (g
->ovl
== 0 && ovl
!= 0)
1227 if (g
->stub_addr
!= (bfd_vma
) -1)
1231 sec
= htab
->stub_sec
[ovl
];
1232 dest
+= dest_sec
->output_offset
+ dest_sec
->output_section
->vma
;
1233 from
= sec
->size
+ sec
->output_offset
+ sec
->output_section
->vma
;
1234 g
->stub_addr
= from
;
1235 to
= (htab
->ovly_entry
[0]->root
.u
.def
.value
1236 + htab
->ovly_entry
[0]->root
.u
.def
.section
->output_offset
1237 + htab
->ovly_entry
[0]->root
.u
.def
.section
->output_section
->vma
);
1239 if (((dest
| to
| from
) & 3) != 0)
1244 dest_ovl
= spu_elf_section_data (dest_sec
->output_section
)->u
.o
.ovl_index
;
1246 if (htab
->params
->ovly_flavour
== ovly_normal
1247 && !htab
->params
->compact_stub
)
1249 bfd_put_32 (sec
->owner
, ILA
+ ((dest_ovl
<< 7) & 0x01ffff80) + 78,
1250 sec
->contents
+ sec
->size
);
1251 bfd_put_32 (sec
->owner
, LNOP
,
1252 sec
->contents
+ sec
->size
+ 4);
1253 bfd_put_32 (sec
->owner
, ILA
+ ((dest
<< 7) & 0x01ffff80) + 79,
1254 sec
->contents
+ sec
->size
+ 8);
1256 bfd_put_32 (sec
->owner
, BR
+ (((to
- (from
+ 12)) << 5) & 0x007fff80),
1257 sec
->contents
+ sec
->size
+ 12);
1259 bfd_put_32 (sec
->owner
, BRA
+ ((to
<< 5) & 0x007fff80),
1260 sec
->contents
+ sec
->size
+ 12);
1262 else if (htab
->params
->ovly_flavour
== ovly_normal
1263 && htab
->params
->compact_stub
)
1266 bfd_put_32 (sec
->owner
, BRSL
+ (((to
- from
) << 5) & 0x007fff80) + 75,
1267 sec
->contents
+ sec
->size
);
1269 bfd_put_32 (sec
->owner
, BRASL
+ ((to
<< 5) & 0x007fff80) + 75,
1270 sec
->contents
+ sec
->size
);
1271 bfd_put_32 (sec
->owner
, (dest
& 0x3ffff) | (dest_ovl
<< 18),
1272 sec
->contents
+ sec
->size
+ 4);
1274 else if (htab
->params
->ovly_flavour
== ovly_soft_icache
1275 && htab
->params
->compact_stub
)
1278 if (stub_type
== nonovl_stub
)
1280 else if (stub_type
== call_ovl_stub
)
1281 /* A brsl makes lr live and *(*sp+16) is live.
1282 Tail calls have the same liveness. */
1284 else if (!htab
->params
->lrlive_analysis
)
1285 /* Assume stack frame and lr save. */
1287 else if (irela
!= NULL
)
1289 /* Analyse branch instructions. */
1290 struct function_info
*caller
;
1293 caller
= find_function (isec
, irela
->r_offset
, info
);
1294 if (caller
->start
== NULL
)
1295 off
= irela
->r_offset
;
1298 struct function_info
*found
= NULL
;
1300 /* Find the earliest piece of this function that
1301 has frame adjusting instructions. We might
1302 see dynamic frame adjustment (eg. for alloca)
1303 in some later piece, but functions using
1304 alloca always set up a frame earlier. Frame
1305 setup instructions are always in one piece. */
1306 if (caller
->lr_store
!= (bfd_vma
) -1
1307 || caller
->sp_adjust
!= (bfd_vma
) -1)
1309 while (caller
->start
!= NULL
)
1311 caller
= caller
->start
;
1312 if (caller
->lr_store
!= (bfd_vma
) -1
1313 || caller
->sp_adjust
!= (bfd_vma
) -1)
1321 if (off
> caller
->sp_adjust
)
1323 if (off
> caller
->lr_store
)
1324 /* Only *(*sp+16) is live. */
1327 /* If no lr save, then we must be in a
1328 leaf function with a frame.
1329 lr is still live. */
1332 else if (off
> caller
->lr_store
)
1334 /* Between lr save and stack adjust. */
1336 /* This should never happen since prologues won't
1341 /* On entry to function. */
1344 if (stub_type
!= br000_ovl_stub
1345 && lrlive
!= stub_type
- br000_ovl_stub
)
1346 info
->callbacks
->einfo (_("%A:0x%v lrlive .brinfo (%u) differs "
1347 "from analysis (%u)\n"),
1348 isec
, irela
->r_offset
, lrlive
,
1349 stub_type
- br000_ovl_stub
);
1352 /* If given lrlive info via .brinfo, use it. */
1353 if (stub_type
> br000_ovl_stub
)
1354 lrlive
= stub_type
- br000_ovl_stub
;
1357 to
= (htab
->ovly_entry
[1]->root
.u
.def
.value
1358 + htab
->ovly_entry
[1]->root
.u
.def
.section
->output_offset
1359 + htab
->ovly_entry
[1]->root
.u
.def
.section
->output_section
->vma
);
1361 /* The branch that uses this stub goes to stub_addr + 4. We'll
1362 set up an xor pattern that can be used by the icache manager
1363 to modify this branch to go directly to its destination. */
1365 br_dest
= g
->stub_addr
;
1368 /* Except in the case of _SPUEAR_ stubs, the branch in
1369 question is the one in the stub itself. */
1370 BFD_ASSERT (stub_type
== nonovl_stub
);
1371 g
->br_addr
= g
->stub_addr
;
1375 set_id
= ((dest_ovl
- 1) >> htab
->num_lines_log2
) + 1;
1376 bfd_put_32 (sec
->owner
, (set_id
<< 18) | (dest
& 0x3ffff),
1377 sec
->contents
+ sec
->size
);
1378 bfd_put_32 (sec
->owner
, BRASL
+ ((to
<< 5) & 0x007fff80) + 75,
1379 sec
->contents
+ sec
->size
+ 4);
1380 bfd_put_32 (sec
->owner
, (lrlive
<< 29) | (g
->br_addr
& 0x3ffff),
1381 sec
->contents
+ sec
->size
+ 8);
1382 patt
= dest
^ br_dest
;
1383 if (irela
!= NULL
&& ELF32_R_TYPE (irela
->r_info
) == R_SPU_REL16
)
1384 patt
= (dest
- g
->br_addr
) ^ (br_dest
- g
->br_addr
);
1385 bfd_put_32 (sec
->owner
, (patt
<< 5) & 0x007fff80,
1386 sec
->contents
+ sec
->size
+ 12);
1389 /* Extra space for linked list entries. */
1395 sec
->size
+= ovl_stub_size (htab
->params
);
1397 if (htab
->params
->emit_stub_syms
)
1403 len
= 8 + sizeof (".ovl_call.") - 1;
1405 len
+= strlen (h
->root
.root
.string
);
1410 add
= (int) irela
->r_addend
& 0xffffffff;
1413 name
= bfd_malloc (len
);
1417 sprintf (name
, "%08x.ovl_call.", g
->ovl
);
1419 strcpy (name
+ 8 + sizeof (".ovl_call.") - 1, h
->root
.root
.string
);
1421 sprintf (name
+ 8 + sizeof (".ovl_call.") - 1, "%x:%x",
1422 dest_sec
->id
& 0xffffffff,
1423 (int) ELF32_R_SYM (irela
->r_info
) & 0xffffffff);
1425 sprintf (name
+ len
- 9, "+%x", add
);
1427 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
1431 if (h
->root
.type
== bfd_link_hash_new
)
1433 h
->root
.type
= bfd_link_hash_defined
;
1434 h
->root
.u
.def
.section
= sec
;
1435 h
->size
= ovl_stub_size (htab
->params
);
1436 h
->root
.u
.def
.value
= sec
->size
- h
->size
;
1440 h
->ref_regular_nonweak
= 1;
1441 h
->forced_local
= 1;
1449 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
1453 allocate_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
1455 /* Symbols starting with _SPUEAR_ need a stub because they may be
1456 invoked by the PPU. */
1457 struct bfd_link_info
*info
= inf
;
1458 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1461 if ((h
->root
.type
== bfd_link_hash_defined
1462 || h
->root
.type
== bfd_link_hash_defweak
)
1464 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0
1465 && (sym_sec
= h
->root
.u
.def
.section
) != NULL
1466 && sym_sec
->output_section
!= bfd_abs_section_ptr
1467 && spu_elf_section_data (sym_sec
->output_section
) != NULL
1468 && (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
!= 0
1469 || htab
->params
->non_overlay_stubs
))
1471 return count_stub (htab
, NULL
, NULL
, nonovl_stub
, h
, NULL
);
1478 build_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
1480 /* Symbols starting with _SPUEAR_ need a stub because they may be
1481 invoked by the PPU. */
1482 struct bfd_link_info
*info
= inf
;
1483 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1486 if ((h
->root
.type
== bfd_link_hash_defined
1487 || h
->root
.type
== bfd_link_hash_defweak
)
1489 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0
1490 && (sym_sec
= h
->root
.u
.def
.section
) != NULL
1491 && sym_sec
->output_section
!= bfd_abs_section_ptr
1492 && spu_elf_section_data (sym_sec
->output_section
) != NULL
1493 && (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
!= 0
1494 || htab
->params
->non_overlay_stubs
))
1496 return build_stub (info
, NULL
, NULL
, nonovl_stub
, h
, NULL
,
1497 h
->root
.u
.def
.value
, sym_sec
);
1503 /* Size or build stubs. */
1506 process_stubs (struct bfd_link_info
*info
, bfd_boolean build
)
1508 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1511 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
1513 extern const bfd_target bfd_elf32_spu_vec
;
1514 Elf_Internal_Shdr
*symtab_hdr
;
1516 Elf_Internal_Sym
*local_syms
= NULL
;
1518 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
1521 /* We'll need the symbol table in a second. */
1522 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
1523 if (symtab_hdr
->sh_info
== 0)
1526 /* Walk over each section attached to the input bfd. */
1527 for (isec
= ibfd
->sections
; isec
!= NULL
; isec
= isec
->next
)
1529 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
1531 /* If there aren't any relocs, then there's nothing more to do. */
1532 if ((isec
->flags
& SEC_RELOC
) == 0
1533 || isec
->reloc_count
== 0)
1536 if (!maybe_needs_stubs (isec
))
1539 /* Get the relocs. */
1540 internal_relocs
= _bfd_elf_link_read_relocs (ibfd
, isec
, NULL
, NULL
,
1542 if (internal_relocs
== NULL
)
1543 goto error_ret_free_local
;
1545 /* Now examine each relocation. */
1546 irela
= internal_relocs
;
1547 irelaend
= irela
+ isec
->reloc_count
;
1548 for (; irela
< irelaend
; irela
++)
1550 enum elf_spu_reloc_type r_type
;
1551 unsigned int r_indx
;
1553 Elf_Internal_Sym
*sym
;
1554 struct elf_link_hash_entry
*h
;
1555 enum _stub_type stub_type
;
1557 r_type
= ELF32_R_TYPE (irela
->r_info
);
1558 r_indx
= ELF32_R_SYM (irela
->r_info
);
1560 if (r_type
>= R_SPU_max
)
1562 bfd_set_error (bfd_error_bad_value
);
1563 error_ret_free_internal
:
1564 if (elf_section_data (isec
)->relocs
!= internal_relocs
)
1565 free (internal_relocs
);
1566 error_ret_free_local
:
1567 if (local_syms
!= NULL
1568 && (symtab_hdr
->contents
1569 != (unsigned char *) local_syms
))
1574 /* Determine the reloc target section. */
1575 if (!get_sym_h (&h
, &sym
, &sym_sec
, &local_syms
, r_indx
, ibfd
))
1576 goto error_ret_free_internal
;
1578 stub_type
= needs_ovl_stub (h
, sym
, sym_sec
, isec
, irela
,
1580 if (stub_type
== no_stub
)
1582 else if (stub_type
== stub_error
)
1583 goto error_ret_free_internal
;
1585 if (htab
->stub_count
== NULL
)
1588 amt
= (htab
->num_overlays
+ 1) * sizeof (*htab
->stub_count
);
1589 htab
->stub_count
= bfd_zmalloc (amt
);
1590 if (htab
->stub_count
== NULL
)
1591 goto error_ret_free_internal
;
1596 if (!count_stub (htab
, ibfd
, isec
, stub_type
, h
, irela
))
1597 goto error_ret_free_internal
;
1604 dest
= h
->root
.u
.def
.value
;
1606 dest
= sym
->st_value
;
1607 dest
+= irela
->r_addend
;
1608 if (!build_stub (info
, ibfd
, isec
, stub_type
, h
, irela
,
1610 goto error_ret_free_internal
;
1614 /* We're done with the internal relocs, free them. */
1615 if (elf_section_data (isec
)->relocs
!= internal_relocs
)
1616 free (internal_relocs
);
1619 if (local_syms
!= NULL
1620 && symtab_hdr
->contents
!= (unsigned char *) local_syms
)
1622 if (!info
->keep_memory
)
1625 symtab_hdr
->contents
= (unsigned char *) local_syms
;
1632 /* Allocate space for overlay call and return stubs.
1633 Return 0 on error, 1 if no stubs, 2 otherwise. */
1636 spu_elf_size_stubs (struct bfd_link_info
*info
)
1638 struct spu_link_hash_table
*htab
;
1645 if (!process_stubs (info
, FALSE
))
1648 htab
= spu_hash_table (info
);
1649 elf_link_hash_traverse (&htab
->elf
, allocate_spuear_stubs
, info
);
1653 if (htab
->stub_count
== NULL
)
1656 ibfd
= info
->input_bfds
;
1657 amt
= (htab
->num_overlays
+ 1) * sizeof (*htab
->stub_sec
);
1658 htab
->stub_sec
= bfd_zmalloc (amt
);
1659 if (htab
->stub_sec
== NULL
)
1662 flags
= (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_READONLY
1663 | SEC_HAS_CONTENTS
| SEC_IN_MEMORY
);
1664 stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1665 htab
->stub_sec
[0] = stub
;
1667 || !bfd_set_section_alignment (ibfd
, stub
,
1668 ovl_stub_size_log2 (htab
->params
)))
1670 stub
->size
= htab
->stub_count
[0] * ovl_stub_size (htab
->params
);
1671 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1672 /* Extra space for linked list entries. */
1673 stub
->size
+= htab
->stub_count
[0] * 16;
1675 for (i
= 0; i
< htab
->num_overlays
; ++i
)
1677 asection
*osec
= htab
->ovl_sec
[i
];
1678 unsigned int ovl
= spu_elf_section_data (osec
)->u
.o
.ovl_index
;
1679 stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1680 htab
->stub_sec
[ovl
] = stub
;
1682 || !bfd_set_section_alignment (ibfd
, stub
,
1683 ovl_stub_size_log2 (htab
->params
)))
1685 stub
->size
= htab
->stub_count
[ovl
] * ovl_stub_size (htab
->params
);
1688 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1690 /* Space for icache manager tables.
1691 a) Tag array, one quadword per cache line.
1692 b) Rewrite "to" list, one quadword per cache line.
1693 c) Rewrite "from" list, one byte per outgoing branch (rounded up to
1694 a power-of-two number of full quadwords) per cache line. */
1697 htab
->ovtab
= bfd_make_section_anyway_with_flags (ibfd
, ".ovtab", flags
);
1698 if (htab
->ovtab
== NULL
1699 || !bfd_set_section_alignment (ibfd
, htab
->ovtab
, 4))
1702 htab
->ovtab
->size
= (16 + 16 + (16 << htab
->fromelem_size_log2
))
1703 << htab
->num_lines_log2
;
1705 flags
= SEC_ALLOC
| SEC_LOAD
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
1706 htab
->init
= bfd_make_section_anyway_with_flags (ibfd
, ".ovini", flags
);
1707 if (htab
->init
== NULL
1708 || !bfd_set_section_alignment (ibfd
, htab
->init
, 4))
1711 htab
->init
->size
= 16;
1715 /* htab->ovtab consists of two arrays.
1725 . } _ovly_buf_table[];
1728 flags
= SEC_ALLOC
| SEC_LOAD
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
1729 htab
->ovtab
= bfd_make_section_anyway_with_flags (ibfd
, ".ovtab", flags
);
1730 if (htab
->ovtab
== NULL
1731 || !bfd_set_section_alignment (ibfd
, htab
->ovtab
, 4))
1734 htab
->ovtab
->size
= htab
->num_overlays
* 16 + 16 + htab
->num_buf
* 4;
1737 htab
->toe
= bfd_make_section_anyway_with_flags (ibfd
, ".toe", SEC_ALLOC
);
1738 if (htab
->toe
== NULL
1739 || !bfd_set_section_alignment (ibfd
, htab
->toe
, 4))
1741 htab
->toe
->size
= 16;
1746 /* Called from ld to place overlay manager data sections. This is done
1747 after the overlay manager itself is loaded, mainly so that the
1748 linker's htab->init section is placed after any other .ovl.init
1752 spu_elf_place_overlay_data (struct bfd_link_info
*info
)
1754 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1758 if (htab
->stub_count
== NULL
)
1761 (*htab
->params
->place_spu_section
) (htab
->stub_sec
[0], NULL
, ".text");
1763 for (i
= 0; i
< htab
->num_overlays
; ++i
)
1765 asection
*osec
= htab
->ovl_sec
[i
];
1766 unsigned int ovl
= spu_elf_section_data (osec
)->u
.o
.ovl_index
;
1767 (*htab
->params
->place_spu_section
) (htab
->stub_sec
[ovl
], osec
, NULL
);
1770 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1771 (*htab
->params
->place_spu_section
) (htab
->init
, NULL
, ".ovl.init");
1774 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1776 (*htab
->params
->place_spu_section
) (htab
->ovtab
, NULL
, ovout
);
1778 (*htab
->params
->place_spu_section
) (htab
->toe
, NULL
, ".toe");
1781 /* Functions to handle embedded spu_ovl.o object. */
1784 ovl_mgr_open (struct bfd
*nbfd ATTRIBUTE_UNUSED
, void *stream
)
1790 ovl_mgr_pread (struct bfd
*abfd ATTRIBUTE_UNUSED
,
1796 struct _ovl_stream
*os
;
1800 os
= (struct _ovl_stream
*) stream
;
1801 max
= (const char *) os
->end
- (const char *) os
->start
;
1803 if ((ufile_ptr
) offset
>= max
)
1807 if (count
> max
- offset
)
1808 count
= max
- offset
;
1810 memcpy (buf
, (const char *) os
->start
+ offset
, count
);
1815 spu_elf_open_builtin_lib (bfd
**ovl_bfd
, const struct _ovl_stream
*stream
)
1817 *ovl_bfd
= bfd_openr_iovec ("builtin ovl_mgr",
1824 return *ovl_bfd
!= NULL
;
1828 overlay_index (asection
*sec
)
1831 || sec
->output_section
== bfd_abs_section_ptr
)
1833 return spu_elf_section_data (sec
->output_section
)->u
.o
.ovl_index
;
1836 /* Define an STT_OBJECT symbol. */
1838 static struct elf_link_hash_entry
*
1839 define_ovtab_symbol (struct spu_link_hash_table
*htab
, const char *name
)
1841 struct elf_link_hash_entry
*h
;
1843 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, FALSE
, FALSE
);
1847 if (h
->root
.type
!= bfd_link_hash_defined
1850 h
->root
.type
= bfd_link_hash_defined
;
1851 h
->root
.u
.def
.section
= htab
->ovtab
;
1852 h
->type
= STT_OBJECT
;
1855 h
->ref_regular_nonweak
= 1;
1858 else if (h
->root
.u
.def
.section
->owner
!= NULL
)
1860 (*_bfd_error_handler
) (_("%B is not allowed to define %s"),
1861 h
->root
.u
.def
.section
->owner
,
1862 h
->root
.root
.string
);
1863 bfd_set_error (bfd_error_bad_value
);
1868 (*_bfd_error_handler
) (_("you are not allowed to define %s in a script"),
1869 h
->root
.root
.string
);
1870 bfd_set_error (bfd_error_bad_value
);
1877 /* Fill in all stubs and the overlay tables. */
1880 spu_elf_build_stubs (struct bfd_link_info
*info
)
1882 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1883 struct elf_link_hash_entry
*h
;
1889 if (htab
->stub_count
== NULL
)
1892 for (i
= 0; i
<= htab
->num_overlays
; i
++)
1893 if (htab
->stub_sec
[i
]->size
!= 0)
1895 htab
->stub_sec
[i
]->contents
= bfd_zalloc (htab
->stub_sec
[i
]->owner
,
1896 htab
->stub_sec
[i
]->size
);
1897 if (htab
->stub_sec
[i
]->contents
== NULL
)
1899 htab
->stub_sec
[i
]->rawsize
= htab
->stub_sec
[i
]->size
;
1900 htab
->stub_sec
[i
]->size
= 0;
1903 for (i
= 0; i
< 2; i
++)
1905 h
= htab
->ovly_entry
[i
];
1906 BFD_ASSERT (h
!= NULL
);
1908 if ((h
->root
.type
== bfd_link_hash_defined
1909 || h
->root
.type
== bfd_link_hash_defweak
)
1912 s
= h
->root
.u
.def
.section
->output_section
;
1913 if (spu_elf_section_data (s
)->u
.o
.ovl_index
)
1915 (*_bfd_error_handler
) (_("%s in overlay section"),
1916 h
->root
.root
.string
);
1917 bfd_set_error (bfd_error_bad_value
);
1925 /* Fill in all the stubs. */
1926 process_stubs (info
, TRUE
);
1927 if (!htab
->stub_err
)
1928 elf_link_hash_traverse (&htab
->elf
, build_spuear_stubs
, info
);
1932 (*_bfd_error_handler
) (_("overlay stub relocation overflow"));
1933 bfd_set_error (bfd_error_bad_value
);
1937 for (i
= 0; i
<= htab
->num_overlays
; i
++)
1939 if (htab
->stub_sec
[i
]->size
!= htab
->stub_sec
[i
]->rawsize
)
1941 (*_bfd_error_handler
) (_("stubs don't match calculated size"));
1942 bfd_set_error (bfd_error_bad_value
);
1945 htab
->stub_sec
[i
]->rawsize
= 0;
1948 if (htab
->ovtab
== NULL
|| htab
->ovtab
->size
== 0)
1951 htab
->ovtab
->contents
= bfd_zalloc (htab
->ovtab
->owner
, htab
->ovtab
->size
);
1952 if (htab
->ovtab
->contents
== NULL
)
1955 p
= htab
->ovtab
->contents
;
1956 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1960 h
= define_ovtab_symbol (htab
, "__icache_tag_array");
1963 h
->root
.u
.def
.value
= 0;
1964 h
->size
= 16 << htab
->num_lines_log2
;
1967 h
= define_ovtab_symbol (htab
, "__icache_tag_array_size");
1970 h
->root
.u
.def
.value
= 16 << htab
->num_lines_log2
;
1971 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
1973 h
= define_ovtab_symbol (htab
, "__icache_rewrite_to");
1976 h
->root
.u
.def
.value
= off
;
1977 h
->size
= 16 << htab
->num_lines_log2
;
1980 h
= define_ovtab_symbol (htab
, "__icache_rewrite_to_size");
1983 h
->root
.u
.def
.value
= 16 << htab
->num_lines_log2
;
1984 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
1986 h
= define_ovtab_symbol (htab
, "__icache_rewrite_from");
1989 h
->root
.u
.def
.value
= off
;
1990 h
->size
= 16 << (htab
->fromelem_size_log2
+ htab
->num_lines_log2
);
1993 h
= define_ovtab_symbol (htab
, "__icache_rewrite_from_size");
1996 h
->root
.u
.def
.value
= 16 << (htab
->fromelem_size_log2
1997 + htab
->num_lines_log2
);
1998 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2000 h
= define_ovtab_symbol (htab
, "__icache_log2_fromelemsize");
2003 h
->root
.u
.def
.value
= htab
->fromelem_size_log2
;
2004 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2006 h
= define_ovtab_symbol (htab
, "__icache_base");
2009 h
->root
.u
.def
.value
= htab
->ovl_sec
[0]->vma
;
2010 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2011 h
->size
= htab
->num_buf
<< htab
->line_size_log2
;
2013 h
= define_ovtab_symbol (htab
, "__icache_linesize");
2016 h
->root
.u
.def
.value
= 1 << htab
->line_size_log2
;
2017 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2019 h
= define_ovtab_symbol (htab
, "__icache_log2_linesize");
2022 h
->root
.u
.def
.value
= htab
->line_size_log2
;
2023 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2025 h
= define_ovtab_symbol (htab
, "__icache_neg_log2_linesize");
2028 h
->root
.u
.def
.value
= -htab
->line_size_log2
;
2029 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2031 h
= define_ovtab_symbol (htab
, "__icache_cachesize");
2034 h
->root
.u
.def
.value
= 1 << (htab
->num_lines_log2
+ htab
->line_size_log2
);
2035 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2037 h
= define_ovtab_symbol (htab
, "__icache_log2_cachesize");
2040 h
->root
.u
.def
.value
= htab
->num_lines_log2
+ htab
->line_size_log2
;
2041 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2043 h
= define_ovtab_symbol (htab
, "__icache_neg_log2_cachesize");
2046 h
->root
.u
.def
.value
= -(htab
->num_lines_log2
+ htab
->line_size_log2
);
2047 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2049 if (htab
->init
!= NULL
&& htab
->init
->size
!= 0)
2051 htab
->init
->contents
= bfd_zalloc (htab
->init
->owner
,
2053 if (htab
->init
->contents
== NULL
)
2056 h
= define_ovtab_symbol (htab
, "__icache_fileoff");
2059 h
->root
.u
.def
.value
= 0;
2060 h
->root
.u
.def
.section
= htab
->init
;
2066 /* Write out _ovly_table. */
2067 /* set low bit of .size to mark non-overlay area as present. */
2069 obfd
= htab
->ovtab
->output_section
->owner
;
2070 for (s
= obfd
->sections
; s
!= NULL
; s
= s
->next
)
2072 unsigned int ovl_index
= spu_elf_section_data (s
)->u
.o
.ovl_index
;
2076 unsigned long off
= ovl_index
* 16;
2077 unsigned int ovl_buf
= spu_elf_section_data (s
)->u
.o
.ovl_buf
;
2079 bfd_put_32 (htab
->ovtab
->owner
, s
->vma
, p
+ off
);
2080 bfd_put_32 (htab
->ovtab
->owner
, (s
->size
+ 15) & -16,
2082 /* file_off written later in spu_elf_modify_program_headers. */
2083 bfd_put_32 (htab
->ovtab
->owner
, ovl_buf
, p
+ off
+ 12);
2087 h
= define_ovtab_symbol (htab
, "_ovly_table");
2090 h
->root
.u
.def
.value
= 16;
2091 h
->size
= htab
->num_overlays
* 16;
2093 h
= define_ovtab_symbol (htab
, "_ovly_table_end");
2096 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16;
2099 h
= define_ovtab_symbol (htab
, "_ovly_buf_table");
2102 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16;
2103 h
->size
= htab
->num_buf
* 4;
2105 h
= define_ovtab_symbol (htab
, "_ovly_buf_table_end");
2108 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16 + htab
->num_buf
* 4;
2112 h
= define_ovtab_symbol (htab
, "_EAR_");
2115 h
->root
.u
.def
.section
= htab
->toe
;
2116 h
->root
.u
.def
.value
= 0;
2122 /* Check that all loadable section VMAs lie in the range
2123 LO .. HI inclusive, and stash some parameters for --auto-overlay. */
2126 spu_elf_check_vma (struct bfd_link_info
*info
)
2128 struct elf_segment_map
*m
;
2130 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
2131 bfd
*abfd
= info
->output_bfd
;
2132 bfd_vma hi
= htab
->params
->local_store_hi
;
2133 bfd_vma lo
= htab
->params
->local_store_lo
;
2135 htab
->local_store
= hi
+ 1 - lo
;
2137 for (m
= elf_tdata (abfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
2138 if (m
->p_type
== PT_LOAD
)
2139 for (i
= 0; i
< m
->count
; i
++)
2140 if (m
->sections
[i
]->size
!= 0
2141 && (m
->sections
[i
]->vma
< lo
2142 || m
->sections
[i
]->vma
> hi
2143 || m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1 > hi
))
2144 return m
->sections
[i
];
2149 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
2150 Search for stack adjusting insns, and return the sp delta.
2151 If a store of lr is found save the instruction offset to *LR_STORE.
2152 If a stack adjusting instruction is found, save that offset to
2156 find_function_stack_adjust (asection
*sec
,
2163 memset (reg
, 0, sizeof (reg
));
2164 for ( ; offset
+ 4 <= sec
->size
; offset
+= 4)
2166 unsigned char buf
[4];
2170 /* Assume no relocs on stack adjusing insns. */
2171 if (!bfd_get_section_contents (sec
->owner
, sec
, buf
, offset
, 4))
2175 ra
= ((buf
[2] & 0x3f) << 1) | (buf
[3] >> 7);
2177 if (buf
[0] == 0x24 /* stqd */)
2179 if (rt
== 0 /* lr */ && ra
== 1 /* sp */)
2184 /* Partly decoded immediate field. */
2185 imm
= (buf
[1] << 9) | (buf
[2] << 1) | (buf
[3] >> 7);
2187 if (buf
[0] == 0x1c /* ai */)
2190 imm
= (imm
^ 0x200) - 0x200;
2191 reg
[rt
] = reg
[ra
] + imm
;
2193 if (rt
== 1 /* sp */)
2197 *sp_adjust
= offset
;
2201 else if (buf
[0] == 0x18 && (buf
[1] & 0xe0) == 0 /* a */)
2203 int rb
= ((buf
[1] & 0x1f) << 2) | ((buf
[2] & 0xc0) >> 6);
2205 reg
[rt
] = reg
[ra
] + reg
[rb
];
2210 *sp_adjust
= offset
;
2214 else if (buf
[0] == 0x08 && (buf
[1] & 0xe0) == 0 /* sf */)
2216 int rb
= ((buf
[1] & 0x1f) << 2) | ((buf
[2] & 0xc0) >> 6);
2218 reg
[rt
] = reg
[rb
] - reg
[ra
];
2223 *sp_adjust
= offset
;
2227 else if ((buf
[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
2229 if (buf
[0] >= 0x42 /* ila */)
2230 imm
|= (buf
[0] & 1) << 17;
2235 if (buf
[0] == 0x40 /* il */)
2237 if ((buf
[1] & 0x80) == 0)
2239 imm
= (imm
^ 0x8000) - 0x8000;
2241 else if ((buf
[1] & 0x80) == 0 /* ilhu */)
2247 else if (buf
[0] == 0x60 && (buf
[1] & 0x80) != 0 /* iohl */)
2249 reg
[rt
] |= imm
& 0xffff;
2252 else if (buf
[0] == 0x04 /* ori */)
2255 imm
= (imm
^ 0x200) - 0x200;
2256 reg
[rt
] = reg
[ra
] | imm
;
2259 else if (buf
[0] == 0x32 && (buf
[1] & 0x80) != 0 /* fsmbi */)
2261 reg
[rt
] = ( ((imm
& 0x8000) ? 0xff000000 : 0)
2262 | ((imm
& 0x4000) ? 0x00ff0000 : 0)
2263 | ((imm
& 0x2000) ? 0x0000ff00 : 0)
2264 | ((imm
& 0x1000) ? 0x000000ff : 0));
2267 else if (buf
[0] == 0x16 /* andbi */)
2273 reg
[rt
] = reg
[ra
] & imm
;
2276 else if (buf
[0] == 0x33 && imm
== 1 /* brsl .+4 */)
2278 /* Used in pic reg load. Say rt is trashed. Won't be used
2279 in stack adjust, but we need to continue past this branch. */
2283 else if (is_branch (buf
) || is_indirect_branch (buf
))
2284 /* If we hit a branch then we must be out of the prologue. */
2291 /* qsort predicate to sort symbols by section and value. */
2293 static Elf_Internal_Sym
*sort_syms_syms
;
2294 static asection
**sort_syms_psecs
;
2297 sort_syms (const void *a
, const void *b
)
2299 Elf_Internal_Sym
*const *s1
= a
;
2300 Elf_Internal_Sym
*const *s2
= b
;
2301 asection
*sec1
,*sec2
;
2302 bfd_signed_vma delta
;
2304 sec1
= sort_syms_psecs
[*s1
- sort_syms_syms
];
2305 sec2
= sort_syms_psecs
[*s2
- sort_syms_syms
];
2308 return sec1
->index
- sec2
->index
;
2310 delta
= (*s1
)->st_value
- (*s2
)->st_value
;
2312 return delta
< 0 ? -1 : 1;
2314 delta
= (*s2
)->st_size
- (*s1
)->st_size
;
2316 return delta
< 0 ? -1 : 1;
2318 return *s1
< *s2
? -1 : 1;
2321 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
2322 entries for section SEC. */
2324 static struct spu_elf_stack_info
*
2325 alloc_stack_info (asection
*sec
, int max_fun
)
2327 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2330 amt
= sizeof (struct spu_elf_stack_info
);
2331 amt
+= (max_fun
- 1) * sizeof (struct function_info
);
2332 sec_data
->u
.i
.stack_info
= bfd_zmalloc (amt
);
2333 if (sec_data
->u
.i
.stack_info
!= NULL
)
2334 sec_data
->u
.i
.stack_info
->max_fun
= max_fun
;
2335 return sec_data
->u
.i
.stack_info
;
2338 /* Add a new struct function_info describing a (part of a) function
2339 starting at SYM_H. Keep the array sorted by address. */
2341 static struct function_info
*
2342 maybe_insert_function (asection
*sec
,
2345 bfd_boolean is_func
)
2347 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2348 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
2354 sinfo
= alloc_stack_info (sec
, 20);
2361 Elf_Internal_Sym
*sym
= sym_h
;
2362 off
= sym
->st_value
;
2363 size
= sym
->st_size
;
2367 struct elf_link_hash_entry
*h
= sym_h
;
2368 off
= h
->root
.u
.def
.value
;
2372 for (i
= sinfo
->num_fun
; --i
>= 0; )
2373 if (sinfo
->fun
[i
].lo
<= off
)
2378 /* Don't add another entry for an alias, but do update some
2380 if (sinfo
->fun
[i
].lo
== off
)
2382 /* Prefer globals over local syms. */
2383 if (global
&& !sinfo
->fun
[i
].global
)
2385 sinfo
->fun
[i
].global
= TRUE
;
2386 sinfo
->fun
[i
].u
.h
= sym_h
;
2389 sinfo
->fun
[i
].is_func
= TRUE
;
2390 return &sinfo
->fun
[i
];
2392 /* Ignore a zero-size symbol inside an existing function. */
2393 else if (sinfo
->fun
[i
].hi
> off
&& size
== 0)
2394 return &sinfo
->fun
[i
];
2397 if (sinfo
->num_fun
>= sinfo
->max_fun
)
2399 bfd_size_type amt
= sizeof (struct spu_elf_stack_info
);
2400 bfd_size_type old
= amt
;
2402 old
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
2403 sinfo
->max_fun
+= 20 + (sinfo
->max_fun
>> 1);
2404 amt
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
2405 sinfo
= bfd_realloc (sinfo
, amt
);
2408 memset ((char *) sinfo
+ old
, 0, amt
- old
);
2409 sec_data
->u
.i
.stack_info
= sinfo
;
2412 if (++i
< sinfo
->num_fun
)
2413 memmove (&sinfo
->fun
[i
+ 1], &sinfo
->fun
[i
],
2414 (sinfo
->num_fun
- i
) * sizeof (sinfo
->fun
[i
]));
2415 sinfo
->fun
[i
].is_func
= is_func
;
2416 sinfo
->fun
[i
].global
= global
;
2417 sinfo
->fun
[i
].sec
= sec
;
2419 sinfo
->fun
[i
].u
.h
= sym_h
;
2421 sinfo
->fun
[i
].u
.sym
= sym_h
;
2422 sinfo
->fun
[i
].lo
= off
;
2423 sinfo
->fun
[i
].hi
= off
+ size
;
2424 sinfo
->fun
[i
].lr_store
= -1;
2425 sinfo
->fun
[i
].sp_adjust
= -1;
2426 sinfo
->fun
[i
].stack
= -find_function_stack_adjust (sec
, off
,
2427 &sinfo
->fun
[i
].lr_store
,
2428 &sinfo
->fun
[i
].sp_adjust
);
2429 sinfo
->num_fun
+= 1;
2430 return &sinfo
->fun
[i
];
2433 /* Return the name of FUN. */
2436 func_name (struct function_info
*fun
)
2440 Elf_Internal_Shdr
*symtab_hdr
;
2442 while (fun
->start
!= NULL
)
2446 return fun
->u
.h
->root
.root
.string
;
2449 if (fun
->u
.sym
->st_name
== 0)
2451 size_t len
= strlen (sec
->name
);
2452 char *name
= bfd_malloc (len
+ 10);
2455 sprintf (name
, "%s+%lx", sec
->name
,
2456 (unsigned long) fun
->u
.sym
->st_value
& 0xffffffff);
2460 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2461 return bfd_elf_sym_name (ibfd
, symtab_hdr
, fun
->u
.sym
, sec
);
2464 /* Read the instruction at OFF in SEC. Return true iff the instruction
2465 is a nop, lnop, or stop 0 (all zero insn). */
2468 is_nop (asection
*sec
, bfd_vma off
)
2470 unsigned char insn
[4];
2472 if (off
+ 4 > sec
->size
2473 || !bfd_get_section_contents (sec
->owner
, sec
, insn
, off
, 4))
2475 if ((insn
[0] & 0xbf) == 0 && (insn
[1] & 0xe0) == 0x20)
2477 if (insn
[0] == 0 && insn
[1] == 0 && insn
[2] == 0 && insn
[3] == 0)
2482 /* Extend the range of FUN to cover nop padding up to LIMIT.
2483 Return TRUE iff some instruction other than a NOP was found. */
2486 insns_at_end (struct function_info
*fun
, bfd_vma limit
)
2488 bfd_vma off
= (fun
->hi
+ 3) & -4;
2490 while (off
< limit
&& is_nop (fun
->sec
, off
))
2501 /* Check and fix overlapping function ranges. Return TRUE iff there
2502 are gaps in the current info we have about functions in SEC. */
2505 check_function_ranges (asection
*sec
, struct bfd_link_info
*info
)
2507 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2508 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
2510 bfd_boolean gaps
= FALSE
;
2515 for (i
= 1; i
< sinfo
->num_fun
; i
++)
2516 if (sinfo
->fun
[i
- 1].hi
> sinfo
->fun
[i
].lo
)
2518 /* Fix overlapping symbols. */
2519 const char *f1
= func_name (&sinfo
->fun
[i
- 1]);
2520 const char *f2
= func_name (&sinfo
->fun
[i
]);
2522 info
->callbacks
->einfo (_("warning: %s overlaps %s\n"), f1
, f2
);
2523 sinfo
->fun
[i
- 1].hi
= sinfo
->fun
[i
].lo
;
2525 else if (insns_at_end (&sinfo
->fun
[i
- 1], sinfo
->fun
[i
].lo
))
2528 if (sinfo
->num_fun
== 0)
2532 if (sinfo
->fun
[0].lo
!= 0)
2534 if (sinfo
->fun
[sinfo
->num_fun
- 1].hi
> sec
->size
)
2536 const char *f1
= func_name (&sinfo
->fun
[sinfo
->num_fun
- 1]);
2538 info
->callbacks
->einfo (_("warning: %s exceeds section size\n"), f1
);
2539 sinfo
->fun
[sinfo
->num_fun
- 1].hi
= sec
->size
;
2541 else if (insns_at_end (&sinfo
->fun
[sinfo
->num_fun
- 1], sec
->size
))
2547 /* Search current function info for a function that contains address
2548 OFFSET in section SEC. */
2550 static struct function_info
*
2551 find_function (asection
*sec
, bfd_vma offset
, struct bfd_link_info
*info
)
2553 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2554 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
2558 hi
= sinfo
->num_fun
;
2561 mid
= (lo
+ hi
) / 2;
2562 if (offset
< sinfo
->fun
[mid
].lo
)
2564 else if (offset
>= sinfo
->fun
[mid
].hi
)
2567 return &sinfo
->fun
[mid
];
2569 info
->callbacks
->einfo (_("%A:0x%v not found in function table\n"),
2571 bfd_set_error (bfd_error_bad_value
);
2575 /* Add CALLEE to CALLER call list if not already present. Return TRUE
2576 if CALLEE was new. If this function return FALSE, CALLEE should
2580 insert_callee (struct function_info
*caller
, struct call_info
*callee
)
2582 struct call_info
**pp
, *p
;
2584 for (pp
= &caller
->call_list
; (p
= *pp
) != NULL
; pp
= &p
->next
)
2585 if (p
->fun
== callee
->fun
)
2587 /* Tail calls use less stack than normal calls. Retain entry
2588 for normal call over one for tail call. */
2589 p
->is_tail
&= callee
->is_tail
;
2592 p
->fun
->start
= NULL
;
2593 p
->fun
->is_func
= TRUE
;
2596 /* Reorder list so most recent call is first. */
2598 p
->next
= caller
->call_list
;
2599 caller
->call_list
= p
;
2602 callee
->next
= caller
->call_list
;
2604 caller
->call_list
= callee
;
2608 /* Copy CALL and insert the copy into CALLER. */
2611 copy_callee (struct function_info
*caller
, const struct call_info
*call
)
2613 struct call_info
*callee
;
2614 callee
= bfd_malloc (sizeof (*callee
));
2618 if (!insert_callee (caller
, callee
))
2623 /* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2624 overlay stub sections. */
2627 interesting_section (asection
*s
)
2629 return (s
->output_section
!= bfd_abs_section_ptr
2630 && ((s
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_IN_MEMORY
))
2631 == (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2635 /* Rummage through the relocs for SEC, looking for function calls.
2636 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
2637 mark destination symbols on calls as being functions. Also
2638 look at branches, which may be tail calls or go to hot/cold
2639 section part of same function. */
2642 mark_functions_via_relocs (asection
*sec
,
2643 struct bfd_link_info
*info
,
2646 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
2647 Elf_Internal_Shdr
*symtab_hdr
;
2649 unsigned int priority
= 0;
2650 static bfd_boolean warned
;
2652 if (!interesting_section (sec
)
2653 || sec
->reloc_count
== 0)
2656 internal_relocs
= _bfd_elf_link_read_relocs (sec
->owner
, sec
, NULL
, NULL
,
2658 if (internal_relocs
== NULL
)
2661 symtab_hdr
= &elf_tdata (sec
->owner
)->symtab_hdr
;
2662 psyms
= &symtab_hdr
->contents
;
2663 irela
= internal_relocs
;
2664 irelaend
= irela
+ sec
->reloc_count
;
2665 for (; irela
< irelaend
; irela
++)
2667 enum elf_spu_reloc_type r_type
;
2668 unsigned int r_indx
;
2670 Elf_Internal_Sym
*sym
;
2671 struct elf_link_hash_entry
*h
;
2673 bfd_boolean reject
, is_call
;
2674 struct function_info
*caller
;
2675 struct call_info
*callee
;
2678 r_type
= ELF32_R_TYPE (irela
->r_info
);
2679 if (r_type
!= R_SPU_REL16
2680 && r_type
!= R_SPU_ADDR16
)
2683 if (!(call_tree
&& spu_hash_table (info
)->params
->auto_overlay
))
2687 r_indx
= ELF32_R_SYM (irela
->r_info
);
2688 if (!get_sym_h (&h
, &sym
, &sym_sec
, psyms
, r_indx
, sec
->owner
))
2692 || sym_sec
->output_section
== bfd_abs_section_ptr
)
2698 unsigned char insn
[4];
2700 if (!bfd_get_section_contents (sec
->owner
, sec
, insn
,
2701 irela
->r_offset
, 4))
2703 if (is_branch (insn
))
2705 is_call
= (insn
[0] & 0xfd) == 0x31;
2706 priority
= insn
[1] & 0x0f;
2708 priority
|= insn
[2];
2710 priority
|= insn
[3];
2712 if ((sym_sec
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2713 != (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2716 info
->callbacks
->einfo
2717 (_("%B(%A+0x%v): call to non-code section"
2718 " %B(%A), analysis incomplete\n"),
2719 sec
->owner
, sec
, irela
->r_offset
,
2720 sym_sec
->owner
, sym_sec
);
2728 if (!(call_tree
&& spu_hash_table (info
)->params
->auto_overlay
)
2736 /* For --auto-overlay, count possible stubs we need for
2737 function pointer references. */
2738 unsigned int sym_type
;
2742 sym_type
= ELF_ST_TYPE (sym
->st_info
);
2743 if (sym_type
== STT_FUNC
)
2744 spu_hash_table (info
)->non_ovly_stub
+= 1;
2749 val
= h
->root
.u
.def
.value
;
2751 val
= sym
->st_value
;
2752 val
+= irela
->r_addend
;
2756 struct function_info
*fun
;
2758 if (irela
->r_addend
!= 0)
2760 Elf_Internal_Sym
*fake
= bfd_zmalloc (sizeof (*fake
));
2763 fake
->st_value
= val
;
2765 = _bfd_elf_section_from_bfd_section (sym_sec
->owner
, sym_sec
);
2769 fun
= maybe_insert_function (sym_sec
, sym
, FALSE
, is_call
);
2771 fun
= maybe_insert_function (sym_sec
, h
, TRUE
, is_call
);
2774 if (irela
->r_addend
!= 0
2775 && fun
->u
.sym
!= sym
)
2780 caller
= find_function (sec
, irela
->r_offset
, info
);
2783 callee
= bfd_malloc (sizeof *callee
);
2787 callee
->fun
= find_function (sym_sec
, val
, info
);
2788 if (callee
->fun
== NULL
)
2790 callee
->is_tail
= !is_call
;
2791 callee
->is_pasted
= FALSE
;
2792 callee
->priority
= priority
;
2794 if (callee
->fun
->last_caller
!= sec
)
2796 callee
->fun
->last_caller
= sec
;
2797 callee
->fun
->call_count
+= 1;
2799 if (!insert_callee (caller
, callee
))
2802 && !callee
->fun
->is_func
2803 && callee
->fun
->stack
== 0)
2805 /* This is either a tail call or a branch from one part of
2806 the function to another, ie. hot/cold section. If the
2807 destination has been called by some other function then
2808 it is a separate function. We also assume that functions
2809 are not split across input files. */
2810 if (sec
->owner
!= sym_sec
->owner
)
2812 callee
->fun
->start
= NULL
;
2813 callee
->fun
->is_func
= TRUE
;
2815 else if (callee
->fun
->start
== NULL
)
2817 struct function_info
*caller_start
= caller
;
2818 while (caller_start
->start
)
2819 caller_start
= caller_start
->start
;
2821 if (caller_start
!= callee
->fun
)
2822 callee
->fun
->start
= caller_start
;
2826 struct function_info
*callee_start
;
2827 struct function_info
*caller_start
;
2828 callee_start
= callee
->fun
;
2829 while (callee_start
->start
)
2830 callee_start
= callee_start
->start
;
2831 caller_start
= caller
;
2832 while (caller_start
->start
)
2833 caller_start
= caller_start
->start
;
2834 if (caller_start
!= callee_start
)
2836 callee
->fun
->start
= NULL
;
2837 callee
->fun
->is_func
= TRUE
;
2846 /* Handle something like .init or .fini, which has a piece of a function.
2847 These sections are pasted together to form a single function. */
2850 pasted_function (asection
*sec
)
2852 struct bfd_link_order
*l
;
2853 struct _spu_elf_section_data
*sec_data
;
2854 struct spu_elf_stack_info
*sinfo
;
2855 Elf_Internal_Sym
*fake
;
2856 struct function_info
*fun
, *fun_start
;
2858 fake
= bfd_zmalloc (sizeof (*fake
));
2862 fake
->st_size
= sec
->size
;
2864 = _bfd_elf_section_from_bfd_section (sec
->owner
, sec
);
2865 fun
= maybe_insert_function (sec
, fake
, FALSE
, FALSE
);
2869 /* Find a function immediately preceding this section. */
2871 for (l
= sec
->output_section
->map_head
.link_order
; l
!= NULL
; l
= l
->next
)
2873 if (l
->u
.indirect
.section
== sec
)
2875 if (fun_start
!= NULL
)
2877 struct call_info
*callee
= bfd_malloc (sizeof *callee
);
2881 fun
->start
= fun_start
;
2883 callee
->is_tail
= TRUE
;
2884 callee
->is_pasted
= TRUE
;
2886 if (!insert_callee (fun_start
, callee
))
2892 if (l
->type
== bfd_indirect_link_order
2893 && (sec_data
= spu_elf_section_data (l
->u
.indirect
.section
)) != NULL
2894 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
2895 && sinfo
->num_fun
!= 0)
2896 fun_start
= &sinfo
->fun
[sinfo
->num_fun
- 1];
2899 /* Don't return an error if we did not find a function preceding this
2900 section. The section may have incorrect flags. */
2904 /* Map address ranges in code sections to functions. */
2907 discover_functions (struct bfd_link_info
*info
)
2911 Elf_Internal_Sym
***psym_arr
;
2912 asection
***sec_arr
;
2913 bfd_boolean gaps
= FALSE
;
2916 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2919 psym_arr
= bfd_zmalloc (bfd_idx
* sizeof (*psym_arr
));
2920 if (psym_arr
== NULL
)
2922 sec_arr
= bfd_zmalloc (bfd_idx
* sizeof (*sec_arr
));
2923 if (sec_arr
== NULL
)
2926 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2928 ibfd
= ibfd
->link_next
, bfd_idx
++)
2930 extern const bfd_target bfd_elf32_spu_vec
;
2931 Elf_Internal_Shdr
*symtab_hdr
;
2934 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
2935 asection
**psecs
, **p
;
2937 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2940 /* Read all the symbols. */
2941 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2942 symcount
= symtab_hdr
->sh_size
/ symtab_hdr
->sh_entsize
;
2946 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2947 if (interesting_section (sec
))
2955 if (symtab_hdr
->contents
!= NULL
)
2957 /* Don't use cached symbols since the generic ELF linker
2958 code only reads local symbols, and we need globals too. */
2959 free (symtab_hdr
->contents
);
2960 symtab_hdr
->contents
= NULL
;
2962 syms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
, symcount
, 0,
2964 symtab_hdr
->contents
= (void *) syms
;
2968 /* Select defined function symbols that are going to be output. */
2969 psyms
= bfd_malloc ((symcount
+ 1) * sizeof (*psyms
));
2972 psym_arr
[bfd_idx
] = psyms
;
2973 psecs
= bfd_malloc (symcount
* sizeof (*psecs
));
2976 sec_arr
[bfd_idx
] = psecs
;
2977 for (psy
= psyms
, p
= psecs
, sy
= syms
; sy
< syms
+ symcount
; ++p
, ++sy
)
2978 if (ELF_ST_TYPE (sy
->st_info
) == STT_NOTYPE
2979 || ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
2983 *p
= s
= bfd_section_from_elf_index (ibfd
, sy
->st_shndx
);
2984 if (s
!= NULL
&& interesting_section (s
))
2987 symcount
= psy
- psyms
;
2990 /* Sort them by section and offset within section. */
2991 sort_syms_syms
= syms
;
2992 sort_syms_psecs
= psecs
;
2993 qsort (psyms
, symcount
, sizeof (*psyms
), sort_syms
);
2995 /* Now inspect the function symbols. */
2996 for (psy
= psyms
; psy
< psyms
+ symcount
; )
2998 asection
*s
= psecs
[*psy
- syms
];
2999 Elf_Internal_Sym
**psy2
;
3001 for (psy2
= psy
; ++psy2
< psyms
+ symcount
; )
3002 if (psecs
[*psy2
- syms
] != s
)
3005 if (!alloc_stack_info (s
, psy2
- psy
))
3010 /* First install info about properly typed and sized functions.
3011 In an ideal world this will cover all code sections, except
3012 when partitioning functions into hot and cold sections,
3013 and the horrible pasted together .init and .fini functions. */
3014 for (psy
= psyms
; psy
< psyms
+ symcount
; ++psy
)
3017 if (ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
3019 asection
*s
= psecs
[sy
- syms
];
3020 if (!maybe_insert_function (s
, sy
, FALSE
, TRUE
))
3025 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
3026 if (interesting_section (sec
))
3027 gaps
|= check_function_ranges (sec
, info
);
3032 /* See if we can discover more function symbols by looking at
3034 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
3036 ibfd
= ibfd
->link_next
, bfd_idx
++)
3040 if (psym_arr
[bfd_idx
] == NULL
)
3043 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3044 if (!mark_functions_via_relocs (sec
, info
, FALSE
))
3048 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
3050 ibfd
= ibfd
->link_next
, bfd_idx
++)
3052 Elf_Internal_Shdr
*symtab_hdr
;
3054 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
3057 if ((psyms
= psym_arr
[bfd_idx
]) == NULL
)
3060 psecs
= sec_arr
[bfd_idx
];
3062 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
3063 syms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
3066 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
3067 if (interesting_section (sec
))
3068 gaps
|= check_function_ranges (sec
, info
);
3072 /* Finally, install all globals. */
3073 for (psy
= psyms
; (sy
= *psy
) != NULL
; ++psy
)
3077 s
= psecs
[sy
- syms
];
3079 /* Global syms might be improperly typed functions. */
3080 if (ELF_ST_TYPE (sy
->st_info
) != STT_FUNC
3081 && ELF_ST_BIND (sy
->st_info
) == STB_GLOBAL
)
3083 if (!maybe_insert_function (s
, sy
, FALSE
, FALSE
))
3089 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3091 extern const bfd_target bfd_elf32_spu_vec
;
3094 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3097 /* Some of the symbols we've installed as marking the
3098 beginning of functions may have a size of zero. Extend
3099 the range of such functions to the beginning of the
3100 next symbol of interest. */
3101 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3102 if (interesting_section (sec
))
3104 struct _spu_elf_section_data
*sec_data
;
3105 struct spu_elf_stack_info
*sinfo
;
3107 sec_data
= spu_elf_section_data (sec
);
3108 sinfo
= sec_data
->u
.i
.stack_info
;
3109 if (sinfo
!= NULL
&& sinfo
->num_fun
!= 0)
3112 bfd_vma hi
= sec
->size
;
3114 for (fun_idx
= sinfo
->num_fun
; --fun_idx
>= 0; )
3116 sinfo
->fun
[fun_idx
].hi
= hi
;
3117 hi
= sinfo
->fun
[fun_idx
].lo
;
3120 sinfo
->fun
[0].lo
= 0;
3122 /* No symbols in this section. Must be .init or .fini
3123 or something similar. */
3124 else if (!pasted_function (sec
))
3130 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
3132 ibfd
= ibfd
->link_next
, bfd_idx
++)
3134 if (psym_arr
[bfd_idx
] == NULL
)
3137 free (psym_arr
[bfd_idx
]);
3138 free (sec_arr
[bfd_idx
]);
3147 /* Iterate over all function_info we have collected, calling DOIT on
3148 each node if ROOT_ONLY is false. Only call DOIT on root nodes
3152 for_each_node (bfd_boolean (*doit
) (struct function_info
*,
3153 struct bfd_link_info
*,
3155 struct bfd_link_info
*info
,
3161 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3163 extern const bfd_target bfd_elf32_spu_vec
;
3166 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3169 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3171 struct _spu_elf_section_data
*sec_data
;
3172 struct spu_elf_stack_info
*sinfo
;
3174 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
3175 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3178 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3179 if (!root_only
|| !sinfo
->fun
[i
].non_root
)
3180 if (!doit (&sinfo
->fun
[i
], info
, param
))
3188 /* Transfer call info attached to struct function_info entries for
3189 all of a given function's sections to the first entry. */
3192 transfer_calls (struct function_info
*fun
,
3193 struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
3194 void *param ATTRIBUTE_UNUSED
)
3196 struct function_info
*start
= fun
->start
;
3200 struct call_info
*call
, *call_next
;
3202 while (start
->start
!= NULL
)
3203 start
= start
->start
;
3204 for (call
= fun
->call_list
; call
!= NULL
; call
= call_next
)
3206 call_next
= call
->next
;
3207 if (!insert_callee (start
, call
))
3210 fun
->call_list
= NULL
;
3215 /* Mark nodes in the call graph that are called by some other node. */
3218 mark_non_root (struct function_info
*fun
,
3219 struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
3220 void *param ATTRIBUTE_UNUSED
)
3222 struct call_info
*call
;
3227 for (call
= fun
->call_list
; call
; call
= call
->next
)
3229 call
->fun
->non_root
= TRUE
;
3230 mark_non_root (call
->fun
, 0, 0);
3235 /* Remove cycles from the call graph. Set depth of nodes. */
3238 remove_cycles (struct function_info
*fun
,
3239 struct bfd_link_info
*info
,
3242 struct call_info
**callp
, *call
;
3243 unsigned int depth
= *(unsigned int *) param
;
3244 unsigned int max_depth
= depth
;
3248 fun
->marking
= TRUE
;
3250 callp
= &fun
->call_list
;
3251 while ((call
= *callp
) != NULL
)
3253 call
->max_depth
= depth
+ !call
->is_pasted
;
3254 if (!call
->fun
->visit2
)
3256 if (!remove_cycles (call
->fun
, info
, &call
->max_depth
))
3258 if (max_depth
< call
->max_depth
)
3259 max_depth
= call
->max_depth
;
3261 else if (call
->fun
->marking
)
3263 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
3265 if (!htab
->params
->auto_overlay
3266 && htab
->params
->stack_analysis
)
3268 const char *f1
= func_name (fun
);
3269 const char *f2
= func_name (call
->fun
);
3271 info
->callbacks
->info (_("Stack analysis will ignore the call "
3275 *callp
= call
->next
;
3279 callp
= &call
->next
;
3281 fun
->marking
= FALSE
;
3282 *(unsigned int *) param
= max_depth
;
3286 /* Check that we actually visited all nodes in remove_cycles. If we
3287 didn't, then there is some cycle in the call graph not attached to
3288 any root node. Arbitrarily choose a node in the cycle as a new
3289 root and break the cycle. */
3292 mark_detached_root (struct function_info
*fun
,
3293 struct bfd_link_info
*info
,
3298 fun
->non_root
= FALSE
;
3299 *(unsigned int *) param
= 0;
3300 return remove_cycles (fun
, info
, param
);
3303 /* Populate call_list for each function. */
3306 build_call_tree (struct bfd_link_info
*info
)
3311 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3313 extern const bfd_target bfd_elf32_spu_vec
;
3316 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3319 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3320 if (!mark_functions_via_relocs (sec
, info
, TRUE
))
3324 /* Transfer call info from hot/cold section part of function
3326 if (!spu_hash_table (info
)->params
->auto_overlay
3327 && !for_each_node (transfer_calls
, info
, 0, FALSE
))
3330 /* Find the call graph root(s). */
3331 if (!for_each_node (mark_non_root
, info
, 0, FALSE
))
3334 /* Remove cycles from the call graph. We start from the root node(s)
3335 so that we break cycles in a reasonable place. */
3337 if (!for_each_node (remove_cycles
, info
, &depth
, TRUE
))
3340 return for_each_node (mark_detached_root
, info
, &depth
, FALSE
);
3343 /* qsort predicate to sort calls by priority, max_depth then count. */
3346 sort_calls (const void *a
, const void *b
)
3348 struct call_info
*const *c1
= a
;
3349 struct call_info
*const *c2
= b
;
3352 delta
= (*c2
)->priority
- (*c1
)->priority
;
3356 delta
= (*c2
)->max_depth
- (*c1
)->max_depth
;
3360 delta
= (*c2
)->count
- (*c1
)->count
;
3364 return (char *) c1
- (char *) c2
;
3368 unsigned int max_overlay_size
;
3371 /* Set linker_mark and gc_mark on any sections that we will put in
3372 overlays. These flags are used by the generic ELF linker, but we
3373 won't be continuing on to bfd_elf_final_link so it is OK to use
3374 them. linker_mark is clear before we get here. Set segment_mark
3375 on sections that are part of a pasted function (excluding the last
3378 Set up function rodata section if --overlay-rodata. We don't
3379 currently include merged string constant rodata sections since
3381 Sort the call graph so that the deepest nodes will be visited
3385 mark_overlay_section (struct function_info
*fun
,
3386 struct bfd_link_info
*info
,
3389 struct call_info
*call
;
3391 struct _mos_param
*mos_param
= param
;
3392 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
3398 if (!fun
->sec
->linker_mark
3399 && (htab
->params
->ovly_flavour
!= ovly_soft_icache
3400 || htab
->params
->non_ia_text
3401 || strncmp (fun
->sec
->name
, ".text.ia.", 9) == 0))
3405 fun
->sec
->linker_mark
= 1;
3406 fun
->sec
->gc_mark
= 1;
3407 fun
->sec
->segment_mark
= 0;
3408 /* Ensure SEC_CODE is set on this text section (it ought to
3409 be!), and SEC_CODE is clear on rodata sections. We use
3410 this flag to differentiate the two overlay section types. */
3411 fun
->sec
->flags
|= SEC_CODE
;
3413 size
= fun
->sec
->size
;
3414 if (htab
->params
->auto_overlay
& OVERLAY_RODATA
)
3418 /* Find the rodata section corresponding to this function's
3420 if (strcmp (fun
->sec
->name
, ".text") == 0)
3422 name
= bfd_malloc (sizeof (".rodata"));
3425 memcpy (name
, ".rodata", sizeof (".rodata"));
3427 else if (strncmp (fun
->sec
->name
, ".text.", 6) == 0)
3429 size_t len
= strlen (fun
->sec
->name
);
3430 name
= bfd_malloc (len
+ 3);
3433 memcpy (name
, ".rodata", sizeof (".rodata"));
3434 memcpy (name
+ 7, fun
->sec
->name
+ 5, len
- 4);
3436 else if (strncmp (fun
->sec
->name
, ".gnu.linkonce.t.", 16) == 0)
3438 size_t len
= strlen (fun
->sec
->name
) + 1;
3439 name
= bfd_malloc (len
);
3442 memcpy (name
, fun
->sec
->name
, len
);
3448 asection
*rodata
= NULL
;
3449 asection
*group_sec
= elf_section_data (fun
->sec
)->next_in_group
;
3450 if (group_sec
== NULL
)
3451 rodata
= bfd_get_section_by_name (fun
->sec
->owner
, name
);
3453 while (group_sec
!= NULL
&& group_sec
!= fun
->sec
)
3455 if (strcmp (group_sec
->name
, name
) == 0)
3460 group_sec
= elf_section_data (group_sec
)->next_in_group
;
3462 fun
->rodata
= rodata
;
3465 size
+= fun
->rodata
->size
;
3466 if (htab
->params
->line_size
!= 0
3467 && size
> htab
->params
->line_size
)
3469 size
-= fun
->rodata
->size
;
3474 fun
->rodata
->linker_mark
= 1;
3475 fun
->rodata
->gc_mark
= 1;
3476 fun
->rodata
->flags
&= ~SEC_CODE
;
3482 if (mos_param
->max_overlay_size
< size
)
3483 mos_param
->max_overlay_size
= size
;
3486 for (count
= 0, call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3491 struct call_info
**calls
= bfd_malloc (count
* sizeof (*calls
));
3495 for (count
= 0, call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3496 calls
[count
++] = call
;
3498 qsort (calls
, count
, sizeof (*calls
), sort_calls
);
3500 fun
->call_list
= NULL
;
3504 calls
[count
]->next
= fun
->call_list
;
3505 fun
->call_list
= calls
[count
];
3510 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3512 if (call
->is_pasted
)
3514 /* There can only be one is_pasted call per function_info. */
3515 BFD_ASSERT (!fun
->sec
->segment_mark
);
3516 fun
->sec
->segment_mark
= 1;
3518 if (!mark_overlay_section (call
->fun
, info
, param
))
3522 /* Don't put entry code into an overlay. The overlay manager needs
3523 a stack! Also, don't mark .ovl.init as an overlay. */
3524 if (fun
->lo
+ fun
->sec
->output_offset
+ fun
->sec
->output_section
->vma
3525 == info
->output_bfd
->start_address
3526 || strncmp (fun
->sec
->output_section
->name
, ".ovl.init", 9) == 0)
3528 fun
->sec
->linker_mark
= 0;
3529 if (fun
->rodata
!= NULL
)
3530 fun
->rodata
->linker_mark
= 0;
3535 /* If non-zero then unmark functions called from those within sections
3536 that we need to unmark. Unfortunately this isn't reliable since the
3537 call graph cannot know the destination of function pointer calls. */
3538 #define RECURSE_UNMARK 0
3541 asection
*exclude_input_section
;
3542 asection
*exclude_output_section
;
3543 unsigned long clearing
;
3546 /* Undo some of mark_overlay_section's work. */
3549 unmark_overlay_section (struct function_info
*fun
,
3550 struct bfd_link_info
*info
,
3553 struct call_info
*call
;
3554 struct _uos_param
*uos_param
= param
;
3555 unsigned int excluded
= 0;
3563 if (fun
->sec
== uos_param
->exclude_input_section
3564 || fun
->sec
->output_section
== uos_param
->exclude_output_section
)
3568 uos_param
->clearing
+= excluded
;
3570 if (RECURSE_UNMARK
? uos_param
->clearing
: excluded
)
3572 fun
->sec
->linker_mark
= 0;
3574 fun
->rodata
->linker_mark
= 0;
3577 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3578 if (!unmark_overlay_section (call
->fun
, info
, param
))
3582 uos_param
->clearing
-= excluded
;
3587 unsigned int lib_size
;
3588 asection
**lib_sections
;
3591 /* Add sections we have marked as belonging to overlays to an array
3592 for consideration as non-overlay sections. The array consist of
3593 pairs of sections, (text,rodata), for functions in the call graph. */
3596 collect_lib_sections (struct function_info
*fun
,
3597 struct bfd_link_info
*info
,
3600 struct _cl_param
*lib_param
= param
;
3601 struct call_info
*call
;
3608 if (!fun
->sec
->linker_mark
|| !fun
->sec
->gc_mark
|| fun
->sec
->segment_mark
)
3611 size
= fun
->sec
->size
;
3613 size
+= fun
->rodata
->size
;
3615 if (size
<= lib_param
->lib_size
)
3617 *lib_param
->lib_sections
++ = fun
->sec
;
3618 fun
->sec
->gc_mark
= 0;
3619 if (fun
->rodata
&& fun
->rodata
->linker_mark
&& fun
->rodata
->gc_mark
)
3621 *lib_param
->lib_sections
++ = fun
->rodata
;
3622 fun
->rodata
->gc_mark
= 0;
3625 *lib_param
->lib_sections
++ = NULL
;
3628 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3629 collect_lib_sections (call
->fun
, info
, param
);
3634 /* qsort predicate to sort sections by call count. */
3637 sort_lib (const void *a
, const void *b
)
3639 asection
*const *s1
= a
;
3640 asection
*const *s2
= b
;
3641 struct _spu_elf_section_data
*sec_data
;
3642 struct spu_elf_stack_info
*sinfo
;
3646 if ((sec_data
= spu_elf_section_data (*s1
)) != NULL
3647 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3650 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3651 delta
-= sinfo
->fun
[i
].call_count
;
3654 if ((sec_data
= spu_elf_section_data (*s2
)) != NULL
3655 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3658 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3659 delta
+= sinfo
->fun
[i
].call_count
;
3668 /* Remove some sections from those marked to be in overlays. Choose
3669 those that are called from many places, likely library functions. */
3672 auto_ovl_lib_functions (struct bfd_link_info
*info
, unsigned int lib_size
)
3675 asection
**lib_sections
;
3676 unsigned int i
, lib_count
;
3677 struct _cl_param collect_lib_param
;
3678 struct function_info dummy_caller
;
3679 struct spu_link_hash_table
*htab
;
3681 memset (&dummy_caller
, 0, sizeof (dummy_caller
));
3683 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3685 extern const bfd_target bfd_elf32_spu_vec
;
3688 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3691 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3692 if (sec
->linker_mark
3693 && sec
->size
< lib_size
3694 && (sec
->flags
& SEC_CODE
) != 0)
3697 lib_sections
= bfd_malloc (lib_count
* 2 * sizeof (*lib_sections
));
3698 if (lib_sections
== NULL
)
3699 return (unsigned int) -1;
3700 collect_lib_param
.lib_size
= lib_size
;
3701 collect_lib_param
.lib_sections
= lib_sections
;
3702 if (!for_each_node (collect_lib_sections
, info
, &collect_lib_param
,
3704 return (unsigned int) -1;
3705 lib_count
= (collect_lib_param
.lib_sections
- lib_sections
) / 2;
3707 /* Sort sections so that those with the most calls are first. */
3709 qsort (lib_sections
, lib_count
, 2 * sizeof (*lib_sections
), sort_lib
);
3711 htab
= spu_hash_table (info
);
3712 for (i
= 0; i
< lib_count
; i
++)
3714 unsigned int tmp
, stub_size
;
3716 struct _spu_elf_section_data
*sec_data
;
3717 struct spu_elf_stack_info
*sinfo
;
3719 sec
= lib_sections
[2 * i
];
3720 /* If this section is OK, its size must be less than lib_size. */
3722 /* If it has a rodata section, then add that too. */
3723 if (lib_sections
[2 * i
+ 1])
3724 tmp
+= lib_sections
[2 * i
+ 1]->size
;
3725 /* Add any new overlay call stubs needed by the section. */
3728 && (sec_data
= spu_elf_section_data (sec
)) != NULL
3729 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3732 struct call_info
*call
;
3734 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3735 for (call
= sinfo
->fun
[k
].call_list
; call
; call
= call
->next
)
3736 if (call
->fun
->sec
->linker_mark
)
3738 struct call_info
*p
;
3739 for (p
= dummy_caller
.call_list
; p
; p
= p
->next
)
3740 if (p
->fun
== call
->fun
)
3743 stub_size
+= ovl_stub_size (htab
->params
);
3746 if (tmp
+ stub_size
< lib_size
)
3748 struct call_info
**pp
, *p
;
3750 /* This section fits. Mark it as non-overlay. */
3751 lib_sections
[2 * i
]->linker_mark
= 0;
3752 if (lib_sections
[2 * i
+ 1])
3753 lib_sections
[2 * i
+ 1]->linker_mark
= 0;
3754 lib_size
-= tmp
+ stub_size
;
3755 /* Call stubs to the section we just added are no longer
3757 pp
= &dummy_caller
.call_list
;
3758 while ((p
= *pp
) != NULL
)
3759 if (!p
->fun
->sec
->linker_mark
)
3761 lib_size
+= ovl_stub_size (htab
->params
);
3767 /* Add new call stubs to dummy_caller. */
3768 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
3769 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3772 struct call_info
*call
;
3774 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3775 for (call
= sinfo
->fun
[k
].call_list
;
3778 if (call
->fun
->sec
->linker_mark
)
3780 struct call_info
*callee
;
3781 callee
= bfd_malloc (sizeof (*callee
));
3783 return (unsigned int) -1;
3785 if (!insert_callee (&dummy_caller
, callee
))
3791 while (dummy_caller
.call_list
!= NULL
)
3793 struct call_info
*call
= dummy_caller
.call_list
;
3794 dummy_caller
.call_list
= call
->next
;
3797 for (i
= 0; i
< 2 * lib_count
; i
++)
3798 if (lib_sections
[i
])
3799 lib_sections
[i
]->gc_mark
= 1;
3800 free (lib_sections
);
3804 /* Build an array of overlay sections. The deepest node's section is
3805 added first, then its parent node's section, then everything called
3806 from the parent section. The idea being to group sections to
3807 minimise calls between different overlays. */
3810 collect_overlays (struct function_info
*fun
,
3811 struct bfd_link_info
*info
,
3814 struct call_info
*call
;
3815 bfd_boolean added_fun
;
3816 asection
***ovly_sections
= param
;
3822 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3823 if (!call
->is_pasted
)
3825 if (!collect_overlays (call
->fun
, info
, ovly_sections
))
3831 if (fun
->sec
->linker_mark
&& fun
->sec
->gc_mark
)
3833 fun
->sec
->gc_mark
= 0;
3834 *(*ovly_sections
)++ = fun
->sec
;
3835 if (fun
->rodata
&& fun
->rodata
->linker_mark
&& fun
->rodata
->gc_mark
)
3837 fun
->rodata
->gc_mark
= 0;
3838 *(*ovly_sections
)++ = fun
->rodata
;
3841 *(*ovly_sections
)++ = NULL
;
3844 /* Pasted sections must stay with the first section. We don't
3845 put pasted sections in the array, just the first section.
3846 Mark subsequent sections as already considered. */
3847 if (fun
->sec
->segment_mark
)
3849 struct function_info
*call_fun
= fun
;
3852 for (call
= call_fun
->call_list
; call
!= NULL
; call
= call
->next
)
3853 if (call
->is_pasted
)
3855 call_fun
= call
->fun
;
3856 call_fun
->sec
->gc_mark
= 0;
3857 if (call_fun
->rodata
)
3858 call_fun
->rodata
->gc_mark
= 0;
3864 while (call_fun
->sec
->segment_mark
);
3868 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3869 if (!collect_overlays (call
->fun
, info
, ovly_sections
))
3874 struct _spu_elf_section_data
*sec_data
;
3875 struct spu_elf_stack_info
*sinfo
;
3877 if ((sec_data
= spu_elf_section_data (fun
->sec
)) != NULL
3878 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3881 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3882 if (!collect_overlays (&sinfo
->fun
[i
], info
, ovly_sections
))
3890 struct _sum_stack_param
{
3892 size_t overall_stack
;
3893 bfd_boolean emit_stack_syms
;
3896 /* Descend the call graph for FUN, accumulating total stack required. */
3899 sum_stack (struct function_info
*fun
,
3900 struct bfd_link_info
*info
,
3903 struct call_info
*call
;
3904 struct function_info
*max
;
3905 size_t stack
, cum_stack
;
3907 bfd_boolean has_call
;
3908 struct _sum_stack_param
*sum_stack_param
= param
;
3909 struct spu_link_hash_table
*htab
;
3911 cum_stack
= fun
->stack
;
3912 sum_stack_param
->cum_stack
= cum_stack
;
3918 for (call
= fun
->call_list
; call
; call
= call
->next
)
3920 if (!call
->is_pasted
)
3922 if (!sum_stack (call
->fun
, info
, sum_stack_param
))
3924 stack
= sum_stack_param
->cum_stack
;
3925 /* Include caller stack for normal calls, don't do so for
3926 tail calls. fun->stack here is local stack usage for
3928 if (!call
->is_tail
|| call
->is_pasted
|| call
->fun
->start
!= NULL
)
3929 stack
+= fun
->stack
;
3930 if (cum_stack
< stack
)
3937 sum_stack_param
->cum_stack
= cum_stack
;
3939 /* Now fun->stack holds cumulative stack. */
3940 fun
->stack
= cum_stack
;
3944 && sum_stack_param
->overall_stack
< cum_stack
)
3945 sum_stack_param
->overall_stack
= cum_stack
;
3947 htab
= spu_hash_table (info
);
3948 if (htab
->params
->auto_overlay
)
3951 f1
= func_name (fun
);
3952 if (htab
->params
->stack_analysis
)
3955 info
->callbacks
->info (_(" %s: 0x%v\n"), f1
, (bfd_vma
) cum_stack
);
3956 info
->callbacks
->minfo (_("%s: 0x%v 0x%v\n"),
3957 f1
, (bfd_vma
) stack
, (bfd_vma
) cum_stack
);
3961 info
->callbacks
->minfo (_(" calls:\n"));
3962 for (call
= fun
->call_list
; call
; call
= call
->next
)
3963 if (!call
->is_pasted
)
3965 const char *f2
= func_name (call
->fun
);
3966 const char *ann1
= call
->fun
== max
? "*" : " ";
3967 const char *ann2
= call
->is_tail
? "t" : " ";
3969 info
->callbacks
->minfo (_(" %s%s %s\n"), ann1
, ann2
, f2
);
3974 if (sum_stack_param
->emit_stack_syms
)
3976 char *name
= bfd_malloc (18 + strlen (f1
));
3977 struct elf_link_hash_entry
*h
;
3982 if (fun
->global
|| ELF_ST_BIND (fun
->u
.sym
->st_info
) == STB_GLOBAL
)
3983 sprintf (name
, "__stack_%s", f1
);
3985 sprintf (name
, "__stack_%x_%s", fun
->sec
->id
& 0xffffffff, f1
);
3987 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
3990 && (h
->root
.type
== bfd_link_hash_new
3991 || h
->root
.type
== bfd_link_hash_undefined
3992 || h
->root
.type
== bfd_link_hash_undefweak
))
3994 h
->root
.type
= bfd_link_hash_defined
;
3995 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
3996 h
->root
.u
.def
.value
= cum_stack
;
4001 h
->ref_regular_nonweak
= 1;
4002 h
->forced_local
= 1;
4010 /* SEC is part of a pasted function. Return the call_info for the
4011 next section of this function. */
4013 static struct call_info
*
4014 find_pasted_call (asection
*sec
)
4016 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
4017 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
4018 struct call_info
*call
;
4021 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
4022 for (call
= sinfo
->fun
[k
].call_list
; call
!= NULL
; call
= call
->next
)
4023 if (call
->is_pasted
)
4029 /* qsort predicate to sort bfds by file name. */
4032 sort_bfds (const void *a
, const void *b
)
4034 bfd
*const *abfd1
= a
;
4035 bfd
*const *abfd2
= b
;
4037 return strcmp ((*abfd1
)->filename
, (*abfd2
)->filename
);
4041 print_one_overlay_section (FILE *script
,
4044 unsigned int ovlynum
,
4045 unsigned int *ovly_map
,
4046 asection
**ovly_sections
,
4047 struct bfd_link_info
*info
)
4051 for (j
= base
; j
< count
&& ovly_map
[j
] == ovlynum
; j
++)
4053 asection
*sec
= ovly_sections
[2 * j
];
4055 if (fprintf (script
, " %s%c%s (%s)\n",
4056 (sec
->owner
->my_archive
!= NULL
4057 ? sec
->owner
->my_archive
->filename
: ""),
4058 info
->path_separator
,
4059 sec
->owner
->filename
,
4062 if (sec
->segment_mark
)
4064 struct call_info
*call
= find_pasted_call (sec
);
4065 while (call
!= NULL
)
4067 struct function_info
*call_fun
= call
->fun
;
4068 sec
= call_fun
->sec
;
4069 if (fprintf (script
, " %s%c%s (%s)\n",
4070 (sec
->owner
->my_archive
!= NULL
4071 ? sec
->owner
->my_archive
->filename
: ""),
4072 info
->path_separator
,
4073 sec
->owner
->filename
,
4076 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
4077 if (call
->is_pasted
)
4083 for (j
= base
; j
< count
&& ovly_map
[j
] == ovlynum
; j
++)
4085 asection
*sec
= ovly_sections
[2 * j
+ 1];
4087 && fprintf (script
, " %s%c%s (%s)\n",
4088 (sec
->owner
->my_archive
!= NULL
4089 ? sec
->owner
->my_archive
->filename
: ""),
4090 info
->path_separator
,
4091 sec
->owner
->filename
,
4095 sec
= ovly_sections
[2 * j
];
4096 if (sec
->segment_mark
)
4098 struct call_info
*call
= find_pasted_call (sec
);
4099 while (call
!= NULL
)
4101 struct function_info
*call_fun
= call
->fun
;
4102 sec
= call_fun
->rodata
;
4104 && fprintf (script
, " %s%c%s (%s)\n",
4105 (sec
->owner
->my_archive
!= NULL
4106 ? sec
->owner
->my_archive
->filename
: ""),
4107 info
->path_separator
,
4108 sec
->owner
->filename
,
4111 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
4112 if (call
->is_pasted
)
4121 /* Handle --auto-overlay. */
4124 spu_elf_auto_overlay (struct bfd_link_info
*info
)
4128 struct elf_segment_map
*m
;
4129 unsigned int fixed_size
, lo
, hi
;
4130 struct spu_link_hash_table
*htab
;
4131 unsigned int base
, i
, count
, bfd_count
;
4132 unsigned int region
, ovlynum
;
4133 asection
**ovly_sections
, **ovly_p
;
4134 unsigned int *ovly_map
;
4136 unsigned int total_overlay_size
, overlay_size
;
4137 const char *ovly_mgr_entry
;
4138 struct elf_link_hash_entry
*h
;
4139 struct _mos_param mos_param
;
4140 struct _uos_param uos_param
;
4141 struct function_info dummy_caller
;
4143 /* Find the extents of our loadable image. */
4144 lo
= (unsigned int) -1;
4146 for (m
= elf_tdata (info
->output_bfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
4147 if (m
->p_type
== PT_LOAD
)
4148 for (i
= 0; i
< m
->count
; i
++)
4149 if (m
->sections
[i
]->size
!= 0)
4151 if (m
->sections
[i
]->vma
< lo
)
4152 lo
= m
->sections
[i
]->vma
;
4153 if (m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1 > hi
)
4154 hi
= m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1;
4156 fixed_size
= hi
+ 1 - lo
;
4158 if (!discover_functions (info
))
4161 if (!build_call_tree (info
))
4164 htab
= spu_hash_table (info
);
4165 if (htab
->reserved
== 0)
4167 struct _sum_stack_param sum_stack_param
;
4169 sum_stack_param
.emit_stack_syms
= 0;
4170 sum_stack_param
.overall_stack
= 0;
4171 if (!for_each_node (sum_stack
, info
, &sum_stack_param
, TRUE
))
4173 htab
->reserved
= sum_stack_param
.overall_stack
+ htab
->extra_stack_space
;
4176 /* No need for overlays if everything already fits. */
4177 if (fixed_size
+ htab
->reserved
<= htab
->local_store
4178 && htab
->params
->ovly_flavour
!= ovly_soft_icache
)
4180 htab
->params
->auto_overlay
= 0;
4184 uos_param
.exclude_input_section
= 0;
4185 uos_param
.exclude_output_section
4186 = bfd_get_section_by_name (info
->output_bfd
, ".interrupt");
4188 ovly_mgr_entry
= "__ovly_load";
4189 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4190 ovly_mgr_entry
= "__icache_br_handler";
4191 h
= elf_link_hash_lookup (&htab
->elf
, ovly_mgr_entry
,
4192 FALSE
, FALSE
, FALSE
);
4194 && (h
->root
.type
== bfd_link_hash_defined
4195 || h
->root
.type
== bfd_link_hash_defweak
)
4198 /* We have a user supplied overlay manager. */
4199 uos_param
.exclude_input_section
= h
->root
.u
.def
.section
;
4203 /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
4204 builtin version to .text, and will adjust .text size. */
4205 fixed_size
+= (*htab
->params
->spu_elf_load_ovl_mgr
) ();
4208 /* Mark overlay sections, and find max overlay section size. */
4209 mos_param
.max_overlay_size
= 0;
4210 if (!for_each_node (mark_overlay_section
, info
, &mos_param
, TRUE
))
4213 /* We can't put the overlay manager or interrupt routines in
4215 uos_param
.clearing
= 0;
4216 if ((uos_param
.exclude_input_section
4217 || uos_param
.exclude_output_section
)
4218 && !for_each_node (unmark_overlay_section
, info
, &uos_param
, TRUE
))
4222 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
4224 bfd_arr
= bfd_malloc (bfd_count
* sizeof (*bfd_arr
));
4225 if (bfd_arr
== NULL
)
4228 /* Count overlay sections, and subtract their sizes from "fixed_size". */
4231 total_overlay_size
= 0;
4232 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
4234 extern const bfd_target bfd_elf32_spu_vec
;
4236 unsigned int old_count
;
4238 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
4242 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
4243 if (sec
->linker_mark
)
4245 if ((sec
->flags
& SEC_CODE
) != 0)
4247 fixed_size
-= sec
->size
;
4248 total_overlay_size
+= sec
->size
;
4250 else if ((sec
->flags
& (SEC_ALLOC
| SEC_LOAD
)) == (SEC_ALLOC
| SEC_LOAD
)
4251 && sec
->output_section
->owner
== info
->output_bfd
4252 && strncmp (sec
->output_section
->name
, ".ovl.init", 9) == 0)
4253 fixed_size
-= sec
->size
;
4254 if (count
!= old_count
)
4255 bfd_arr
[bfd_count
++] = ibfd
;
4258 /* Since the overlay link script selects sections by file name and
4259 section name, ensure that file names are unique. */
4262 bfd_boolean ok
= TRUE
;
4264 qsort (bfd_arr
, bfd_count
, sizeof (*bfd_arr
), sort_bfds
);
4265 for (i
= 1; i
< bfd_count
; ++i
)
4266 if (strcmp (bfd_arr
[i
- 1]->filename
, bfd_arr
[i
]->filename
) == 0)
4268 if (bfd_arr
[i
- 1]->my_archive
== bfd_arr
[i
]->my_archive
)
4270 if (bfd_arr
[i
- 1]->my_archive
&& bfd_arr
[i
]->my_archive
)
4271 info
->callbacks
->einfo (_("%s duplicated in %s\n"),
4272 bfd_arr
[i
]->filename
,
4273 bfd_arr
[i
]->my_archive
->filename
);
4275 info
->callbacks
->einfo (_("%s duplicated\n"),
4276 bfd_arr
[i
]->filename
);
4282 info
->callbacks
->einfo (_("sorry, no support for duplicate "
4283 "object files in auto-overlay script\n"));
4284 bfd_set_error (bfd_error_bad_value
);
4290 fixed_size
+= htab
->reserved
;
4291 fixed_size
+= htab
->non_ovly_stub
* ovl_stub_size (htab
->params
);
4292 if (fixed_size
+ mos_param
.max_overlay_size
<= htab
->local_store
)
4294 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4296 /* Stubs in the non-icache area are bigger. */
4297 fixed_size
+= htab
->non_ovly_stub
* 16;
4298 /* Space for icache manager tables.
4299 a) Tag array, one quadword per cache line.
4300 - word 0: ia address of present line, init to zero. */
4301 fixed_size
+= 16 << htab
->num_lines_log2
;
4302 /* b) Rewrite "to" list, one quadword per cache line. */
4303 fixed_size
+= 16 << htab
->num_lines_log2
;
4304 /* c) Rewrite "from" list, one byte per outgoing branch (rounded up
4305 to a power-of-two number of full quadwords) per cache line. */
4306 fixed_size
+= 16 << (htab
->fromelem_size_log2
4307 + htab
->num_lines_log2
);
4308 /* d) Pointer to __ea backing store (toe), 1 quadword. */
4313 /* Guess number of overlays. Assuming overlay buffer is on
4314 average only half full should be conservative. */
4315 ovlynum
= (total_overlay_size
* 2 * htab
->params
->num_lines
4316 / (htab
->local_store
- fixed_size
));
4317 /* Space for _ovly_table[], _ovly_buf_table[] and toe. */
4318 fixed_size
+= ovlynum
* 16 + 16 + 4 + 16;
4322 if (fixed_size
+ mos_param
.max_overlay_size
> htab
->local_store
)
4323 info
->callbacks
->einfo (_("non-overlay size of 0x%v plus maximum overlay "
4324 "size of 0x%v exceeds local store\n"),
4325 (bfd_vma
) fixed_size
,
4326 (bfd_vma
) mos_param
.max_overlay_size
);
4328 /* Now see if we should put some functions in the non-overlay area. */
4329 else if (fixed_size
< htab
->overlay_fixed
)
4331 unsigned int max_fixed
, lib_size
;
4333 max_fixed
= htab
->local_store
- mos_param
.max_overlay_size
;
4334 if (max_fixed
> htab
->overlay_fixed
)
4335 max_fixed
= htab
->overlay_fixed
;
4336 lib_size
= max_fixed
- fixed_size
;
4337 lib_size
= auto_ovl_lib_functions (info
, lib_size
);
4338 if (lib_size
== (unsigned int) -1)
4340 fixed_size
= max_fixed
- lib_size
;
4343 /* Build an array of sections, suitably sorted to place into
4345 ovly_sections
= bfd_malloc (2 * count
* sizeof (*ovly_sections
));
4346 if (ovly_sections
== NULL
)
4348 ovly_p
= ovly_sections
;
4349 if (!for_each_node (collect_overlays
, info
, &ovly_p
, TRUE
))
4351 count
= (size_t) (ovly_p
- ovly_sections
) / 2;
4352 ovly_map
= bfd_malloc (count
* sizeof (*ovly_map
));
4353 if (ovly_map
== NULL
)
4356 memset (&dummy_caller
, 0, sizeof (dummy_caller
));
4357 overlay_size
= (htab
->local_store
- fixed_size
) / htab
->params
->num_lines
;
4358 if (htab
->params
->line_size
!= 0)
4359 overlay_size
= htab
->params
->line_size
;
4362 while (base
< count
)
4364 unsigned int size
= 0;
4366 for (i
= base
; i
< count
; i
++)
4370 unsigned int num_stubs
;
4371 struct call_info
*call
, *pasty
;
4372 struct _spu_elf_section_data
*sec_data
;
4373 struct spu_elf_stack_info
*sinfo
;
4376 /* See whether we can add this section to the current
4377 overlay without overflowing our overlay buffer. */
4378 sec
= ovly_sections
[2 * i
];
4379 tmp
= size
+ sec
->size
;
4380 if (ovly_sections
[2 * i
+ 1])
4381 tmp
+= ovly_sections
[2 * i
+ 1]->size
;
4382 if (tmp
> overlay_size
)
4384 if (sec
->segment_mark
)
4386 /* Pasted sections must stay together, so add their
4388 struct call_info
*pasty
= find_pasted_call (sec
);
4389 while (pasty
!= NULL
)
4391 struct function_info
*call_fun
= pasty
->fun
;
4392 tmp
+= call_fun
->sec
->size
;
4393 if (call_fun
->rodata
)
4394 tmp
+= call_fun
->rodata
->size
;
4395 for (pasty
= call_fun
->call_list
; pasty
; pasty
= pasty
->next
)
4396 if (pasty
->is_pasted
)
4400 if (tmp
> overlay_size
)
4403 /* If we add this section, we might need new overlay call
4404 stubs. Add any overlay section calls to dummy_call. */
4406 sec_data
= spu_elf_section_data (sec
);
4407 sinfo
= sec_data
->u
.i
.stack_info
;
4408 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
4409 for (call
= sinfo
->fun
[k
].call_list
; call
; call
= call
->next
)
4410 if (call
->is_pasted
)
4412 BFD_ASSERT (pasty
== NULL
);
4415 else if (call
->fun
->sec
->linker_mark
)
4417 if (!copy_callee (&dummy_caller
, call
))
4420 while (pasty
!= NULL
)
4422 struct function_info
*call_fun
= pasty
->fun
;
4424 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
4425 if (call
->is_pasted
)
4427 BFD_ASSERT (pasty
== NULL
);
4430 else if (!copy_callee (&dummy_caller
, call
))
4434 /* Calculate call stub size. */
4436 for (call
= dummy_caller
.call_list
; call
; call
= call
->next
)
4441 /* If the call is within this overlay, we won't need a
4443 for (k
= base
; k
< i
+ 1; k
++)
4444 if (call
->fun
->sec
== ovly_sections
[2 * k
])
4450 if (htab
->params
->ovly_flavour
== ovly_soft_icache
4451 && num_stubs
> htab
->params
->max_branch
)
4453 if (tmp
+ num_stubs
* ovl_stub_size (htab
->params
)
4461 info
->callbacks
->einfo (_("%B:%A%s exceeds overlay size\n"),
4462 ovly_sections
[2 * i
]->owner
,
4463 ovly_sections
[2 * i
],
4464 ovly_sections
[2 * i
+ 1] ? " + rodata" : "");
4465 bfd_set_error (bfd_error_bad_value
);
4469 while (dummy_caller
.call_list
!= NULL
)
4471 struct call_info
*call
= dummy_caller
.call_list
;
4472 dummy_caller
.call_list
= call
->next
;
4478 ovly_map
[base
++] = ovlynum
;
4481 script
= htab
->params
->spu_elf_open_overlay_script ();
4483 if (fprintf (script
, "SECTIONS\n{\n") <= 0)
4486 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4488 if (fprintf (script
,
4489 " .data.icache ALIGN (16) : { *(.ovtab) *(.data.icache) }\n"
4490 " . = ALIGN (%u);\n"
4491 " .ovl.init : { *(.ovl.init) }\n"
4492 " . = ABSOLUTE (ADDR (.ovl.init));\n",
4493 htab
->params
->line_size
) <= 0)
4498 while (base
< count
)
4500 unsigned int indx
= ovlynum
- 1;
4501 unsigned int vma
, lma
;
4503 vma
= (indx
& (htab
->params
->num_lines
- 1)) << htab
->line_size_log2
;
4504 lma
= indx
<< htab
->line_size_log2
;
4506 if (fprintf (script
, " .ovly%u ABSOLUTE (ADDR (.ovl.init)) + %u "
4507 ": AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16) + %u) {\n",
4508 ovlynum
, vma
, lma
) <= 0)
4511 base
= print_one_overlay_section (script
, base
, count
, ovlynum
,
4512 ovly_map
, ovly_sections
, info
);
4513 if (base
== (unsigned) -1)
4516 if (fprintf (script
, " }\n") <= 0)
4522 if (fprintf (script
, " . = ABSOLUTE (ADDR (.ovl.init)) + %u;\n",
4523 1 << (htab
->num_lines_log2
+ htab
->line_size_log2
)) <= 0)
4528 if (fprintf (script
,
4529 " . = ALIGN (16);\n"
4530 " .ovl.init : { *(.ovl.init) }\n"
4531 " . = ABSOLUTE (ADDR (.ovl.init));\n") <= 0)
4534 for (region
= 1; region
<= htab
->params
->num_lines
; region
++)
4538 while (base
< count
&& ovly_map
[base
] < ovlynum
)
4546 /* We need to set lma since we are overlaying .ovl.init. */
4547 if (fprintf (script
,
4548 " OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))\n {\n") <= 0)
4553 if (fprintf (script
, " OVERLAY :\n {\n") <= 0)
4557 while (base
< count
)
4559 if (fprintf (script
, " .ovly%u {\n", ovlynum
) <= 0)
4562 base
= print_one_overlay_section (script
, base
, count
, ovlynum
,
4563 ovly_map
, ovly_sections
, info
);
4564 if (base
== (unsigned) -1)
4567 if (fprintf (script
, " }\n") <= 0)
4570 ovlynum
+= htab
->params
->num_lines
;
4571 while (base
< count
&& ovly_map
[base
] < ovlynum
)
4575 if (fprintf (script
, " }\n") <= 0)
4582 free (ovly_sections
);
4584 if (fprintf (script
, "}\nINSERT BEFORE .text;\n") <= 0)
4586 if (fclose (script
) != 0)
4589 if (htab
->params
->auto_overlay
& AUTO_RELINK
)
4590 (*htab
->params
->spu_elf_relink
) ();
4595 bfd_set_error (bfd_error_system_call
);
4597 info
->callbacks
->einfo ("%F%P: auto overlay error: %E\n");
4601 /* Provide an estimate of total stack required. */
4604 spu_elf_stack_analysis (struct bfd_link_info
*info
)
4606 struct spu_link_hash_table
*htab
;
4607 struct _sum_stack_param sum_stack_param
;
4609 if (!discover_functions (info
))
4612 if (!build_call_tree (info
))
4615 htab
= spu_hash_table (info
);
4616 if (htab
->params
->stack_analysis
)
4618 info
->callbacks
->info (_("Stack size for call graph root nodes.\n"));
4619 info
->callbacks
->minfo (_("\nStack size for functions. "
4620 "Annotations: '*' max stack, 't' tail call\n"));
4623 sum_stack_param
.emit_stack_syms
= htab
->params
->emit_stack_syms
;
4624 sum_stack_param
.overall_stack
= 0;
4625 if (!for_each_node (sum_stack
, info
, &sum_stack_param
, TRUE
))
4628 if (htab
->params
->stack_analysis
)
4629 info
->callbacks
->info (_("Maximum stack required is 0x%v\n"),
4630 (bfd_vma
) sum_stack_param
.overall_stack
);
4634 /* Perform a final link. */
4637 spu_elf_final_link (bfd
*output_bfd
, struct bfd_link_info
*info
)
4639 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
4641 if (htab
->params
->auto_overlay
)
4642 spu_elf_auto_overlay (info
);
4644 if ((htab
->params
->stack_analysis
4645 || (htab
->params
->ovly_flavour
== ovly_soft_icache
4646 && htab
->params
->lrlive_analysis
))
4647 && !spu_elf_stack_analysis (info
))
4648 info
->callbacks
->einfo ("%X%P: stack/lrlive analysis error: %E\n");
4650 if (!spu_elf_build_stubs (info
))
4651 info
->callbacks
->einfo ("%F%P: can not build overlay stubs: %E\n");
4653 return bfd_elf_final_link (output_bfd
, info
);
4656 /* Called when not normally emitting relocs, ie. !info->relocatable
4657 and !info->emitrelocations. Returns a count of special relocs
4658 that need to be emitted. */
4661 spu_elf_count_relocs (struct bfd_link_info
*info
, asection
*sec
)
4663 Elf_Internal_Rela
*relocs
;
4664 unsigned int count
= 0;
4666 relocs
= _bfd_elf_link_read_relocs (sec
->owner
, sec
, NULL
, NULL
,
4670 Elf_Internal_Rela
*rel
;
4671 Elf_Internal_Rela
*relend
= relocs
+ sec
->reloc_count
;
4673 for (rel
= relocs
; rel
< relend
; rel
++)
4675 int r_type
= ELF32_R_TYPE (rel
->r_info
);
4676 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
4680 if (elf_section_data (sec
)->relocs
!= relocs
)
4687 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
4690 spu_elf_relocate_section (bfd
*output_bfd
,
4691 struct bfd_link_info
*info
,
4693 asection
*input_section
,
4695 Elf_Internal_Rela
*relocs
,
4696 Elf_Internal_Sym
*local_syms
,
4697 asection
**local_sections
)
4699 Elf_Internal_Shdr
*symtab_hdr
;
4700 struct elf_link_hash_entry
**sym_hashes
;
4701 Elf_Internal_Rela
*rel
, *relend
;
4702 struct spu_link_hash_table
*htab
;
4705 bfd_boolean emit_these_relocs
= FALSE
;
4706 bfd_boolean is_ea_sym
;
4708 unsigned int iovl
= 0;
4710 htab
= spu_hash_table (info
);
4711 stubs
= (htab
->stub_sec
!= NULL
4712 && maybe_needs_stubs (input_section
));
4713 iovl
= overlay_index (input_section
);
4714 ea
= bfd_get_section_by_name (output_bfd
, "._ea");
4715 symtab_hdr
= &elf_tdata (input_bfd
)->symtab_hdr
;
4716 sym_hashes
= (struct elf_link_hash_entry
**) (elf_sym_hashes (input_bfd
));
4719 relend
= relocs
+ input_section
->reloc_count
;
4720 for (; rel
< relend
; rel
++)
4723 reloc_howto_type
*howto
;
4724 unsigned int r_symndx
;
4725 Elf_Internal_Sym
*sym
;
4727 struct elf_link_hash_entry
*h
;
4728 const char *sym_name
;
4731 bfd_reloc_status_type r
;
4732 bfd_boolean unresolved_reloc
;
4734 enum _stub_type stub_type
;
4736 r_symndx
= ELF32_R_SYM (rel
->r_info
);
4737 r_type
= ELF32_R_TYPE (rel
->r_info
);
4738 howto
= elf_howto_table
+ r_type
;
4739 unresolved_reloc
= FALSE
;
4744 if (r_symndx
< symtab_hdr
->sh_info
)
4746 sym
= local_syms
+ r_symndx
;
4747 sec
= local_sections
[r_symndx
];
4748 sym_name
= bfd_elf_sym_name (input_bfd
, symtab_hdr
, sym
, sec
);
4749 relocation
= _bfd_elf_rela_local_sym (output_bfd
, sym
, &sec
, rel
);
4753 if (sym_hashes
== NULL
)
4756 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
4758 while (h
->root
.type
== bfd_link_hash_indirect
4759 || h
->root
.type
== bfd_link_hash_warning
)
4760 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
4763 if (h
->root
.type
== bfd_link_hash_defined
4764 || h
->root
.type
== bfd_link_hash_defweak
)
4766 sec
= h
->root
.u
.def
.section
;
4768 || sec
->output_section
== NULL
)
4769 /* Set a flag that will be cleared later if we find a
4770 relocation value for this symbol. output_section
4771 is typically NULL for symbols satisfied by a shared
4773 unresolved_reloc
= TRUE
;
4775 relocation
= (h
->root
.u
.def
.value
4776 + sec
->output_section
->vma
4777 + sec
->output_offset
);
4779 else if (h
->root
.type
== bfd_link_hash_undefweak
)
4781 else if (info
->unresolved_syms_in_objects
== RM_IGNORE
4782 && ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
)
4784 else if (!info
->relocatable
4785 && !(r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
))
4788 err
= (info
->unresolved_syms_in_objects
== RM_GENERATE_ERROR
4789 || ELF_ST_VISIBILITY (h
->other
) != STV_DEFAULT
);
4790 if (!info
->callbacks
->undefined_symbol (info
,
4791 h
->root
.root
.string
,
4794 rel
->r_offset
, err
))
4798 sym_name
= h
->root
.root
.string
;
4801 if (sec
!= NULL
&& elf_discarded_section (sec
))
4803 /* For relocs against symbols from removed linkonce sections,
4804 or sections discarded by a linker script, we just want the
4805 section contents zeroed. Avoid any special processing. */
4806 _bfd_clear_contents (howto
, input_bfd
, contents
+ rel
->r_offset
);
4812 if (info
->relocatable
)
4815 is_ea_sym
= (ea
!= NULL
4817 && sec
->output_section
== ea
);
4819 /* If this symbol is in an overlay area, we may need to relocate
4820 to the overlay stub. */
4821 addend
= rel
->r_addend
;
4824 && (stub_type
= needs_ovl_stub (h
, sym
, sec
, input_section
, rel
,
4825 contents
, info
)) != no_stub
)
4827 unsigned int ovl
= 0;
4828 struct got_entry
*g
, **head
;
4830 if (stub_type
!= nonovl_stub
)
4834 head
= &h
->got
.glist
;
4836 head
= elf_local_got_ents (input_bfd
) + r_symndx
;
4838 for (g
= *head
; g
!= NULL
; g
= g
->next
)
4839 if (htab
->params
->ovly_flavour
== ovly_soft_icache
4840 ? g
->br_addr
== (rel
->r_offset
4841 + input_section
->output_offset
4842 + input_section
->output_section
->vma
)
4843 : g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
4848 relocation
= g
->stub_addr
;
4853 /* For soft icache, encode the overlay index into addresses. */
4854 if (htab
->params
->ovly_flavour
== ovly_soft_icache
4855 && (r_type
== R_SPU_ADDR16_HI
4856 || r_type
== R_SPU_ADDR32
|| r_type
== R_SPU_REL32
)
4859 unsigned int ovl
= overlay_index (sec
);
4862 unsigned int set_id
= ((ovl
- 1) >> htab
->num_lines_log2
) + 1;
4863 relocation
+= set_id
<< 18;
4868 if (unresolved_reloc
)
4870 else if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
4874 /* ._ea is a special section that isn't allocated in SPU
4875 memory, but rather occupies space in PPU memory as
4876 part of an embedded ELF image. If this reloc is
4877 against a symbol defined in ._ea, then transform the
4878 reloc into an equivalent one without a symbol
4879 relative to the start of the ELF image. */
4880 rel
->r_addend
+= (relocation
4882 + elf_section_data (ea
)->this_hdr
.sh_offset
);
4883 rel
->r_info
= ELF32_R_INFO (0, r_type
);
4885 emit_these_relocs
= TRUE
;
4889 unresolved_reloc
= TRUE
;
4891 if (unresolved_reloc
)
4893 (*_bfd_error_handler
)
4894 (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
4896 bfd_get_section_name (input_bfd
, input_section
),
4897 (long) rel
->r_offset
,
4903 r
= _bfd_final_link_relocate (howto
,
4907 rel
->r_offset
, relocation
, addend
);
4909 if (r
!= bfd_reloc_ok
)
4911 const char *msg
= (const char *) 0;
4915 case bfd_reloc_overflow
:
4916 if (!((*info
->callbacks
->reloc_overflow
)
4917 (info
, (h
? &h
->root
: NULL
), sym_name
, howto
->name
,
4918 (bfd_vma
) 0, input_bfd
, input_section
, rel
->r_offset
)))
4922 case bfd_reloc_undefined
:
4923 if (!((*info
->callbacks
->undefined_symbol
)
4924 (info
, sym_name
, input_bfd
, input_section
,
4925 rel
->r_offset
, TRUE
)))
4929 case bfd_reloc_outofrange
:
4930 msg
= _("internal error: out of range error");
4933 case bfd_reloc_notsupported
:
4934 msg
= _("internal error: unsupported relocation error");
4937 case bfd_reloc_dangerous
:
4938 msg
= _("internal error: dangerous error");
4942 msg
= _("internal error: unknown error");
4947 if (!((*info
->callbacks
->warning
)
4948 (info
, msg
, sym_name
, input_bfd
, input_section
,
4957 && emit_these_relocs
4958 && !info
->emitrelocations
)
4960 Elf_Internal_Rela
*wrel
;
4961 Elf_Internal_Shdr
*rel_hdr
;
4963 wrel
= rel
= relocs
;
4964 relend
= relocs
+ input_section
->reloc_count
;
4965 for (; rel
< relend
; rel
++)
4969 r_type
= ELF32_R_TYPE (rel
->r_info
);
4970 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
4973 input_section
->reloc_count
= wrel
- relocs
;
4974 /* Backflips for _bfd_elf_link_output_relocs. */
4975 rel_hdr
= &elf_section_data (input_section
)->rel_hdr
;
4976 rel_hdr
->sh_size
= input_section
->reloc_count
* rel_hdr
->sh_entsize
;
4983 /* Adjust _SPUEAR_ syms to point at their overlay stubs. */
4986 spu_elf_output_symbol_hook (struct bfd_link_info
*info
,
4987 const char *sym_name ATTRIBUTE_UNUSED
,
4988 Elf_Internal_Sym
*sym
,
4989 asection
*sym_sec ATTRIBUTE_UNUSED
,
4990 struct elf_link_hash_entry
*h
)
4992 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
4994 if (!info
->relocatable
4995 && htab
->stub_sec
!= NULL
4997 && (h
->root
.type
== bfd_link_hash_defined
4998 || h
->root
.type
== bfd_link_hash_defweak
)
5000 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0)
5002 struct got_entry
*g
;
5004 for (g
= h
->got
.glist
; g
!= NULL
; g
= g
->next
)
5005 if (htab
->params
->ovly_flavour
== ovly_soft_icache
5006 ? g
->br_addr
== g
->stub_addr
5007 : g
->addend
== 0 && g
->ovl
== 0)
5009 sym
->st_shndx
= (_bfd_elf_section_from_bfd_section
5010 (htab
->stub_sec
[0]->output_section
->owner
,
5011 htab
->stub_sec
[0]->output_section
));
5012 sym
->st_value
= g
->stub_addr
;
/* Non-zero when the output is an SPU "plugin" (embedded image);
   see spu_elf_post_process_headers.  */
static int spu_plugin = 0;

/* Record whether we are linking a plugin.  */

void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}
5028 /* Set ELF header e_type for plugins. */
5031 spu_elf_post_process_headers (bfd
*abfd
,
5032 struct bfd_link_info
*info ATTRIBUTE_UNUSED
)
5036 Elf_Internal_Ehdr
*i_ehdrp
= elf_elfheader (abfd
);
5038 i_ehdrp
->e_type
= ET_DYN
;
5042 /* We may add an extra PT_LOAD segment for .toe. We also need extra
5043 segments for overlays. */
5046 spu_elf_additional_program_headers (bfd
*abfd
, struct bfd_link_info
*info
)
5053 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
5054 extra
= htab
->num_overlays
;
5060 sec
= bfd_get_section_by_name (abfd
, ".toe");
5061 if (sec
!= NULL
&& (sec
->flags
& SEC_LOAD
) != 0)
5067 /* Remove .toe section from other PT_LOAD segments and put it in
5068 a segment of its own. Put overlays in separate segments too. */
5071 spu_elf_modify_segment_map (bfd
*abfd
, struct bfd_link_info
*info
)
5074 struct elf_segment_map
*m
, *m_overlay
;
5075 struct elf_segment_map
**p
, **p_overlay
;
5081 toe
= bfd_get_section_by_name (abfd
, ".toe");
5082 for (m
= elf_tdata (abfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
5083 if (m
->p_type
== PT_LOAD
&& m
->count
> 1)
5084 for (i
= 0; i
< m
->count
; i
++)
5085 if ((s
= m
->sections
[i
]) == toe
5086 || spu_elf_section_data (s
)->u
.o
.ovl_index
!= 0)
5088 struct elf_segment_map
*m2
;
5091 if (i
+ 1 < m
->count
)
5093 amt
= sizeof (struct elf_segment_map
);
5094 amt
+= (m
->count
- (i
+ 2)) * sizeof (m
->sections
[0]);
5095 m2
= bfd_zalloc (abfd
, amt
);
5098 m2
->count
= m
->count
- (i
+ 1);
5099 memcpy (m2
->sections
, m
->sections
+ i
+ 1,
5100 m2
->count
* sizeof (m
->sections
[0]));
5101 m2
->p_type
= PT_LOAD
;
5109 amt
= sizeof (struct elf_segment_map
);
5110 m2
= bfd_zalloc (abfd
, amt
);
5113 m2
->p_type
= PT_LOAD
;
5115 m2
->sections
[0] = s
;
5123 /* Some SPU ELF loaders ignore the PF_OVERLAY flag and just load all
5124 PT_LOAD segments. This can cause the .ovl.init section to be
5125 overwritten with the contents of some overlay segment. To work
5126 around this issue, we ensure that all PF_OVERLAY segments are
5127 sorted first amongst the program headers; this ensures that even
5128 with a broken loader, the .ovl.init section (which is not marked
5129 as PF_OVERLAY) will be placed into SPU local store on startup. */
5131 /* Move all overlay segments onto a separate list. */
5132 p
= &elf_tdata (abfd
)->segment_map
;
5133 p_overlay
= &m_overlay
;
5136 if ((*p
)->p_type
== PT_LOAD
&& (*p
)->count
== 1
5137 && spu_elf_section_data ((*p
)->sections
[0])->u
.o
.ovl_index
!= 0)
5139 struct elf_segment_map
*m
= *p
;
5142 p_overlay
= &m
->next
;
5149 /* Re-insert overlay segments at the head of the segment map. */
5150 *p_overlay
= elf_tdata (abfd
)->segment_map
;
5151 elf_tdata (abfd
)->segment_map
= m_overlay
;
5156 /* Tweak the section type of .note.spu_name. */
5159 spu_elf_fake_sections (bfd
*obfd ATTRIBUTE_UNUSED
,
5160 Elf_Internal_Shdr
*hdr
,
5163 if (strcmp (sec
->name
, SPU_PTNOTE_SPUNAME
) == 0)
5164 hdr
->sh_type
= SHT_NOTE
;
5168 /* Tweak phdrs before writing them out. */
5171 spu_elf_modify_program_headers (bfd
*abfd
, struct bfd_link_info
*info
)
5173 const struct elf_backend_data
*bed
;
5174 struct elf_obj_tdata
*tdata
;
5175 Elf_Internal_Phdr
*phdr
, *last
;
5176 struct spu_link_hash_table
*htab
;
5183 bed
= get_elf_backend_data (abfd
);
5184 tdata
= elf_tdata (abfd
);
5186 count
= tdata
->program_header_size
/ bed
->s
->sizeof_phdr
;
5187 htab
= spu_hash_table (info
);
5188 if (htab
->num_overlays
!= 0)
5190 struct elf_segment_map
*m
;
5193 for (i
= 0, m
= elf_tdata (abfd
)->segment_map
; m
; ++i
, m
= m
->next
)
5195 && (o
= spu_elf_section_data (m
->sections
[0])->u
.o
.ovl_index
) != 0)
5197 /* Mark this as an overlay header. */
5198 phdr
[i
].p_flags
|= PF_OVERLAY
;
5200 if (htab
->ovtab
!= NULL
&& htab
->ovtab
->size
!= 0
5201 && htab
->params
->ovly_flavour
!= ovly_soft_icache
)
5203 bfd_byte
*p
= htab
->ovtab
->contents
;
5204 unsigned int off
= o
* 16 + 8;
5206 /* Write file_off into _ovly_table. */
5207 bfd_put_32 (htab
->ovtab
->owner
, phdr
[i
].p_offset
, p
+ off
);
5210 /* Soft-icache has its file offset put in .ovl.init. */
5211 if (htab
->init
!= NULL
&& htab
->init
->size
!= 0)
5213 bfd_vma val
= elf_section_data (htab
->ovl_sec
[0])->this_hdr
.sh_offset
;
5215 bfd_put_32 (htab
->init
->owner
, val
, htab
->init
->contents
+ 4);
5219 /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
5220 of 16. This should always be possible when using the standard
5221 linker scripts, but don't create overlapping segments if
5222 someone is playing games with linker scripts. */
5224 for (i
= count
; i
-- != 0; )
5225 if (phdr
[i
].p_type
== PT_LOAD
)
5229 adjust
= -phdr
[i
].p_filesz
& 15;
5232 && phdr
[i
].p_offset
+ phdr
[i
].p_filesz
> last
->p_offset
- adjust
)
5235 adjust
= -phdr
[i
].p_memsz
& 15;
5238 && phdr
[i
].p_filesz
!= 0
5239 && phdr
[i
].p_vaddr
+ phdr
[i
].p_memsz
> last
->p_vaddr
- adjust
5240 && phdr
[i
].p_vaddr
+ phdr
[i
].p_memsz
<= last
->p_vaddr
)
5243 if (phdr
[i
].p_filesz
!= 0)
5247 if (i
== (unsigned int) -1)
5248 for (i
= count
; i
-- != 0; )
5249 if (phdr
[i
].p_type
== PT_LOAD
)
5253 adjust
= -phdr
[i
].p_filesz
& 15;
5254 phdr
[i
].p_filesz
+= adjust
;
5256 adjust
= -phdr
[i
].p_memsz
& 15;
5257 phdr
[i
].p_memsz
+= adjust
;
5263 #define TARGET_BIG_SYM bfd_elf32_spu_vec
5264 #define TARGET_BIG_NAME "elf32-spu"
5265 #define ELF_ARCH bfd_arch_spu
5266 #define ELF_MACHINE_CODE EM_SPU
5267 /* This matches the alignment need for DMA. */
5268 #define ELF_MAXPAGESIZE 0x80
5269 #define elf_backend_rela_normal 1
5270 #define elf_backend_can_gc_sections 1
5272 #define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
5273 #define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
5274 #define elf_info_to_howto spu_elf_info_to_howto
5275 #define elf_backend_count_relocs spu_elf_count_relocs
5276 #define elf_backend_relocate_section spu_elf_relocate_section
5277 #define elf_backend_symbol_processing spu_elf_backend_symbol_processing
5278 #define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
5279 #define elf_backend_object_p spu_elf_object_p
5280 #define bfd_elf32_new_section_hook spu_elf_new_section_hook
5281 #define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create
5283 #define elf_backend_additional_program_headers spu_elf_additional_program_headers
5284 #define elf_backend_modify_segment_map spu_elf_modify_segment_map
5285 #define elf_backend_modify_program_headers spu_elf_modify_program_headers
5286 #define elf_backend_post_process_headers spu_elf_post_process_headers
5287 #define elf_backend_fake_sections spu_elf_fake_sections
5288 #define elf_backend_special_sections spu_elf_special_sections
5289 #define bfd_elf32_bfd_final_link spu_elf_final_link
5291 #include "elf32-target.h"