1 /* SPU specific support for 32-bit ELF
3 Copyright (C) 2006-2015 Free Software Foundation, Inc.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
22 #include "libiberty.h"
28 #include "elf32-spu.h"
30 /* We use RELA style relocs. Don't define USE_REL. */
32 static bfd_reloc_status_type
spu_elf_rel9 (bfd
*, arelent
*, asymbol
*,
36 /* Values of type 'enum elf_spu_reloc_type' are used to index this
37 array, so it must be declared in the order of that type. */
39 static reloc_howto_type elf_howto_table
[] = {
40 HOWTO (R_SPU_NONE
, 0, 0, 0, FALSE
, 0, complain_overflow_dont
,
41 bfd_elf_generic_reloc
, "SPU_NONE",
42 FALSE
, 0, 0x00000000, FALSE
),
43 HOWTO (R_SPU_ADDR10
, 4, 2, 10, FALSE
, 14, complain_overflow_bitfield
,
44 bfd_elf_generic_reloc
, "SPU_ADDR10",
45 FALSE
, 0, 0x00ffc000, FALSE
),
46 HOWTO (R_SPU_ADDR16
, 2, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
47 bfd_elf_generic_reloc
, "SPU_ADDR16",
48 FALSE
, 0, 0x007fff80, FALSE
),
49 HOWTO (R_SPU_ADDR16_HI
, 16, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
50 bfd_elf_generic_reloc
, "SPU_ADDR16_HI",
51 FALSE
, 0, 0x007fff80, FALSE
),
52 HOWTO (R_SPU_ADDR16_LO
, 0, 2, 16, FALSE
, 7, complain_overflow_dont
,
53 bfd_elf_generic_reloc
, "SPU_ADDR16_LO",
54 FALSE
, 0, 0x007fff80, FALSE
),
55 HOWTO (R_SPU_ADDR18
, 0, 2, 18, FALSE
, 7, complain_overflow_bitfield
,
56 bfd_elf_generic_reloc
, "SPU_ADDR18",
57 FALSE
, 0, 0x01ffff80, FALSE
),
58 HOWTO (R_SPU_ADDR32
, 0, 2, 32, FALSE
, 0, complain_overflow_dont
,
59 bfd_elf_generic_reloc
, "SPU_ADDR32",
60 FALSE
, 0, 0xffffffff, FALSE
),
61 HOWTO (R_SPU_REL16
, 2, 2, 16, TRUE
, 7, complain_overflow_bitfield
,
62 bfd_elf_generic_reloc
, "SPU_REL16",
63 FALSE
, 0, 0x007fff80, TRUE
),
64 HOWTO (R_SPU_ADDR7
, 0, 2, 7, FALSE
, 14, complain_overflow_dont
,
65 bfd_elf_generic_reloc
, "SPU_ADDR7",
66 FALSE
, 0, 0x001fc000, FALSE
),
67 HOWTO (R_SPU_REL9
, 2, 2, 9, TRUE
, 0, complain_overflow_signed
,
68 spu_elf_rel9
, "SPU_REL9",
69 FALSE
, 0, 0x0180007f, TRUE
),
70 HOWTO (R_SPU_REL9I
, 2, 2, 9, TRUE
, 0, complain_overflow_signed
,
71 spu_elf_rel9
, "SPU_REL9I",
72 FALSE
, 0, 0x0000c07f, TRUE
),
73 HOWTO (R_SPU_ADDR10I
, 0, 2, 10, FALSE
, 14, complain_overflow_signed
,
74 bfd_elf_generic_reloc
, "SPU_ADDR10I",
75 FALSE
, 0, 0x00ffc000, FALSE
),
76 HOWTO (R_SPU_ADDR16I
, 0, 2, 16, FALSE
, 7, complain_overflow_signed
,
77 bfd_elf_generic_reloc
, "SPU_ADDR16I",
78 FALSE
, 0, 0x007fff80, FALSE
),
79 HOWTO (R_SPU_REL32
, 0, 2, 32, TRUE
, 0, complain_overflow_dont
,
80 bfd_elf_generic_reloc
, "SPU_REL32",
81 FALSE
, 0, 0xffffffff, TRUE
),
82 HOWTO (R_SPU_ADDR16X
, 0, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
83 bfd_elf_generic_reloc
, "SPU_ADDR16X",
84 FALSE
, 0, 0x007fff80, FALSE
),
85 HOWTO (R_SPU_PPU32
, 0, 2, 32, FALSE
, 0, complain_overflow_dont
,
86 bfd_elf_generic_reloc
, "SPU_PPU32",
87 FALSE
, 0, 0xffffffff, FALSE
),
88 HOWTO (R_SPU_PPU64
, 0, 4, 64, FALSE
, 0, complain_overflow_dont
,
89 bfd_elf_generic_reloc
, "SPU_PPU64",
91 HOWTO (R_SPU_ADD_PIC
, 0, 0, 0, FALSE
, 0, complain_overflow_dont
,
92 bfd_elf_generic_reloc
, "SPU_ADD_PIC",
93 FALSE
, 0, 0x00000000, FALSE
),
96 static struct bfd_elf_special_section
const spu_elf_special_sections
[] = {
97 { "._ea", 4, 0, SHT_PROGBITS
, SHF_WRITE
},
98 { ".toe", 4, 0, SHT_NOBITS
, SHF_ALLOC
},
102 static enum elf_spu_reloc_type
103 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code
)
109 case BFD_RELOC_SPU_IMM10W
:
111 case BFD_RELOC_SPU_IMM16W
:
113 case BFD_RELOC_SPU_LO16
:
114 return R_SPU_ADDR16_LO
;
115 case BFD_RELOC_SPU_HI16
:
116 return R_SPU_ADDR16_HI
;
117 case BFD_RELOC_SPU_IMM18
:
119 case BFD_RELOC_SPU_PCREL16
:
121 case BFD_RELOC_SPU_IMM7
:
123 case BFD_RELOC_SPU_IMM8
:
125 case BFD_RELOC_SPU_PCREL9a
:
127 case BFD_RELOC_SPU_PCREL9b
:
129 case BFD_RELOC_SPU_IMM10
:
130 return R_SPU_ADDR10I
;
131 case BFD_RELOC_SPU_IMM16
:
132 return R_SPU_ADDR16I
;
135 case BFD_RELOC_32_PCREL
:
137 case BFD_RELOC_SPU_PPU32
:
139 case BFD_RELOC_SPU_PPU64
:
141 case BFD_RELOC_SPU_ADD_PIC
:
142 return R_SPU_ADD_PIC
;
147 spu_elf_info_to_howto (bfd
*abfd ATTRIBUTE_UNUSED
,
149 Elf_Internal_Rela
*dst
)
151 enum elf_spu_reloc_type r_type
;
153 r_type
= (enum elf_spu_reloc_type
) ELF32_R_TYPE (dst
->r_info
);
154 /* PR 17512: file: 90c2a92e. */
155 if (r_type
>= R_SPU_max
)
157 (*_bfd_error_handler
) (_("%A: unrecognised SPU reloc number: %d"),
159 bfd_set_error (bfd_error_bad_value
);
162 cache_ptr
->howto
= &elf_howto_table
[(int) r_type
];
165 static reloc_howto_type
*
166 spu_elf_reloc_type_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
167 bfd_reloc_code_real_type code
)
169 enum elf_spu_reloc_type r_type
= spu_elf_bfd_to_reloc_type (code
);
171 if (r_type
== R_SPU_NONE
)
174 return elf_howto_table
+ r_type
;
177 static reloc_howto_type
*
178 spu_elf_reloc_name_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
183 for (i
= 0; i
< sizeof (elf_howto_table
) / sizeof (elf_howto_table
[0]); i
++)
184 if (elf_howto_table
[i
].name
!= NULL
185 && strcasecmp (elf_howto_table
[i
].name
, r_name
) == 0)
186 return &elf_howto_table
[i
];
191 /* Apply R_SPU_REL9 and R_SPU_REL9I relocs. */
193 static bfd_reloc_status_type
194 spu_elf_rel9 (bfd
*abfd
, arelent
*reloc_entry
, asymbol
*symbol
,
195 void *data
, asection
*input_section
,
196 bfd
*output_bfd
, char **error_message
)
198 bfd_size_type octets
;
202 /* If this is a relocatable link (output_bfd test tells us), just
203 call the generic function. Any adjustment will be done at final
205 if (output_bfd
!= NULL
)
206 return bfd_elf_generic_reloc (abfd
, reloc_entry
, symbol
, data
,
207 input_section
, output_bfd
, error_message
);
209 if (reloc_entry
->address
> bfd_get_section_limit (abfd
, input_section
))
210 return bfd_reloc_outofrange
;
211 octets
= reloc_entry
->address
* bfd_octets_per_byte (abfd
);
213 /* Get symbol value. */
215 if (!bfd_is_com_section (symbol
->section
))
217 if (symbol
->section
->output_section
)
218 val
+= symbol
->section
->output_section
->vma
;
220 val
+= reloc_entry
->addend
;
222 /* Make it pc-relative. */
223 val
-= input_section
->output_section
->vma
+ input_section
->output_offset
;
226 if (val
+ 256 >= 512)
227 return bfd_reloc_overflow
;
229 insn
= bfd_get_32 (abfd
, (bfd_byte
*) data
+ octets
);
231 /* Move two high bits of value to REL9I and REL9 position.
232 The mask will take care of selecting the right field. */
233 val
= (val
& 0x7f) | ((val
& 0x180) << 7) | ((val
& 0x180) << 16);
234 insn
&= ~reloc_entry
->howto
->dst_mask
;
235 insn
|= val
& reloc_entry
->howto
->dst_mask
;
236 bfd_put_32 (abfd
, insn
, (bfd_byte
*) data
+ octets
);
241 spu_elf_new_section_hook (bfd
*abfd
, asection
*sec
)
243 if (!sec
->used_by_bfd
)
245 struct _spu_elf_section_data
*sdata
;
247 sdata
= bfd_zalloc (abfd
, sizeof (*sdata
));
250 sec
->used_by_bfd
= sdata
;
253 return _bfd_elf_new_section_hook (abfd
, sec
);
256 /* Set up overlay info for executables. */
259 spu_elf_object_p (bfd
*abfd
)
261 if ((abfd
->flags
& (EXEC_P
| DYNAMIC
)) != 0)
263 unsigned int i
, num_ovl
, num_buf
;
264 Elf_Internal_Phdr
*phdr
= elf_tdata (abfd
)->phdr
;
265 Elf_Internal_Ehdr
*ehdr
= elf_elfheader (abfd
);
266 Elf_Internal_Phdr
*last_phdr
= NULL
;
268 for (num_buf
= 0, num_ovl
= 0, i
= 0; i
< ehdr
->e_phnum
; i
++, phdr
++)
269 if (phdr
->p_type
== PT_LOAD
&& (phdr
->p_flags
& PF_OVERLAY
) != 0)
274 if (last_phdr
== NULL
275 || ((last_phdr
->p_vaddr
^ phdr
->p_vaddr
) & 0x3ffff) != 0)
278 for (j
= 1; j
< elf_numsections (abfd
); j
++)
280 Elf_Internal_Shdr
*shdr
= elf_elfsections (abfd
)[j
];
282 if (ELF_SECTION_SIZE (shdr
, phdr
) != 0
283 && ELF_SECTION_IN_SEGMENT (shdr
, phdr
))
285 asection
*sec
= shdr
->bfd_section
;
286 spu_elf_section_data (sec
)->u
.o
.ovl_index
= num_ovl
;
287 spu_elf_section_data (sec
)->u
.o
.ovl_buf
= num_buf
;
295 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
296 strip --strip-unneeded will not remove them. */
299 spu_elf_backend_symbol_processing (bfd
*abfd ATTRIBUTE_UNUSED
, asymbol
*sym
)
301 if (sym
->name
!= NULL
302 && sym
->section
!= bfd_abs_section_ptr
303 && strncmp (sym
->name
, "_EAR_", 5) == 0)
304 sym
->flags
|= BSF_KEEP
;
307 /* SPU ELF linker hash table. */
309 struct spu_link_hash_table
311 struct elf_link_hash_table elf
;
313 struct spu_elf_params
*params
;
315 /* Shortcuts to overlay sections. */
321 /* Count of stubs in each overlay section. */
322 unsigned int *stub_count
;
324 /* The stub section for each overlay section. */
327 struct elf_link_hash_entry
*ovly_entry
[2];
329 /* Number of overlay buffers. */
330 unsigned int num_buf
;
332 /* Total number of overlays. */
333 unsigned int num_overlays
;
335 /* For soft icache. */
336 unsigned int line_size_log2
;
337 unsigned int num_lines_log2
;
338 unsigned int fromelem_size_log2
;
340 /* How much memory we have. */
341 unsigned int local_store
;
343 /* Count of overlay stubs needed in non-overlay area. */
344 unsigned int non_ovly_stub
;
346 /* Pointer to the fixup section */
350 unsigned int stub_err
: 1;
353 /* Hijack the generic got fields for overlay stub accounting. */
357 struct got_entry
*next
;
366 #define spu_hash_table(p) \
367 (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
368 == SPU_ELF_DATA ? ((struct spu_link_hash_table *) ((p)->hash)) : NULL)
372 struct function_info
*fun
;
373 struct call_info
*next
;
375 unsigned int max_depth
;
376 unsigned int is_tail
: 1;
377 unsigned int is_pasted
: 1;
378 unsigned int broken_cycle
: 1;
379 unsigned int priority
: 13;
384 /* List of functions called. Also branches to hot/cold part of
386 struct call_info
*call_list
;
387 /* For hot/cold part of function, point to owner. */
388 struct function_info
*start
;
389 /* Symbol at start of function. */
391 Elf_Internal_Sym
*sym
;
392 struct elf_link_hash_entry
*h
;
394 /* Function section. */
397 /* Where last called from, and number of sections called from. */
398 asection
*last_caller
;
399 unsigned int call_count
;
400 /* Address range of (this part of) function. */
402 /* Offset where we found a store of lr, or -1 if none found. */
404 /* Offset where we found the stack adjustment insn. */
408 /* Distance from root of call tree. Tail and hot/cold branches
409 count as one deeper. We aren't counting stack frames here. */
411 /* Set if global symbol. */
412 unsigned int global
: 1;
413 /* Set if known to be start of function (as distinct from a hunk
414 in hot/cold section. */
415 unsigned int is_func
: 1;
416 /* Set if not a root node. */
417 unsigned int non_root
: 1;
418 /* Flags used during call tree traversal. It's cheaper to replicate
419 the visit flags than have one which needs clearing after a traversal. */
420 unsigned int visit1
: 1;
421 unsigned int visit2
: 1;
422 unsigned int marking
: 1;
423 unsigned int visit3
: 1;
424 unsigned int visit4
: 1;
425 unsigned int visit5
: 1;
426 unsigned int visit6
: 1;
427 unsigned int visit7
: 1;
430 struct spu_elf_stack_info
434 /* Variable size array describing functions, one per contiguous
435 address range belonging to a function. */
436 struct function_info fun
[1];
439 static struct function_info
*find_function (asection
*, bfd_vma
,
440 struct bfd_link_info
*);
442 /* Create a spu ELF linker hash table. */
444 static struct bfd_link_hash_table
*
445 spu_elf_link_hash_table_create (bfd
*abfd
)
447 struct spu_link_hash_table
*htab
;
449 htab
= bfd_zmalloc (sizeof (*htab
));
453 if (!_bfd_elf_link_hash_table_init (&htab
->elf
, abfd
,
454 _bfd_elf_link_hash_newfunc
,
455 sizeof (struct elf_link_hash_entry
),
462 htab
->elf
.init_got_refcount
.refcount
= 0;
463 htab
->elf
.init_got_refcount
.glist
= NULL
;
464 htab
->elf
.init_got_offset
.offset
= 0;
465 htab
->elf
.init_got_offset
.glist
= NULL
;
466 return &htab
->elf
.root
;
470 spu_elf_setup (struct bfd_link_info
*info
, struct spu_elf_params
*params
)
472 bfd_vma max_branch_log2
;
474 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
475 htab
->params
= params
;
476 htab
->line_size_log2
= bfd_log2 (htab
->params
->line_size
);
477 htab
->num_lines_log2
= bfd_log2 (htab
->params
->num_lines
);
479 /* For the software i-cache, we provide a "from" list whose size
480 is a power-of-two number of quadwords, big enough to hold one
481 byte per outgoing branch. Compute this number here. */
482 max_branch_log2
= bfd_log2 (htab
->params
->max_branch
);
483 htab
->fromelem_size_log2
= max_branch_log2
> 4 ? max_branch_log2
- 4 : 0;
486 /* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
487 to (hash, NULL) for global symbols, and (NULL, sym) for locals. Set
488 *SYMSECP to the symbol's section. *LOCSYMSP caches local syms. */
491 get_sym_h (struct elf_link_hash_entry
**hp
,
492 Elf_Internal_Sym
**symp
,
494 Elf_Internal_Sym
**locsymsp
,
495 unsigned long r_symndx
,
498 Elf_Internal_Shdr
*symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
500 if (r_symndx
>= symtab_hdr
->sh_info
)
502 struct elf_link_hash_entry
**sym_hashes
= elf_sym_hashes (ibfd
);
503 struct elf_link_hash_entry
*h
;
505 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
506 while (h
->root
.type
== bfd_link_hash_indirect
507 || h
->root
.type
== bfd_link_hash_warning
)
508 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
518 asection
*symsec
= NULL
;
519 if (h
->root
.type
== bfd_link_hash_defined
520 || h
->root
.type
== bfd_link_hash_defweak
)
521 symsec
= h
->root
.u
.def
.section
;
527 Elf_Internal_Sym
*sym
;
528 Elf_Internal_Sym
*locsyms
= *locsymsp
;
532 locsyms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
534 locsyms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
,
536 0, NULL
, NULL
, NULL
);
541 sym
= locsyms
+ r_symndx
;
550 *symsecp
= bfd_section_from_elf_index (ibfd
, sym
->st_shndx
);
556 /* Create the note section if not already present. This is done early so
557 that the linker maps the sections to the right place in the output. */
560 spu_elf_create_sections (struct bfd_link_info
*info
)
562 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
565 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
566 if (bfd_get_section_by_name (ibfd
, SPU_PTNOTE_SPUNAME
) != NULL
)
571 /* Make SPU_PTNOTE_SPUNAME section. */
578 ibfd
= info
->input_bfds
;
579 flags
= SEC_LOAD
| SEC_READONLY
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
580 s
= bfd_make_section_anyway_with_flags (ibfd
, SPU_PTNOTE_SPUNAME
, flags
);
582 || !bfd_set_section_alignment (ibfd
, s
, 4))
585 name_len
= strlen (bfd_get_filename (info
->output_bfd
)) + 1;
586 size
= 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4);
587 size
+= (name_len
+ 3) & -4;
589 if (!bfd_set_section_size (ibfd
, s
, size
))
592 data
= bfd_zalloc (ibfd
, size
);
596 bfd_put_32 (ibfd
, sizeof (SPU_PLUGIN_NAME
), data
+ 0);
597 bfd_put_32 (ibfd
, name_len
, data
+ 4);
598 bfd_put_32 (ibfd
, 1, data
+ 8);
599 memcpy (data
+ 12, SPU_PLUGIN_NAME
, sizeof (SPU_PLUGIN_NAME
));
600 memcpy (data
+ 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4),
601 bfd_get_filename (info
->output_bfd
), name_len
);
605 if (htab
->params
->emit_fixups
)
610 if (htab
->elf
.dynobj
== NULL
)
611 htab
->elf
.dynobj
= ibfd
;
612 ibfd
= htab
->elf
.dynobj
;
613 flags
= (SEC_LOAD
| SEC_ALLOC
| SEC_READONLY
| SEC_HAS_CONTENTS
614 | SEC_IN_MEMORY
| SEC_LINKER_CREATED
);
615 s
= bfd_make_section_anyway_with_flags (ibfd
, ".fixup", flags
);
616 if (s
== NULL
|| !bfd_set_section_alignment (ibfd
, s
, 2))
624 /* qsort predicate to sort sections by vma. */
627 sort_sections (const void *a
, const void *b
)
629 const asection
*const *s1
= a
;
630 const asection
*const *s2
= b
;
631 bfd_signed_vma delta
= (*s1
)->vma
- (*s2
)->vma
;
634 return delta
< 0 ? -1 : 1;
636 return (*s1
)->index
- (*s2
)->index
;
639 /* Identify overlays in the output bfd, and number them.
640 Returns 0 on error, 1 if no overlays, 2 if overlays. */
643 spu_elf_find_overlays (struct bfd_link_info
*info
)
645 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
646 asection
**alloc_sec
;
647 unsigned int i
, n
, ovl_index
, num_buf
;
650 static const char *const entry_names
[2][2] = {
651 { "__ovly_load", "__icache_br_handler" },
652 { "__ovly_return", "__icache_call_handler" }
655 if (info
->output_bfd
->section_count
< 2)
659 = bfd_malloc (info
->output_bfd
->section_count
* sizeof (*alloc_sec
));
660 if (alloc_sec
== NULL
)
663 /* Pick out all the alloced sections. */
664 for (n
= 0, s
= info
->output_bfd
->sections
; s
!= NULL
; s
= s
->next
)
665 if ((s
->flags
& SEC_ALLOC
) != 0
666 && (s
->flags
& (SEC_LOAD
| SEC_THREAD_LOCAL
)) != SEC_THREAD_LOCAL
676 /* Sort them by vma. */
677 qsort (alloc_sec
, n
, sizeof (*alloc_sec
), sort_sections
);
679 ovl_end
= alloc_sec
[0]->vma
+ alloc_sec
[0]->size
;
680 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
682 unsigned int prev_buf
= 0, set_id
= 0;
684 /* Look for an overlapping vma to find the first overlay section. */
685 bfd_vma vma_start
= 0;
687 for (i
= 1; i
< n
; i
++)
690 if (s
->vma
< ovl_end
)
692 asection
*s0
= alloc_sec
[i
- 1];
696 << (htab
->num_lines_log2
+ htab
->line_size_log2
)));
701 ovl_end
= s
->vma
+ s
->size
;
704 /* Now find any sections within the cache area. */
705 for (ovl_index
= 0, num_buf
= 0; i
< n
; i
++)
708 if (s
->vma
>= ovl_end
)
711 /* A section in an overlay area called .ovl.init is not
712 an overlay, in the sense that it might be loaded in
713 by the overlay manager, but rather the initial
714 section contents for the overlay buffer. */
715 if (strncmp (s
->name
, ".ovl.init", 9) != 0)
717 num_buf
= ((s
->vma
- vma_start
) >> htab
->line_size_log2
) + 1;
718 set_id
= (num_buf
== prev_buf
)? set_id
+ 1 : 0;
721 if ((s
->vma
- vma_start
) & (htab
->params
->line_size
- 1))
723 info
->callbacks
->einfo (_("%X%P: overlay section %A "
724 "does not start on a cache line.\n"),
726 bfd_set_error (bfd_error_bad_value
);
729 else if (s
->size
> htab
->params
->line_size
)
731 info
->callbacks
->einfo (_("%X%P: overlay section %A "
732 "is larger than a cache line.\n"),
734 bfd_set_error (bfd_error_bad_value
);
738 alloc_sec
[ovl_index
++] = s
;
739 spu_elf_section_data (s
)->u
.o
.ovl_index
740 = (set_id
<< htab
->num_lines_log2
) + num_buf
;
741 spu_elf_section_data (s
)->u
.o
.ovl_buf
= num_buf
;
745 /* Ensure there are no more overlay sections. */
749 if (s
->vma
< ovl_end
)
751 info
->callbacks
->einfo (_("%X%P: overlay section %A "
752 "is not in cache area.\n"),
754 bfd_set_error (bfd_error_bad_value
);
758 ovl_end
= s
->vma
+ s
->size
;
763 /* Look for overlapping vmas. Any with overlap must be overlays.
764 Count them. Also count the number of overlay regions. */
765 for (ovl_index
= 0, num_buf
= 0, i
= 1; i
< n
; i
++)
768 if (s
->vma
< ovl_end
)
770 asection
*s0
= alloc_sec
[i
- 1];
772 if (spu_elf_section_data (s0
)->u
.o
.ovl_index
== 0)
775 if (strncmp (s0
->name
, ".ovl.init", 9) != 0)
777 alloc_sec
[ovl_index
] = s0
;
778 spu_elf_section_data (s0
)->u
.o
.ovl_index
= ++ovl_index
;
779 spu_elf_section_data (s0
)->u
.o
.ovl_buf
= num_buf
;
782 ovl_end
= s
->vma
+ s
->size
;
784 if (strncmp (s
->name
, ".ovl.init", 9) != 0)
786 alloc_sec
[ovl_index
] = s
;
787 spu_elf_section_data (s
)->u
.o
.ovl_index
= ++ovl_index
;
788 spu_elf_section_data (s
)->u
.o
.ovl_buf
= num_buf
;
789 if (s0
->vma
!= s
->vma
)
791 info
->callbacks
->einfo (_("%X%P: overlay sections %A "
792 "and %A do not start at the "
795 bfd_set_error (bfd_error_bad_value
);
798 if (ovl_end
< s
->vma
+ s
->size
)
799 ovl_end
= s
->vma
+ s
->size
;
803 ovl_end
= s
->vma
+ s
->size
;
807 htab
->num_overlays
= ovl_index
;
808 htab
->num_buf
= num_buf
;
809 htab
->ovl_sec
= alloc_sec
;
814 for (i
= 0; i
< 2; i
++)
817 struct elf_link_hash_entry
*h
;
819 name
= entry_names
[i
][htab
->params
->ovly_flavour
];
820 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, FALSE
, FALSE
);
824 if (h
->root
.type
== bfd_link_hash_new
)
826 h
->root
.type
= bfd_link_hash_undefined
;
828 h
->ref_regular_nonweak
= 1;
831 htab
->ovly_entry
[i
] = h
;
/* Non-zero to use bra in overlay stubs rather than br.  */
#define BRA_STUBS 0

/* SPU instruction opcodes used when emitting overlay stubs.  */
#define BRA	0x30000000
#define BRASL	0x31000000
#define BR	0x32000000
#define BRSL	0x33000000
#define NOP	0x40200000
#define LNOP	0x00200000
#define ILA	0x42000000
848 /* Return true for all relative and absolute branch instructions.
856 brhnz 00100011 0.. */
859 is_branch (const unsigned char *insn
)
861 return (insn
[0] & 0xec) == 0x20 && (insn
[1] & 0x80) == 0;
864 /* Return true for all indirect branch instructions.
872 bihnz 00100101 011 */
875 is_indirect_branch (const unsigned char *insn
)
877 return (insn
[0] & 0xef) == 0x25 && (insn
[1] & 0x80) == 0;
880 /* Return true for branch hint instructions.
885 is_hint (const unsigned char *insn
)
887 return (insn
[0] & 0xfc) == 0x10;
890 /* True if INPUT_SECTION might need overlay stubs. */
893 maybe_needs_stubs (asection
*input_section
)
895 /* No stubs for debug sections and suchlike. */
896 if ((input_section
->flags
& SEC_ALLOC
) == 0)
899 /* No stubs for link-once sections that will be discarded. */
900 if (input_section
->output_section
== bfd_abs_section_ptr
)
903 /* Don't create stubs for .eh_frame references. */
904 if (strcmp (input_section
->name
, ".eh_frame") == 0)
926 /* Return non-zero if this reloc symbol should go via an overlay stub.
927 Return 2 if the stub must be in non-overlay area. */
929 static enum _stub_type
930 needs_ovl_stub (struct elf_link_hash_entry
*h
,
931 Elf_Internal_Sym
*sym
,
933 asection
*input_section
,
934 Elf_Internal_Rela
*irela
,
936 struct bfd_link_info
*info
)
938 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
939 enum elf_spu_reloc_type r_type
;
940 unsigned int sym_type
;
941 bfd_boolean branch
, hint
, call
;
942 enum _stub_type ret
= no_stub
;
946 || sym_sec
->output_section
== bfd_abs_section_ptr
947 || spu_elf_section_data (sym_sec
->output_section
) == NULL
)
952 /* Ensure no stubs for user supplied overlay manager syms. */
953 if (h
== htab
->ovly_entry
[0] || h
== htab
->ovly_entry
[1])
956 /* setjmp always goes via an overlay stub, because then the return
957 and hence the longjmp goes via __ovly_return. That magically
958 makes setjmp/longjmp between overlays work. */
959 if (strncmp (h
->root
.root
.string
, "setjmp", 6) == 0
960 && (h
->root
.root
.string
[6] == '\0' || h
->root
.root
.string
[6] == '@'))
967 sym_type
= ELF_ST_TYPE (sym
->st_info
);
969 r_type
= ELF32_R_TYPE (irela
->r_info
);
973 if (r_type
== R_SPU_REL16
|| r_type
== R_SPU_ADDR16
)
975 if (contents
== NULL
)
978 if (!bfd_get_section_contents (input_section
->owner
,
985 contents
+= irela
->r_offset
;
987 branch
= is_branch (contents
);
988 hint
= is_hint (contents
);
991 call
= (contents
[0] & 0xfd) == 0x31;
993 && sym_type
!= STT_FUNC
996 /* It's common for people to write assembly and forget
997 to give function symbols the right type. Handle
998 calls to such symbols, but warn so that (hopefully)
999 people will fix their code. We need the symbol
1000 type to be correct to distinguish function pointer
1001 initialisation from other pointer initialisations. */
1002 const char *sym_name
;
1005 sym_name
= h
->root
.root
.string
;
1008 Elf_Internal_Shdr
*symtab_hdr
;
1009 symtab_hdr
= &elf_tdata (input_section
->owner
)->symtab_hdr
;
1010 sym_name
= bfd_elf_sym_name (input_section
->owner
,
1015 (*_bfd_error_handler
) (_("warning: call to non-function"
1016 " symbol %s defined in %B"),
1017 sym_sec
->owner
, sym_name
);
1023 if ((!branch
&& htab
->params
->ovly_flavour
== ovly_soft_icache
)
1024 || (sym_type
!= STT_FUNC
1025 && !(branch
|| hint
)
1026 && (sym_sec
->flags
& SEC_CODE
) == 0))
1029 /* Usually, symbols in non-overlay sections don't need stubs. */
1030 if (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
== 0
1031 && !htab
->params
->non_overlay_stubs
)
1034 /* A reference from some other section to a symbol in an overlay
1035 section needs a stub. */
1036 if (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
1037 != spu_elf_section_data (input_section
->output_section
)->u
.o
.ovl_index
)
1039 unsigned int lrlive
= 0;
1041 lrlive
= (contents
[1] & 0x70) >> 4;
1043 if (!lrlive
&& (call
|| sym_type
== STT_FUNC
))
1044 ret
= call_ovl_stub
;
1046 ret
= br000_ovl_stub
+ lrlive
;
1049 /* If this insn isn't a branch then we are possibly taking the
1050 address of a function and passing it out somehow. Soft-icache code
1051 always generates inline code to do indirect branches. */
1052 if (!(branch
|| hint
)
1053 && sym_type
== STT_FUNC
1054 && htab
->params
->ovly_flavour
!= ovly_soft_icache
)
1061 count_stub (struct spu_link_hash_table
*htab
,
1064 enum _stub_type stub_type
,
1065 struct elf_link_hash_entry
*h
,
1066 const Elf_Internal_Rela
*irela
)
1068 unsigned int ovl
= 0;
1069 struct got_entry
*g
, **head
;
1072 /* If this instruction is a branch or call, we need a stub
1073 for it. One stub per function per overlay.
1074 If it isn't a branch, then we are taking the address of
1075 this function so need a stub in the non-overlay area
1076 for it. One stub per function. */
1077 if (stub_type
!= nonovl_stub
)
1078 ovl
= spu_elf_section_data (isec
->output_section
)->u
.o
.ovl_index
;
1081 head
= &h
->got
.glist
;
1084 if (elf_local_got_ents (ibfd
) == NULL
)
1086 bfd_size_type amt
= (elf_tdata (ibfd
)->symtab_hdr
.sh_info
1087 * sizeof (*elf_local_got_ents (ibfd
)));
1088 elf_local_got_ents (ibfd
) = bfd_zmalloc (amt
);
1089 if (elf_local_got_ents (ibfd
) == NULL
)
1092 head
= elf_local_got_ents (ibfd
) + ELF32_R_SYM (irela
->r_info
);
1095 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1097 htab
->stub_count
[ovl
] += 1;
1103 addend
= irela
->r_addend
;
1107 struct got_entry
*gnext
;
1109 for (g
= *head
; g
!= NULL
; g
= g
->next
)
1110 if (g
->addend
== addend
&& g
->ovl
== 0)
1115 /* Need a new non-overlay area stub. Zap other stubs. */
1116 for (g
= *head
; g
!= NULL
; g
= gnext
)
1119 if (g
->addend
== addend
)
1121 htab
->stub_count
[g
->ovl
] -= 1;
1129 for (g
= *head
; g
!= NULL
; g
= g
->next
)
1130 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
1136 g
= bfd_malloc (sizeof *g
);
1141 g
->stub_addr
= (bfd_vma
) -1;
1145 htab
->stub_count
[ovl
] += 1;
1151 /* Support two sizes of overlay stubs, a slower more compact stub of two
1152 instructions, and a faster stub of four instructions.
1153 Soft-icache stubs are four or eight words. */
1156 ovl_stub_size (struct spu_elf_params
*params
)
1158 return 16 << params
->ovly_flavour
>> params
->compact_stub
;
1162 ovl_stub_size_log2 (struct spu_elf_params
*params
)
1164 return 4 + params
->ovly_flavour
- params
->compact_stub
;
1167 /* Two instruction overlay stubs look like:
1169 brsl $75,__ovly_load
1170 .word target_ovl_and_address
1172 ovl_and_address is a word with the overlay number in the top 14 bits
1173 and local store address in the bottom 18 bits.
1175 Four instruction overlay stubs look like:
1179 ila $79,target_address
1182 Software icache stubs are:
1186 .word lrlive_branchlocalstoreaddr;
1187 brasl $75,__icache_br_handler
1192 build_stub (struct bfd_link_info
*info
,
1195 enum _stub_type stub_type
,
1196 struct elf_link_hash_entry
*h
,
1197 const Elf_Internal_Rela
*irela
,
1201 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1202 unsigned int ovl
, dest_ovl
, set_id
;
1203 struct got_entry
*g
, **head
;
1205 bfd_vma addend
, from
, to
, br_dest
, patt
;
1206 unsigned int lrlive
;
1209 if (stub_type
!= nonovl_stub
)
1210 ovl
= spu_elf_section_data (isec
->output_section
)->u
.o
.ovl_index
;
1213 head
= &h
->got
.glist
;
1215 head
= elf_local_got_ents (ibfd
) + ELF32_R_SYM (irela
->r_info
);
1219 addend
= irela
->r_addend
;
1221 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1223 g
= bfd_malloc (sizeof *g
);
1229 g
->br_addr
= (irela
->r_offset
1230 + isec
->output_offset
1231 + isec
->output_section
->vma
);
1237 for (g
= *head
; g
!= NULL
; g
= g
->next
)
1238 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
1243 if (g
->ovl
== 0 && ovl
!= 0)
1246 if (g
->stub_addr
!= (bfd_vma
) -1)
1250 sec
= htab
->stub_sec
[ovl
];
1251 dest
+= dest_sec
->output_offset
+ dest_sec
->output_section
->vma
;
1252 from
= sec
->size
+ sec
->output_offset
+ sec
->output_section
->vma
;
1253 g
->stub_addr
= from
;
1254 to
= (htab
->ovly_entry
[0]->root
.u
.def
.value
1255 + htab
->ovly_entry
[0]->root
.u
.def
.section
->output_offset
1256 + htab
->ovly_entry
[0]->root
.u
.def
.section
->output_section
->vma
);
1258 if (((dest
| to
| from
) & 3) != 0)
1263 dest_ovl
= spu_elf_section_data (dest_sec
->output_section
)->u
.o
.ovl_index
;
1265 if (htab
->params
->ovly_flavour
== ovly_normal
1266 && !htab
->params
->compact_stub
)
1268 bfd_put_32 (sec
->owner
, ILA
+ ((dest_ovl
<< 7) & 0x01ffff80) + 78,
1269 sec
->contents
+ sec
->size
);
1270 bfd_put_32 (sec
->owner
, LNOP
,
1271 sec
->contents
+ sec
->size
+ 4);
1272 bfd_put_32 (sec
->owner
, ILA
+ ((dest
<< 7) & 0x01ffff80) + 79,
1273 sec
->contents
+ sec
->size
+ 8);
1275 bfd_put_32 (sec
->owner
, BR
+ (((to
- (from
+ 12)) << 5) & 0x007fff80),
1276 sec
->contents
+ sec
->size
+ 12);
1278 bfd_put_32 (sec
->owner
, BRA
+ ((to
<< 5) & 0x007fff80),
1279 sec
->contents
+ sec
->size
+ 12);
1281 else if (htab
->params
->ovly_flavour
== ovly_normal
1282 && htab
->params
->compact_stub
)
1285 bfd_put_32 (sec
->owner
, BRSL
+ (((to
- from
) << 5) & 0x007fff80) + 75,
1286 sec
->contents
+ sec
->size
);
1288 bfd_put_32 (sec
->owner
, BRASL
+ ((to
<< 5) & 0x007fff80) + 75,
1289 sec
->contents
+ sec
->size
);
1290 bfd_put_32 (sec
->owner
, (dest
& 0x3ffff) | (dest_ovl
<< 18),
1291 sec
->contents
+ sec
->size
+ 4);
1293 else if (htab
->params
->ovly_flavour
== ovly_soft_icache
1294 && htab
->params
->compact_stub
)
1297 if (stub_type
== nonovl_stub
)
1299 else if (stub_type
== call_ovl_stub
)
1300 /* A brsl makes lr live and *(*sp+16) is live.
1301 Tail calls have the same liveness. */
1303 else if (!htab
->params
->lrlive_analysis
)
1304 /* Assume stack frame and lr save. */
1306 else if (irela
!= NULL
)
1308 /* Analyse branch instructions. */
1309 struct function_info
*caller
;
1312 caller
= find_function (isec
, irela
->r_offset
, info
);
1313 if (caller
->start
== NULL
)
1314 off
= irela
->r_offset
;
1317 struct function_info
*found
= NULL
;
1319 /* Find the earliest piece of this function that
1320 has frame adjusting instructions. We might
1321 see dynamic frame adjustment (eg. for alloca)
1322 in some later piece, but functions using
1323 alloca always set up a frame earlier. Frame
1324 setup instructions are always in one piece. */
1325 if (caller
->lr_store
!= (bfd_vma
) -1
1326 || caller
->sp_adjust
!= (bfd_vma
) -1)
1328 while (caller
->start
!= NULL
)
1330 caller
= caller
->start
;
1331 if (caller
->lr_store
!= (bfd_vma
) -1
1332 || caller
->sp_adjust
!= (bfd_vma
) -1)
1340 if (off
> caller
->sp_adjust
)
1342 if (off
> caller
->lr_store
)
1343 /* Only *(*sp+16) is live. */
1346 /* If no lr save, then we must be in a
1347 leaf function with a frame.
1348 lr is still live. */
1351 else if (off
> caller
->lr_store
)
1353 /* Between lr save and stack adjust. */
1355 /* This should never happen since prologues won't
1360 /* On entry to function. */
1363 if (stub_type
!= br000_ovl_stub
1364 && lrlive
!= stub_type
- br000_ovl_stub
)
1365 info
->callbacks
->einfo (_("%A:0x%v lrlive .brinfo (%u) differs "
1366 "from analysis (%u)\n"),
1367 isec
, irela
->r_offset
, lrlive
,
1368 stub_type
- br000_ovl_stub
);
1371 /* If given lrlive info via .brinfo, use it. */
1372 if (stub_type
> br000_ovl_stub
)
1373 lrlive
= stub_type
- br000_ovl_stub
;
1376 to
= (htab
->ovly_entry
[1]->root
.u
.def
.value
1377 + htab
->ovly_entry
[1]->root
.u
.def
.section
->output_offset
1378 + htab
->ovly_entry
[1]->root
.u
.def
.section
->output_section
->vma
);
1380 /* The branch that uses this stub goes to stub_addr + 4. We'll
1381 set up an xor pattern that can be used by the icache manager
1382 to modify this branch to go directly to its destination. */
1384 br_dest
= g
->stub_addr
;
1387 /* Except in the case of _SPUEAR_ stubs, the branch in
1388 question is the one in the stub itself. */
1389 BFD_ASSERT (stub_type
== nonovl_stub
);
1390 g
->br_addr
= g
->stub_addr
;
1394 set_id
= ((dest_ovl
- 1) >> htab
->num_lines_log2
) + 1;
1395 bfd_put_32 (sec
->owner
, (set_id
<< 18) | (dest
& 0x3ffff),
1396 sec
->contents
+ sec
->size
);
1397 bfd_put_32 (sec
->owner
, BRASL
+ ((to
<< 5) & 0x007fff80) + 75,
1398 sec
->contents
+ sec
->size
+ 4);
1399 bfd_put_32 (sec
->owner
, (lrlive
<< 29) | (g
->br_addr
& 0x3ffff),
1400 sec
->contents
+ sec
->size
+ 8);
1401 patt
= dest
^ br_dest
;
1402 if (irela
!= NULL
&& ELF32_R_TYPE (irela
->r_info
) == R_SPU_REL16
)
1403 patt
= (dest
- g
->br_addr
) ^ (br_dest
- g
->br_addr
);
1404 bfd_put_32 (sec
->owner
, (patt
<< 5) & 0x007fff80,
1405 sec
->contents
+ sec
->size
+ 12);
1408 /* Extra space for linked list entries. */
1414 sec
->size
+= ovl_stub_size (htab
->params
);
1416 if (htab
->params
->emit_stub_syms
)
1422 len
= 8 + sizeof (".ovl_call.") - 1;
1424 len
+= strlen (h
->root
.root
.string
);
1429 add
= (int) irela
->r_addend
& 0xffffffff;
1432 name
= bfd_malloc (len
+ 1);
1436 sprintf (name
, "%08x.ovl_call.", g
->ovl
);
1438 strcpy (name
+ 8 + sizeof (".ovl_call.") - 1, h
->root
.root
.string
);
1440 sprintf (name
+ 8 + sizeof (".ovl_call.") - 1, "%x:%x",
1441 dest_sec
->id
& 0xffffffff,
1442 (int) ELF32_R_SYM (irela
->r_info
) & 0xffffffff);
1444 sprintf (name
+ len
- 9, "+%x", add
);
1446 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
1450 if (h
->root
.type
== bfd_link_hash_new
)
1452 h
->root
.type
= bfd_link_hash_defined
;
1453 h
->root
.u
.def
.section
= sec
;
1454 h
->size
= ovl_stub_size (htab
->params
);
1455 h
->root
.u
.def
.value
= sec
->size
- h
->size
;
1459 h
->ref_regular_nonweak
= 1;
1460 h
->forced_local
= 1;
1468 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
1472 allocate_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
1474 /* Symbols starting with _SPUEAR_ need a stub because they may be
1475 invoked by the PPU. */
1476 struct bfd_link_info
*info
= inf
;
1477 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1480 if ((h
->root
.type
== bfd_link_hash_defined
1481 || h
->root
.type
== bfd_link_hash_defweak
)
1483 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0
1484 && (sym_sec
= h
->root
.u
.def
.section
) != NULL
1485 && sym_sec
->output_section
!= bfd_abs_section_ptr
1486 && spu_elf_section_data (sym_sec
->output_section
) != NULL
1487 && (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
!= 0
1488 || htab
->params
->non_overlay_stubs
))
1490 return count_stub (htab
, NULL
, NULL
, nonovl_stub
, h
, NULL
);
1497 build_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
1499 /* Symbols starting with _SPUEAR_ need a stub because they may be
1500 invoked by the PPU. */
1501 struct bfd_link_info
*info
= inf
;
1502 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1505 if ((h
->root
.type
== bfd_link_hash_defined
1506 || h
->root
.type
== bfd_link_hash_defweak
)
1508 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0
1509 && (sym_sec
= h
->root
.u
.def
.section
) != NULL
1510 && sym_sec
->output_section
!= bfd_abs_section_ptr
1511 && spu_elf_section_data (sym_sec
->output_section
) != NULL
1512 && (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
!= 0
1513 || htab
->params
->non_overlay_stubs
))
1515 return build_stub (info
, NULL
, NULL
, nonovl_stub
, h
, NULL
,
1516 h
->root
.u
.def
.value
, sym_sec
);
1522 /* Size or build stubs. */
1525 process_stubs (struct bfd_link_info
*info
, bfd_boolean build
)
1527 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1530 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
1532 extern const bfd_target spu_elf32_vec
;
1533 Elf_Internal_Shdr
*symtab_hdr
;
1535 Elf_Internal_Sym
*local_syms
= NULL
;
1537 if (ibfd
->xvec
!= &spu_elf32_vec
)
1540 /* We'll need the symbol table in a second. */
1541 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
1542 if (symtab_hdr
->sh_info
== 0)
1545 /* Walk over each section attached to the input bfd. */
1546 for (isec
= ibfd
->sections
; isec
!= NULL
; isec
= isec
->next
)
1548 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
1550 /* If there aren't any relocs, then there's nothing more to do. */
1551 if ((isec
->flags
& SEC_RELOC
) == 0
1552 || isec
->reloc_count
== 0)
1555 if (!maybe_needs_stubs (isec
))
1558 /* Get the relocs. */
1559 internal_relocs
= _bfd_elf_link_read_relocs (ibfd
, isec
, NULL
, NULL
,
1561 if (internal_relocs
== NULL
)
1562 goto error_ret_free_local
;
1564 /* Now examine each relocation. */
1565 irela
= internal_relocs
;
1566 irelaend
= irela
+ isec
->reloc_count
;
1567 for (; irela
< irelaend
; irela
++)
1569 enum elf_spu_reloc_type r_type
;
1570 unsigned int r_indx
;
1572 Elf_Internal_Sym
*sym
;
1573 struct elf_link_hash_entry
*h
;
1574 enum _stub_type stub_type
;
1576 r_type
= ELF32_R_TYPE (irela
->r_info
);
1577 r_indx
= ELF32_R_SYM (irela
->r_info
);
1579 if (r_type
>= R_SPU_max
)
1581 bfd_set_error (bfd_error_bad_value
);
1582 error_ret_free_internal
:
1583 if (elf_section_data (isec
)->relocs
!= internal_relocs
)
1584 free (internal_relocs
);
1585 error_ret_free_local
:
1586 if (local_syms
!= NULL
1587 && (symtab_hdr
->contents
1588 != (unsigned char *) local_syms
))
1593 /* Determine the reloc target section. */
1594 if (!get_sym_h (&h
, &sym
, &sym_sec
, &local_syms
, r_indx
, ibfd
))
1595 goto error_ret_free_internal
;
1597 stub_type
= needs_ovl_stub (h
, sym
, sym_sec
, isec
, irela
,
1599 if (stub_type
== no_stub
)
1601 else if (stub_type
== stub_error
)
1602 goto error_ret_free_internal
;
1604 if (htab
->stub_count
== NULL
)
1607 amt
= (htab
->num_overlays
+ 1) * sizeof (*htab
->stub_count
);
1608 htab
->stub_count
= bfd_zmalloc (amt
);
1609 if (htab
->stub_count
== NULL
)
1610 goto error_ret_free_internal
;
1615 if (!count_stub (htab
, ibfd
, isec
, stub_type
, h
, irela
))
1616 goto error_ret_free_internal
;
1623 dest
= h
->root
.u
.def
.value
;
1625 dest
= sym
->st_value
;
1626 dest
+= irela
->r_addend
;
1627 if (!build_stub (info
, ibfd
, isec
, stub_type
, h
, irela
,
1629 goto error_ret_free_internal
;
1633 /* We're done with the internal relocs, free them. */
1634 if (elf_section_data (isec
)->relocs
!= internal_relocs
)
1635 free (internal_relocs
);
1638 if (local_syms
!= NULL
1639 && symtab_hdr
->contents
!= (unsigned char *) local_syms
)
1641 if (!info
->keep_memory
)
1644 symtab_hdr
->contents
= (unsigned char *) local_syms
;
1651 /* Allocate space for overlay call and return stubs.
1652 Return 0 on error, 1 if no overlays, 2 otherwise. */
1655 spu_elf_size_stubs (struct bfd_link_info
*info
)
1657 struct spu_link_hash_table
*htab
;
1664 if (!process_stubs (info
, FALSE
))
1667 htab
= spu_hash_table (info
);
1668 elf_link_hash_traverse (&htab
->elf
, allocate_spuear_stubs
, info
);
1672 ibfd
= info
->input_bfds
;
1673 if (htab
->stub_count
!= NULL
)
1675 amt
= (htab
->num_overlays
+ 1) * sizeof (*htab
->stub_sec
);
1676 htab
->stub_sec
= bfd_zmalloc (amt
);
1677 if (htab
->stub_sec
== NULL
)
1680 flags
= (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_READONLY
1681 | SEC_HAS_CONTENTS
| SEC_IN_MEMORY
);
1682 stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1683 htab
->stub_sec
[0] = stub
;
1685 || !bfd_set_section_alignment (ibfd
, stub
,
1686 ovl_stub_size_log2 (htab
->params
)))
1688 stub
->size
= htab
->stub_count
[0] * ovl_stub_size (htab
->params
);
1689 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1690 /* Extra space for linked list entries. */
1691 stub
->size
+= htab
->stub_count
[0] * 16;
1693 for (i
= 0; i
< htab
->num_overlays
; ++i
)
1695 asection
*osec
= htab
->ovl_sec
[i
];
1696 unsigned int ovl
= spu_elf_section_data (osec
)->u
.o
.ovl_index
;
1697 stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1698 htab
->stub_sec
[ovl
] = stub
;
1700 || !bfd_set_section_alignment (ibfd
, stub
,
1701 ovl_stub_size_log2 (htab
->params
)))
1703 stub
->size
= htab
->stub_count
[ovl
] * ovl_stub_size (htab
->params
);
1707 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1709 /* Space for icache manager tables.
1710 a) Tag array, one quadword per cache line.
1711 b) Rewrite "to" list, one quadword per cache line.
1712 c) Rewrite "from" list, one byte per outgoing branch (rounded up to
1713 a power-of-two number of full quadwords) per cache line. */
1716 htab
->ovtab
= bfd_make_section_anyway_with_flags (ibfd
, ".ovtab", flags
);
1717 if (htab
->ovtab
== NULL
1718 || !bfd_set_section_alignment (ibfd
, htab
->ovtab
, 4))
1721 htab
->ovtab
->size
= (16 + 16 + (16 << htab
->fromelem_size_log2
))
1722 << htab
->num_lines_log2
;
1724 flags
= SEC_ALLOC
| SEC_LOAD
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
1725 htab
->init
= bfd_make_section_anyway_with_flags (ibfd
, ".ovini", flags
);
1726 if (htab
->init
== NULL
1727 || !bfd_set_section_alignment (ibfd
, htab
->init
, 4))
1730 htab
->init
->size
= 16;
1732 else if (htab
->stub_count
== NULL
)
1736 /* htab->ovtab consists of two arrays.
1746 . } _ovly_buf_table[];
1749 flags
= SEC_ALLOC
| SEC_LOAD
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
1750 htab
->ovtab
= bfd_make_section_anyway_with_flags (ibfd
, ".ovtab", flags
);
1751 if (htab
->ovtab
== NULL
1752 || !bfd_set_section_alignment (ibfd
, htab
->ovtab
, 4))
1755 htab
->ovtab
->size
= htab
->num_overlays
* 16 + 16 + htab
->num_buf
* 4;
1758 htab
->toe
= bfd_make_section_anyway_with_flags (ibfd
, ".toe", SEC_ALLOC
);
1759 if (htab
->toe
== NULL
1760 || !bfd_set_section_alignment (ibfd
, htab
->toe
, 4))
1762 htab
->toe
->size
= 16;
1767 /* Called from ld to place overlay manager data sections. This is done
1768 after the overlay manager itself is loaded, mainly so that the
1769 linker's htab->init section is placed after any other .ovl.init
1773 spu_elf_place_overlay_data (struct bfd_link_info
*info
)
1775 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1778 if (htab
->stub_sec
!= NULL
)
1780 (*htab
->params
->place_spu_section
) (htab
->stub_sec
[0], NULL
, ".text");
1782 for (i
= 0; i
< htab
->num_overlays
; ++i
)
1784 asection
*osec
= htab
->ovl_sec
[i
];
1785 unsigned int ovl
= spu_elf_section_data (osec
)->u
.o
.ovl_index
;
1786 (*htab
->params
->place_spu_section
) (htab
->stub_sec
[ovl
], osec
, NULL
);
1790 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1791 (*htab
->params
->place_spu_section
) (htab
->init
, NULL
, ".ovl.init");
1793 if (htab
->ovtab
!= NULL
)
1795 const char *ovout
= ".data";
1796 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1798 (*htab
->params
->place_spu_section
) (htab
->ovtab
, NULL
, ovout
);
1801 if (htab
->toe
!= NULL
)
1802 (*htab
->params
->place_spu_section
) (htab
->toe
, NULL
, ".toe");
1805 /* Functions to handle embedded spu_ovl.o object. */
1808 ovl_mgr_open (struct bfd
*nbfd ATTRIBUTE_UNUSED
, void *stream
)
1814 ovl_mgr_pread (struct bfd
*abfd ATTRIBUTE_UNUSED
,
1820 struct _ovl_stream
*os
;
1824 os
= (struct _ovl_stream
*) stream
;
1825 max
= (const char *) os
->end
- (const char *) os
->start
;
1827 if ((ufile_ptr
) offset
>= max
)
1831 if (count
> max
- offset
)
1832 count
= max
- offset
;
1834 memcpy (buf
, (const char *) os
->start
+ offset
, count
);
1839 ovl_mgr_stat (struct bfd
*abfd ATTRIBUTE_UNUSED
,
1843 struct _ovl_stream
*os
= (struct _ovl_stream
*) stream
;
1845 memset (sb
, 0, sizeof (*sb
));
1846 sb
->st_size
= (const char *) os
->end
- (const char *) os
->start
;
1851 spu_elf_open_builtin_lib (bfd
**ovl_bfd
, const struct _ovl_stream
*stream
)
1853 *ovl_bfd
= bfd_openr_iovec ("builtin ovl_mgr",
1860 return *ovl_bfd
!= NULL
;
1864 overlay_index (asection
*sec
)
1867 || sec
->output_section
== bfd_abs_section_ptr
)
1869 return spu_elf_section_data (sec
->output_section
)->u
.o
.ovl_index
;
1872 /* Define an STT_OBJECT symbol. */
1874 static struct elf_link_hash_entry
*
1875 define_ovtab_symbol (struct spu_link_hash_table
*htab
, const char *name
)
1877 struct elf_link_hash_entry
*h
;
1879 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, FALSE
, FALSE
);
1883 if (h
->root
.type
!= bfd_link_hash_defined
1886 h
->root
.type
= bfd_link_hash_defined
;
1887 h
->root
.u
.def
.section
= htab
->ovtab
;
1888 h
->type
= STT_OBJECT
;
1891 h
->ref_regular_nonweak
= 1;
1894 else if (h
->root
.u
.def
.section
->owner
!= NULL
)
1896 (*_bfd_error_handler
) (_("%B is not allowed to define %s"),
1897 h
->root
.u
.def
.section
->owner
,
1898 h
->root
.root
.string
);
1899 bfd_set_error (bfd_error_bad_value
);
1904 (*_bfd_error_handler
) (_("you are not allowed to define %s in a script"),
1905 h
->root
.root
.string
);
1906 bfd_set_error (bfd_error_bad_value
);
1913 /* Fill in all stubs and the overlay tables. */
1916 spu_elf_build_stubs (struct bfd_link_info
*info
)
1918 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1919 struct elf_link_hash_entry
*h
;
1925 if (htab
->num_overlays
!= 0)
1927 for (i
= 0; i
< 2; i
++)
1929 h
= htab
->ovly_entry
[i
];
1931 && (h
->root
.type
== bfd_link_hash_defined
1932 || h
->root
.type
== bfd_link_hash_defweak
)
1935 s
= h
->root
.u
.def
.section
->output_section
;
1936 if (spu_elf_section_data (s
)->u
.o
.ovl_index
)
1938 (*_bfd_error_handler
) (_("%s in overlay section"),
1939 h
->root
.root
.string
);
1940 bfd_set_error (bfd_error_bad_value
);
1947 if (htab
->stub_sec
!= NULL
)
1949 for (i
= 0; i
<= htab
->num_overlays
; i
++)
1950 if (htab
->stub_sec
[i
]->size
!= 0)
1952 htab
->stub_sec
[i
]->contents
= bfd_zalloc (htab
->stub_sec
[i
]->owner
,
1953 htab
->stub_sec
[i
]->size
);
1954 if (htab
->stub_sec
[i
]->contents
== NULL
)
1956 htab
->stub_sec
[i
]->rawsize
= htab
->stub_sec
[i
]->size
;
1957 htab
->stub_sec
[i
]->size
= 0;
1960 /* Fill in all the stubs. */
1961 process_stubs (info
, TRUE
);
1962 if (!htab
->stub_err
)
1963 elf_link_hash_traverse (&htab
->elf
, build_spuear_stubs
, info
);
1967 (*_bfd_error_handler
) (_("overlay stub relocation overflow"));
1968 bfd_set_error (bfd_error_bad_value
);
1972 for (i
= 0; i
<= htab
->num_overlays
; i
++)
1974 if (htab
->stub_sec
[i
]->size
!= htab
->stub_sec
[i
]->rawsize
)
1976 (*_bfd_error_handler
) (_("stubs don't match calculated size"));
1977 bfd_set_error (bfd_error_bad_value
);
1980 htab
->stub_sec
[i
]->rawsize
= 0;
1984 if (htab
->ovtab
== NULL
|| htab
->ovtab
->size
== 0)
1987 htab
->ovtab
->contents
= bfd_zalloc (htab
->ovtab
->owner
, htab
->ovtab
->size
);
1988 if (htab
->ovtab
->contents
== NULL
)
1991 p
= htab
->ovtab
->contents
;
1992 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1996 h
= define_ovtab_symbol (htab
, "__icache_tag_array");
1999 h
->root
.u
.def
.value
= 0;
2000 h
->size
= 16 << htab
->num_lines_log2
;
2003 h
= define_ovtab_symbol (htab
, "__icache_tag_array_size");
2006 h
->root
.u
.def
.value
= 16 << htab
->num_lines_log2
;
2007 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2009 h
= define_ovtab_symbol (htab
, "__icache_rewrite_to");
2012 h
->root
.u
.def
.value
= off
;
2013 h
->size
= 16 << htab
->num_lines_log2
;
2016 h
= define_ovtab_symbol (htab
, "__icache_rewrite_to_size");
2019 h
->root
.u
.def
.value
= 16 << htab
->num_lines_log2
;
2020 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2022 h
= define_ovtab_symbol (htab
, "__icache_rewrite_from");
2025 h
->root
.u
.def
.value
= off
;
2026 h
->size
= 16 << (htab
->fromelem_size_log2
+ htab
->num_lines_log2
);
2029 h
= define_ovtab_symbol (htab
, "__icache_rewrite_from_size");
2032 h
->root
.u
.def
.value
= 16 << (htab
->fromelem_size_log2
2033 + htab
->num_lines_log2
);
2034 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2036 h
= define_ovtab_symbol (htab
, "__icache_log2_fromelemsize");
2039 h
->root
.u
.def
.value
= htab
->fromelem_size_log2
;
2040 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2042 h
= define_ovtab_symbol (htab
, "__icache_base");
2045 h
->root
.u
.def
.value
= htab
->ovl_sec
[0]->vma
;
2046 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2047 h
->size
= htab
->num_buf
<< htab
->line_size_log2
;
2049 h
= define_ovtab_symbol (htab
, "__icache_linesize");
2052 h
->root
.u
.def
.value
= 1 << htab
->line_size_log2
;
2053 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2055 h
= define_ovtab_symbol (htab
, "__icache_log2_linesize");
2058 h
->root
.u
.def
.value
= htab
->line_size_log2
;
2059 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2061 h
= define_ovtab_symbol (htab
, "__icache_neg_log2_linesize");
2064 h
->root
.u
.def
.value
= -htab
->line_size_log2
;
2065 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2067 h
= define_ovtab_symbol (htab
, "__icache_cachesize");
2070 h
->root
.u
.def
.value
= 1 << (htab
->num_lines_log2
+ htab
->line_size_log2
);
2071 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2073 h
= define_ovtab_symbol (htab
, "__icache_log2_cachesize");
2076 h
->root
.u
.def
.value
= htab
->num_lines_log2
+ htab
->line_size_log2
;
2077 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2079 h
= define_ovtab_symbol (htab
, "__icache_neg_log2_cachesize");
2082 h
->root
.u
.def
.value
= -(htab
->num_lines_log2
+ htab
->line_size_log2
);
2083 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
2085 if (htab
->init
!= NULL
&& htab
->init
->size
!= 0)
2087 htab
->init
->contents
= bfd_zalloc (htab
->init
->owner
,
2089 if (htab
->init
->contents
== NULL
)
2092 h
= define_ovtab_symbol (htab
, "__icache_fileoff");
2095 h
->root
.u
.def
.value
= 0;
2096 h
->root
.u
.def
.section
= htab
->init
;
2102 /* Write out _ovly_table. */
2103 /* set low bit of .size to mark non-overlay area as present. */
2105 obfd
= htab
->ovtab
->output_section
->owner
;
2106 for (s
= obfd
->sections
; s
!= NULL
; s
= s
->next
)
2108 unsigned int ovl_index
= spu_elf_section_data (s
)->u
.o
.ovl_index
;
2112 unsigned long off
= ovl_index
* 16;
2113 unsigned int ovl_buf
= spu_elf_section_data (s
)->u
.o
.ovl_buf
;
2115 bfd_put_32 (htab
->ovtab
->owner
, s
->vma
, p
+ off
);
2116 bfd_put_32 (htab
->ovtab
->owner
, (s
->size
+ 15) & -16,
2118 /* file_off written later in spu_elf_modify_program_headers. */
2119 bfd_put_32 (htab
->ovtab
->owner
, ovl_buf
, p
+ off
+ 12);
2123 h
= define_ovtab_symbol (htab
, "_ovly_table");
2126 h
->root
.u
.def
.value
= 16;
2127 h
->size
= htab
->num_overlays
* 16;
2129 h
= define_ovtab_symbol (htab
, "_ovly_table_end");
2132 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16;
2135 h
= define_ovtab_symbol (htab
, "_ovly_buf_table");
2138 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16;
2139 h
->size
= htab
->num_buf
* 4;
2141 h
= define_ovtab_symbol (htab
, "_ovly_buf_table_end");
2144 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16 + htab
->num_buf
* 4;
2148 h
= define_ovtab_symbol (htab
, "_EAR_");
2151 h
->root
.u
.def
.section
= htab
->toe
;
2152 h
->root
.u
.def
.value
= 0;
2158 /* Check that all loadable section VMAs lie in the range
2159 LO .. HI inclusive, and stash some parameters for --auto-overlay. */
2162 spu_elf_check_vma (struct bfd_link_info
*info
)
2164 struct elf_segment_map
*m
;
2166 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
2167 bfd
*abfd
= info
->output_bfd
;
2168 bfd_vma hi
= htab
->params
->local_store_hi
;
2169 bfd_vma lo
= htab
->params
->local_store_lo
;
2171 htab
->local_store
= hi
+ 1 - lo
;
2173 for (m
= elf_seg_map (abfd
); m
!= NULL
; m
= m
->next
)
2174 if (m
->p_type
== PT_LOAD
)
2175 for (i
= 0; i
< m
->count
; i
++)
2176 if (m
->sections
[i
]->size
!= 0
2177 && (m
->sections
[i
]->vma
< lo
2178 || m
->sections
[i
]->vma
> hi
2179 || m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1 > hi
))
2180 return m
->sections
[i
];
2185 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
2186 Search for stack adjusting insns, and return the sp delta.
2187 If a store of lr is found save the instruction offset to *LR_STORE.
2188 If a stack adjusting instruction is found, save that offset to
2192 find_function_stack_adjust (asection
*sec
,
2199 memset (reg
, 0, sizeof (reg
));
2200 for ( ; offset
+ 4 <= sec
->size
; offset
+= 4)
2202 unsigned char buf
[4];
2206 /* Assume no relocs on stack adjusing insns. */
2207 if (!bfd_get_section_contents (sec
->owner
, sec
, buf
, offset
, 4))
2211 ra
= ((buf
[2] & 0x3f) << 1) | (buf
[3] >> 7);
2213 if (buf
[0] == 0x24 /* stqd */)
2215 if (rt
== 0 /* lr */ && ra
== 1 /* sp */)
2220 /* Partly decoded immediate field. */
2221 imm
= (buf
[1] << 9) | (buf
[2] << 1) | (buf
[3] >> 7);
2223 if (buf
[0] == 0x1c /* ai */)
2226 imm
= (imm
^ 0x200) - 0x200;
2227 reg
[rt
] = reg
[ra
] + imm
;
2229 if (rt
== 1 /* sp */)
2233 *sp_adjust
= offset
;
2237 else if (buf
[0] == 0x18 && (buf
[1] & 0xe0) == 0 /* a */)
2239 int rb
= ((buf
[1] & 0x1f) << 2) | ((buf
[2] & 0xc0) >> 6);
2241 reg
[rt
] = reg
[ra
] + reg
[rb
];
2246 *sp_adjust
= offset
;
2250 else if (buf
[0] == 0x08 && (buf
[1] & 0xe0) == 0 /* sf */)
2252 int rb
= ((buf
[1] & 0x1f) << 2) | ((buf
[2] & 0xc0) >> 6);
2254 reg
[rt
] = reg
[rb
] - reg
[ra
];
2259 *sp_adjust
= offset
;
2263 else if ((buf
[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
2265 if (buf
[0] >= 0x42 /* ila */)
2266 imm
|= (buf
[0] & 1) << 17;
2271 if (buf
[0] == 0x40 /* il */)
2273 if ((buf
[1] & 0x80) == 0)
2275 imm
= (imm
^ 0x8000) - 0x8000;
2277 else if ((buf
[1] & 0x80) == 0 /* ilhu */)
2283 else if (buf
[0] == 0x60 && (buf
[1] & 0x80) != 0 /* iohl */)
2285 reg
[rt
] |= imm
& 0xffff;
2288 else if (buf
[0] == 0x04 /* ori */)
2291 imm
= (imm
^ 0x200) - 0x200;
2292 reg
[rt
] = reg
[ra
] | imm
;
2295 else if (buf
[0] == 0x32 && (buf
[1] & 0x80) != 0 /* fsmbi */)
2297 reg
[rt
] = ( ((imm
& 0x8000) ? 0xff000000 : 0)
2298 | ((imm
& 0x4000) ? 0x00ff0000 : 0)
2299 | ((imm
& 0x2000) ? 0x0000ff00 : 0)
2300 | ((imm
& 0x1000) ? 0x000000ff : 0));
2303 else if (buf
[0] == 0x16 /* andbi */)
2309 reg
[rt
] = reg
[ra
] & imm
;
2312 else if (buf
[0] == 0x33 && imm
== 1 /* brsl .+4 */)
2314 /* Used in pic reg load. Say rt is trashed. Won't be used
2315 in stack adjust, but we need to continue past this branch. */
2319 else if (is_branch (buf
) || is_indirect_branch (buf
))
2320 /* If we hit a branch then we must be out of the prologue. */
2327 /* qsort predicate to sort symbols by section and value. */
2329 static Elf_Internal_Sym
*sort_syms_syms
;
2330 static asection
**sort_syms_psecs
;
2333 sort_syms (const void *a
, const void *b
)
2335 Elf_Internal_Sym
*const *s1
= a
;
2336 Elf_Internal_Sym
*const *s2
= b
;
2337 asection
*sec1
,*sec2
;
2338 bfd_signed_vma delta
;
2340 sec1
= sort_syms_psecs
[*s1
- sort_syms_syms
];
2341 sec2
= sort_syms_psecs
[*s2
- sort_syms_syms
];
2344 return sec1
->index
- sec2
->index
;
2346 delta
= (*s1
)->st_value
- (*s2
)->st_value
;
2348 return delta
< 0 ? -1 : 1;
2350 delta
= (*s2
)->st_size
- (*s1
)->st_size
;
2352 return delta
< 0 ? -1 : 1;
2354 return *s1
< *s2
? -1 : 1;
2357 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
2358 entries for section SEC. */
2360 static struct spu_elf_stack_info
*
2361 alloc_stack_info (asection
*sec
, int max_fun
)
2363 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2366 amt
= sizeof (struct spu_elf_stack_info
);
2367 amt
+= (max_fun
- 1) * sizeof (struct function_info
);
2368 sec_data
->u
.i
.stack_info
= bfd_zmalloc (amt
);
2369 if (sec_data
->u
.i
.stack_info
!= NULL
)
2370 sec_data
->u
.i
.stack_info
->max_fun
= max_fun
;
2371 return sec_data
->u
.i
.stack_info
;
2374 /* Add a new struct function_info describing a (part of a) function
2375 starting at SYM_H. Keep the array sorted by address. */
2377 static struct function_info
*
2378 maybe_insert_function (asection
*sec
,
2381 bfd_boolean is_func
)
2383 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2384 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
2390 sinfo
= alloc_stack_info (sec
, 20);
2397 Elf_Internal_Sym
*sym
= sym_h
;
2398 off
= sym
->st_value
;
2399 size
= sym
->st_size
;
2403 struct elf_link_hash_entry
*h
= sym_h
;
2404 off
= h
->root
.u
.def
.value
;
2408 for (i
= sinfo
->num_fun
; --i
>= 0; )
2409 if (sinfo
->fun
[i
].lo
<= off
)
2414 /* Don't add another entry for an alias, but do update some
2416 if (sinfo
->fun
[i
].lo
== off
)
2418 /* Prefer globals over local syms. */
2419 if (global
&& !sinfo
->fun
[i
].global
)
2421 sinfo
->fun
[i
].global
= TRUE
;
2422 sinfo
->fun
[i
].u
.h
= sym_h
;
2425 sinfo
->fun
[i
].is_func
= TRUE
;
2426 return &sinfo
->fun
[i
];
2428 /* Ignore a zero-size symbol inside an existing function. */
2429 else if (sinfo
->fun
[i
].hi
> off
&& size
== 0)
2430 return &sinfo
->fun
[i
];
2433 if (sinfo
->num_fun
>= sinfo
->max_fun
)
2435 bfd_size_type amt
= sizeof (struct spu_elf_stack_info
);
2436 bfd_size_type old
= amt
;
2438 old
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
2439 sinfo
->max_fun
+= 20 + (sinfo
->max_fun
>> 1);
2440 amt
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
2441 sinfo
= bfd_realloc (sinfo
, amt
);
2444 memset ((char *) sinfo
+ old
, 0, amt
- old
);
2445 sec_data
->u
.i
.stack_info
= sinfo
;
2448 if (++i
< sinfo
->num_fun
)
2449 memmove (&sinfo
->fun
[i
+ 1], &sinfo
->fun
[i
],
2450 (sinfo
->num_fun
- i
) * sizeof (sinfo
->fun
[i
]));
2451 sinfo
->fun
[i
].is_func
= is_func
;
2452 sinfo
->fun
[i
].global
= global
;
2453 sinfo
->fun
[i
].sec
= sec
;
2455 sinfo
->fun
[i
].u
.h
= sym_h
;
2457 sinfo
->fun
[i
].u
.sym
= sym_h
;
2458 sinfo
->fun
[i
].lo
= off
;
2459 sinfo
->fun
[i
].hi
= off
+ size
;
2460 sinfo
->fun
[i
].lr_store
= -1;
2461 sinfo
->fun
[i
].sp_adjust
= -1;
2462 sinfo
->fun
[i
].stack
= -find_function_stack_adjust (sec
, off
,
2463 &sinfo
->fun
[i
].lr_store
,
2464 &sinfo
->fun
[i
].sp_adjust
);
2465 sinfo
->num_fun
+= 1;
2466 return &sinfo
->fun
[i
];
2469 /* Return the name of FUN. */
2472 func_name (struct function_info
*fun
)
2476 Elf_Internal_Shdr
*symtab_hdr
;
2478 while (fun
->start
!= NULL
)
2482 return fun
->u
.h
->root
.root
.string
;
2485 if (fun
->u
.sym
->st_name
== 0)
2487 size_t len
= strlen (sec
->name
);
2488 char *name
= bfd_malloc (len
+ 10);
2491 sprintf (name
, "%s+%lx", sec
->name
,
2492 (unsigned long) fun
->u
.sym
->st_value
& 0xffffffff);
2496 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2497 return bfd_elf_sym_name (ibfd
, symtab_hdr
, fun
->u
.sym
, sec
);
2500 /* Read the instruction at OFF in SEC. Return true iff the instruction
2501 is a nop, lnop, or stop 0 (all zero insn). */
2504 is_nop (asection
*sec
, bfd_vma off
)
2506 unsigned char insn
[4];
2508 if (off
+ 4 > sec
->size
2509 || !bfd_get_section_contents (sec
->owner
, sec
, insn
, off
, 4))
2511 if ((insn
[0] & 0xbf) == 0 && (insn
[1] & 0xe0) == 0x20)
2513 if (insn
[0] == 0 && insn
[1] == 0 && insn
[2] == 0 && insn
[3] == 0)
2518 /* Extend the range of FUN to cover nop padding up to LIMIT.
2519 Return TRUE iff some instruction other than a NOP was found. */
2522 insns_at_end (struct function_info
*fun
, bfd_vma limit
)
2524 bfd_vma off
= (fun
->hi
+ 3) & -4;
2526 while (off
< limit
&& is_nop (fun
->sec
, off
))
2537 /* Check and fix overlapping function ranges. Return TRUE iff there
2538 are gaps in the current info we have about functions in SEC. */
2541 check_function_ranges (asection
*sec
, struct bfd_link_info
*info
)
2543 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2544 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
2546 bfd_boolean gaps
= FALSE
;
2551 for (i
= 1; i
< sinfo
->num_fun
; i
++)
2552 if (sinfo
->fun
[i
- 1].hi
> sinfo
->fun
[i
].lo
)
2554 /* Fix overlapping symbols. */
2555 const char *f1
= func_name (&sinfo
->fun
[i
- 1]);
2556 const char *f2
= func_name (&sinfo
->fun
[i
]);
2558 info
->callbacks
->einfo (_("warning: %s overlaps %s\n"), f1
, f2
);
2559 sinfo
->fun
[i
- 1].hi
= sinfo
->fun
[i
].lo
;
2561 else if (insns_at_end (&sinfo
->fun
[i
- 1], sinfo
->fun
[i
].lo
))
2564 if (sinfo
->num_fun
== 0)
2568 if (sinfo
->fun
[0].lo
!= 0)
2570 if (sinfo
->fun
[sinfo
->num_fun
- 1].hi
> sec
->size
)
2572 const char *f1
= func_name (&sinfo
->fun
[sinfo
->num_fun
- 1]);
2574 info
->callbacks
->einfo (_("warning: %s exceeds section size\n"), f1
);
2575 sinfo
->fun
[sinfo
->num_fun
- 1].hi
= sec
->size
;
2577 else if (insns_at_end (&sinfo
->fun
[sinfo
->num_fun
- 1], sec
->size
))
2583 /* Search current function info for a function that contains address
2584 OFFSET in section SEC. */
2586 static struct function_info
*
2587 find_function (asection
*sec
, bfd_vma offset
, struct bfd_link_info
*info
)
2589 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2590 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
2594 hi
= sinfo
->num_fun
;
2597 mid
= (lo
+ hi
) / 2;
2598 if (offset
< sinfo
->fun
[mid
].lo
)
2600 else if (offset
>= sinfo
->fun
[mid
].hi
)
2603 return &sinfo
->fun
[mid
];
2605 info
->callbacks
->einfo (_("%A:0x%v not found in function table\n"),
2607 bfd_set_error (bfd_error_bad_value
);
2611 /* Add CALLEE to CALLER call list if not already present. Return TRUE
2612 if CALLEE was new. If this function return FALSE, CALLEE should
2616 insert_callee (struct function_info
*caller
, struct call_info
*callee
)
2618 struct call_info
**pp
, *p
;
2620 for (pp
= &caller
->call_list
; (p
= *pp
) != NULL
; pp
= &p
->next
)
2621 if (p
->fun
== callee
->fun
)
2623 /* Tail calls use less stack than normal calls. Retain entry
2624 for normal call over one for tail call. */
2625 p
->is_tail
&= callee
->is_tail
;
2628 p
->fun
->start
= NULL
;
2629 p
->fun
->is_func
= TRUE
;
2631 p
->count
+= callee
->count
;
2632 /* Reorder list so most recent call is first. */
2634 p
->next
= caller
->call_list
;
2635 caller
->call_list
= p
;
2638 callee
->next
= caller
->call_list
;
2639 caller
->call_list
= callee
;
2643 /* Copy CALL and insert the copy into CALLER. */
2646 copy_callee (struct function_info
*caller
, const struct call_info
*call
)
2648 struct call_info
*callee
;
2649 callee
= bfd_malloc (sizeof (*callee
));
2653 if (!insert_callee (caller
, callee
))
2658 /* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2659 overlay stub sections. */
2662 interesting_section (asection
*s
)
2664 return (s
->output_section
!= bfd_abs_section_ptr
2665 && ((s
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_IN_MEMORY
))
2666 == (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2670 /* Rummage through the relocs for SEC, looking for function calls.
2671 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
2672 mark destination symbols on calls as being functions. Also
2673 look at branches, which may be tail calls or go to hot/cold
2674 section part of same function. */
2677 mark_functions_via_relocs (asection
*sec
,
2678 struct bfd_link_info
*info
,
2681 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
2682 Elf_Internal_Shdr
*symtab_hdr
;
2684 unsigned int priority
= 0;
2685 static bfd_boolean warned
;
2687 if (!interesting_section (sec
)
2688 || sec
->reloc_count
== 0)
2691 internal_relocs
= _bfd_elf_link_read_relocs (sec
->owner
, sec
, NULL
, NULL
,
2693 if (internal_relocs
== NULL
)
2696 symtab_hdr
= &elf_tdata (sec
->owner
)->symtab_hdr
;
2697 psyms
= &symtab_hdr
->contents
;
2698 irela
= internal_relocs
;
2699 irelaend
= irela
+ sec
->reloc_count
;
2700 for (; irela
< irelaend
; irela
++)
2702 enum elf_spu_reloc_type r_type
;
2703 unsigned int r_indx
;
2705 Elf_Internal_Sym
*sym
;
2706 struct elf_link_hash_entry
*h
;
2708 bfd_boolean nonbranch
, is_call
;
2709 struct function_info
*caller
;
2710 struct call_info
*callee
;
2712 r_type
= ELF32_R_TYPE (irela
->r_info
);
2713 nonbranch
= r_type
!= R_SPU_REL16
&& r_type
!= R_SPU_ADDR16
;
2715 r_indx
= ELF32_R_SYM (irela
->r_info
);
2716 if (!get_sym_h (&h
, &sym
, &sym_sec
, psyms
, r_indx
, sec
->owner
))
2720 || sym_sec
->output_section
== bfd_abs_section_ptr
)
2726 unsigned char insn
[4];
2728 if (!bfd_get_section_contents (sec
->owner
, sec
, insn
,
2729 irela
->r_offset
, 4))
2731 if (is_branch (insn
))
2733 is_call
= (insn
[0] & 0xfd) == 0x31;
2734 priority
= insn
[1] & 0x0f;
2736 priority
|= insn
[2];
2738 priority
|= insn
[3];
2740 if ((sym_sec
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2741 != (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2744 info
->callbacks
->einfo
2745 (_("%B(%A+0x%v): call to non-code section"
2746 " %B(%A), analysis incomplete\n"),
2747 sec
->owner
, sec
, irela
->r_offset
,
2748 sym_sec
->owner
, sym_sec
);
2763 /* For --auto-overlay, count possible stubs we need for
2764 function pointer references. */
2765 unsigned int sym_type
;
2769 sym_type
= ELF_ST_TYPE (sym
->st_info
);
2770 if (sym_type
== STT_FUNC
)
2772 if (call_tree
&& spu_hash_table (info
)->params
->auto_overlay
)
2773 spu_hash_table (info
)->non_ovly_stub
+= 1;
2774 /* If the symbol type is STT_FUNC then this must be a
2775 function pointer initialisation. */
2778 /* Ignore data references. */
2779 if ((sym_sec
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2780 != (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2782 /* Otherwise we probably have a jump table reloc for
2783 a switch statement or some other reference to a
2788 val
= h
->root
.u
.def
.value
;
2790 val
= sym
->st_value
;
2791 val
+= irela
->r_addend
;
2795 struct function_info
*fun
;
2797 if (irela
->r_addend
!= 0)
2799 Elf_Internal_Sym
*fake
= bfd_zmalloc (sizeof (*fake
));
2802 fake
->st_value
= val
;
2804 = _bfd_elf_section_from_bfd_section (sym_sec
->owner
, sym_sec
);
2808 fun
= maybe_insert_function (sym_sec
, sym
, FALSE
, is_call
);
2810 fun
= maybe_insert_function (sym_sec
, h
, TRUE
, is_call
);
2813 if (irela
->r_addend
!= 0
2814 && fun
->u
.sym
!= sym
)
2819 caller
= find_function (sec
, irela
->r_offset
, info
);
2822 callee
= bfd_malloc (sizeof *callee
);
2826 callee
->fun
= find_function (sym_sec
, val
, info
);
2827 if (callee
->fun
== NULL
)
2829 callee
->is_tail
= !is_call
;
2830 callee
->is_pasted
= FALSE
;
2831 callee
->broken_cycle
= FALSE
;
2832 callee
->priority
= priority
;
2833 callee
->count
= nonbranch
? 0 : 1;
2834 if (callee
->fun
->last_caller
!= sec
)
2836 callee
->fun
->last_caller
= sec
;
2837 callee
->fun
->call_count
+= 1;
2839 if (!insert_callee (caller
, callee
))
2842 && !callee
->fun
->is_func
2843 && callee
->fun
->stack
== 0)
2845 /* This is either a tail call or a branch from one part of
2846 the function to another, ie. hot/cold section. If the
2847 destination has been called by some other function then
2848 it is a separate function. We also assume that functions
2849 are not split across input files. */
2850 if (sec
->owner
!= sym_sec
->owner
)
2852 callee
->fun
->start
= NULL
;
2853 callee
->fun
->is_func
= TRUE
;
2855 else if (callee
->fun
->start
== NULL
)
2857 struct function_info
*caller_start
= caller
;
2858 while (caller_start
->start
)
2859 caller_start
= caller_start
->start
;
2861 if (caller_start
!= callee
->fun
)
2862 callee
->fun
->start
= caller_start
;
2866 struct function_info
*callee_start
;
2867 struct function_info
*caller_start
;
2868 callee_start
= callee
->fun
;
2869 while (callee_start
->start
)
2870 callee_start
= callee_start
->start
;
2871 caller_start
= caller
;
2872 while (caller_start
->start
)
2873 caller_start
= caller_start
->start
;
2874 if (caller_start
!= callee_start
)
2876 callee
->fun
->start
= NULL
;
2877 callee
->fun
->is_func
= TRUE
;
2886 /* Handle something like .init or .fini, which has a piece of a function.
2887 These sections are pasted together to form a single function. */
2890 pasted_function (asection
*sec
)
2892 struct bfd_link_order
*l
;
2893 struct _spu_elf_section_data
*sec_data
;
2894 struct spu_elf_stack_info
*sinfo
;
2895 Elf_Internal_Sym
*fake
;
2896 struct function_info
*fun
, *fun_start
;
2898 fake
= bfd_zmalloc (sizeof (*fake
));
2902 fake
->st_size
= sec
->size
;
2904 = _bfd_elf_section_from_bfd_section (sec
->owner
, sec
);
2905 fun
= maybe_insert_function (sec
, fake
, FALSE
, FALSE
);
2909 /* Find a function immediately preceding this section. */
2911 for (l
= sec
->output_section
->map_head
.link_order
; l
!= NULL
; l
= l
->next
)
2913 if (l
->u
.indirect
.section
== sec
)
2915 if (fun_start
!= NULL
)
2917 struct call_info
*callee
= bfd_malloc (sizeof *callee
);
2921 fun
->start
= fun_start
;
2923 callee
->is_tail
= TRUE
;
2924 callee
->is_pasted
= TRUE
;
2925 callee
->broken_cycle
= FALSE
;
2926 callee
->priority
= 0;
2928 if (!insert_callee (fun_start
, callee
))
2934 if (l
->type
== bfd_indirect_link_order
2935 && (sec_data
= spu_elf_section_data (l
->u
.indirect
.section
)) != NULL
2936 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
2937 && sinfo
->num_fun
!= 0)
2938 fun_start
= &sinfo
->fun
[sinfo
->num_fun
- 1];
2941 /* Don't return an error if we did not find a function preceding this
2942 section. The section may have incorrect flags. */
2946 /* Map address ranges in code sections to functions. */
2949 discover_functions (struct bfd_link_info
*info
)
2953 Elf_Internal_Sym
***psym_arr
;
2954 asection
***sec_arr
;
2955 bfd_boolean gaps
= FALSE
;
2958 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
2961 psym_arr
= bfd_zmalloc (bfd_idx
* sizeof (*psym_arr
));
2962 if (psym_arr
== NULL
)
2964 sec_arr
= bfd_zmalloc (bfd_idx
* sizeof (*sec_arr
));
2965 if (sec_arr
== NULL
)
2968 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2970 ibfd
= ibfd
->link
.next
, bfd_idx
++)
2972 extern const bfd_target spu_elf32_vec
;
2973 Elf_Internal_Shdr
*symtab_hdr
;
2976 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
2977 asection
**psecs
, **p
;
2979 if (ibfd
->xvec
!= &spu_elf32_vec
)
2982 /* Read all the symbols. */
2983 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2984 symcount
= symtab_hdr
->sh_size
/ symtab_hdr
->sh_entsize
;
2988 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2989 if (interesting_section (sec
))
2997 if (symtab_hdr
->contents
!= NULL
)
2999 /* Don't use cached symbols since the generic ELF linker
3000 code only reads local symbols, and we need globals too. */
3001 free (symtab_hdr
->contents
);
3002 symtab_hdr
->contents
= NULL
;
3004 syms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
, symcount
, 0,
3006 symtab_hdr
->contents
= (void *) syms
;
3010 /* Select defined function symbols that are going to be output. */
3011 psyms
= bfd_malloc ((symcount
+ 1) * sizeof (*psyms
));
3014 psym_arr
[bfd_idx
] = psyms
;
3015 psecs
= bfd_malloc (symcount
* sizeof (*psecs
));
3018 sec_arr
[bfd_idx
] = psecs
;
3019 for (psy
= psyms
, p
= psecs
, sy
= syms
; sy
< syms
+ symcount
; ++p
, ++sy
)
3020 if (ELF_ST_TYPE (sy
->st_info
) == STT_NOTYPE
3021 || ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
3025 *p
= s
= bfd_section_from_elf_index (ibfd
, sy
->st_shndx
);
3026 if (s
!= NULL
&& interesting_section (s
))
3029 symcount
= psy
- psyms
;
3032 /* Sort them by section and offset within section. */
3033 sort_syms_syms
= syms
;
3034 sort_syms_psecs
= psecs
;
3035 qsort (psyms
, symcount
, sizeof (*psyms
), sort_syms
);
3037 /* Now inspect the function symbols. */
3038 for (psy
= psyms
; psy
< psyms
+ symcount
; )
3040 asection
*s
= psecs
[*psy
- syms
];
3041 Elf_Internal_Sym
**psy2
;
3043 for (psy2
= psy
; ++psy2
< psyms
+ symcount
; )
3044 if (psecs
[*psy2
- syms
] != s
)
3047 if (!alloc_stack_info (s
, psy2
- psy
))
3052 /* First install info about properly typed and sized functions.
3053 In an ideal world this will cover all code sections, except
3054 when partitioning functions into hot and cold sections,
3055 and the horrible pasted together .init and .fini functions. */
3056 for (psy
= psyms
; psy
< psyms
+ symcount
; ++psy
)
3059 if (ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
3061 asection
*s
= psecs
[sy
- syms
];
3062 if (!maybe_insert_function (s
, sy
, FALSE
, TRUE
))
3067 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
3068 if (interesting_section (sec
))
3069 gaps
|= check_function_ranges (sec
, info
);
3074 /* See if we can discover more function symbols by looking at
3076 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
3078 ibfd
= ibfd
->link
.next
, bfd_idx
++)
3082 if (psym_arr
[bfd_idx
] == NULL
)
3085 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3086 if (!mark_functions_via_relocs (sec
, info
, FALSE
))
3090 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
3092 ibfd
= ibfd
->link
.next
, bfd_idx
++)
3094 Elf_Internal_Shdr
*symtab_hdr
;
3096 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
3099 if ((psyms
= psym_arr
[bfd_idx
]) == NULL
)
3102 psecs
= sec_arr
[bfd_idx
];
3104 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
3105 syms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
3108 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
3109 if (interesting_section (sec
))
3110 gaps
|= check_function_ranges (sec
, info
);
3114 /* Finally, install all globals. */
3115 for (psy
= psyms
; (sy
= *psy
) != NULL
; ++psy
)
3119 s
= psecs
[sy
- syms
];
3121 /* Global syms might be improperly typed functions. */
3122 if (ELF_ST_TYPE (sy
->st_info
) != STT_FUNC
3123 && ELF_ST_BIND (sy
->st_info
) == STB_GLOBAL
)
3125 if (!maybe_insert_function (s
, sy
, FALSE
, FALSE
))
3131 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
3133 extern const bfd_target spu_elf32_vec
;
3136 if (ibfd
->xvec
!= &spu_elf32_vec
)
3139 /* Some of the symbols we've installed as marking the
3140 beginning of functions may have a size of zero. Extend
3141 the range of such functions to the beginning of the
3142 next symbol of interest. */
3143 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3144 if (interesting_section (sec
))
3146 struct _spu_elf_section_data
*sec_data
;
3147 struct spu_elf_stack_info
*sinfo
;
3149 sec_data
= spu_elf_section_data (sec
);
3150 sinfo
= sec_data
->u
.i
.stack_info
;
3151 if (sinfo
!= NULL
&& sinfo
->num_fun
!= 0)
3154 bfd_vma hi
= sec
->size
;
3156 for (fun_idx
= sinfo
->num_fun
; --fun_idx
>= 0; )
3158 sinfo
->fun
[fun_idx
].hi
= hi
;
3159 hi
= sinfo
->fun
[fun_idx
].lo
;
3162 sinfo
->fun
[0].lo
= 0;
3164 /* No symbols in this section. Must be .init or .fini
3165 or something similar. */
3166 else if (!pasted_function (sec
))
3172 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
3174 ibfd
= ibfd
->link
.next
, bfd_idx
++)
3176 if (psym_arr
[bfd_idx
] == NULL
)
3179 free (psym_arr
[bfd_idx
]);
3180 free (sec_arr
[bfd_idx
]);
3189 /* Iterate over all function_info we have collected, calling DOIT on
3190 each node if ROOT_ONLY is false. Only call DOIT on root nodes
3194 for_each_node (bfd_boolean (*doit
) (struct function_info
*,
3195 struct bfd_link_info
*,
3197 struct bfd_link_info
*info
,
3203 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
3205 extern const bfd_target spu_elf32_vec
;
3208 if (ibfd
->xvec
!= &spu_elf32_vec
)
3211 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3213 struct _spu_elf_section_data
*sec_data
;
3214 struct spu_elf_stack_info
*sinfo
;
3216 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
3217 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3220 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3221 if (!root_only
|| !sinfo
->fun
[i
].non_root
)
3222 if (!doit (&sinfo
->fun
[i
], info
, param
))
3230 /* Transfer call info attached to struct function_info entries for
3231 all of a given function's sections to the first entry. */
3234 transfer_calls (struct function_info
*fun
,
3235 struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
3236 void *param ATTRIBUTE_UNUSED
)
3238 struct function_info
*start
= fun
->start
;
3242 struct call_info
*call
, *call_next
;
3244 while (start
->start
!= NULL
)
3245 start
= start
->start
;
3246 for (call
= fun
->call_list
; call
!= NULL
; call
= call_next
)
3248 call_next
= call
->next
;
3249 if (!insert_callee (start
, call
))
3252 fun
->call_list
= NULL
;
3257 /* Mark nodes in the call graph that are called by some other node. */
3260 mark_non_root (struct function_info
*fun
,
3261 struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
3262 void *param ATTRIBUTE_UNUSED
)
3264 struct call_info
*call
;
3269 for (call
= fun
->call_list
; call
; call
= call
->next
)
3271 call
->fun
->non_root
= TRUE
;
3272 mark_non_root (call
->fun
, 0, 0);
3277 /* Remove cycles from the call graph. Set depth of nodes. */
3280 remove_cycles (struct function_info
*fun
,
3281 struct bfd_link_info
*info
,
3284 struct call_info
**callp
, *call
;
3285 unsigned int depth
= *(unsigned int *) param
;
3286 unsigned int max_depth
= depth
;
3290 fun
->marking
= TRUE
;
3292 callp
= &fun
->call_list
;
3293 while ((call
= *callp
) != NULL
)
3295 call
->max_depth
= depth
+ !call
->is_pasted
;
3296 if (!call
->fun
->visit2
)
3298 if (!remove_cycles (call
->fun
, info
, &call
->max_depth
))
3300 if (max_depth
< call
->max_depth
)
3301 max_depth
= call
->max_depth
;
3303 else if (call
->fun
->marking
)
3305 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
3307 if (!htab
->params
->auto_overlay
3308 && htab
->params
->stack_analysis
)
3310 const char *f1
= func_name (fun
);
3311 const char *f2
= func_name (call
->fun
);
3313 info
->callbacks
->info (_("Stack analysis will ignore the call "
3318 call
->broken_cycle
= TRUE
;
3320 callp
= &call
->next
;
3322 fun
->marking
= FALSE
;
3323 *(unsigned int *) param
= max_depth
;
3327 /* Check that we actually visited all nodes in remove_cycles. If we
3328 didn't, then there is some cycle in the call graph not attached to
3329 any root node. Arbitrarily choose a node in the cycle as a new
3330 root and break the cycle. */
3333 mark_detached_root (struct function_info
*fun
,
3334 struct bfd_link_info
*info
,
3339 fun
->non_root
= FALSE
;
3340 *(unsigned int *) param
= 0;
3341 return remove_cycles (fun
, info
, param
);
3344 /* Populate call_list for each function. */
3347 build_call_tree (struct bfd_link_info
*info
)
3352 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
3354 extern const bfd_target spu_elf32_vec
;
3357 if (ibfd
->xvec
!= &spu_elf32_vec
)
3360 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3361 if (!mark_functions_via_relocs (sec
, info
, TRUE
))
3365 /* Transfer call info from hot/cold section part of function
3367 if (!spu_hash_table (info
)->params
->auto_overlay
3368 && !for_each_node (transfer_calls
, info
, 0, FALSE
))
3371 /* Find the call graph root(s). */
3372 if (!for_each_node (mark_non_root
, info
, 0, FALSE
))
3375 /* Remove cycles from the call graph. We start from the root node(s)
3376 so that we break cycles in a reasonable place. */
3378 if (!for_each_node (remove_cycles
, info
, &depth
, TRUE
))
3381 return for_each_node (mark_detached_root
, info
, &depth
, FALSE
);
3384 /* qsort predicate to sort calls by priority, max_depth then count. */
3387 sort_calls (const void *a
, const void *b
)
3389 struct call_info
*const *c1
= a
;
3390 struct call_info
*const *c2
= b
;
3393 delta
= (*c2
)->priority
- (*c1
)->priority
;
3397 delta
= (*c2
)->max_depth
- (*c1
)->max_depth
;
3401 delta
= (*c2
)->count
- (*c1
)->count
;
3405 return (char *) c1
- (char *) c2
;
3409 unsigned int max_overlay_size
;
3412 /* Set linker_mark and gc_mark on any sections that we will put in
3413 overlays. These flags are used by the generic ELF linker, but we
3414 won't be continuing on to bfd_elf_final_link so it is OK to use
3415 them. linker_mark is clear before we get here. Set segment_mark
3416 on sections that are part of a pasted function (excluding the last
3419 Set up function rodata section if --overlay-rodata. We don't
3420 currently include merged string constant rodata sections since
3422 Sort the call graph so that the deepest nodes will be visited
3426 mark_overlay_section (struct function_info
*fun
,
3427 struct bfd_link_info
*info
,
3430 struct call_info
*call
;
3432 struct _mos_param
*mos_param
= param
;
3433 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
3439 if (!fun
->sec
->linker_mark
3440 && (htab
->params
->ovly_flavour
!= ovly_soft_icache
3441 || htab
->params
->non_ia_text
3442 || strncmp (fun
->sec
->name
, ".text.ia.", 9) == 0
3443 || strcmp (fun
->sec
->name
, ".init") == 0
3444 || strcmp (fun
->sec
->name
, ".fini") == 0))
3448 fun
->sec
->linker_mark
= 1;
3449 fun
->sec
->gc_mark
= 1;
3450 fun
->sec
->segment_mark
= 0;
3451 /* Ensure SEC_CODE is set on this text section (it ought to
3452 be!), and SEC_CODE is clear on rodata sections. We use
3453 this flag to differentiate the two overlay section types. */
3454 fun
->sec
->flags
|= SEC_CODE
;
3456 size
= fun
->sec
->size
;
3457 if (htab
->params
->auto_overlay
& OVERLAY_RODATA
)
3461 /* Find the rodata section corresponding to this function's
3463 if (strcmp (fun
->sec
->name
, ".text") == 0)
3465 name
= bfd_malloc (sizeof (".rodata"));
3468 memcpy (name
, ".rodata", sizeof (".rodata"));
3470 else if (strncmp (fun
->sec
->name
, ".text.", 6) == 0)
3472 size_t len
= strlen (fun
->sec
->name
);
3473 name
= bfd_malloc (len
+ 3);
3476 memcpy (name
, ".rodata", sizeof (".rodata"));
3477 memcpy (name
+ 7, fun
->sec
->name
+ 5, len
- 4);
3479 else if (strncmp (fun
->sec
->name
, ".gnu.linkonce.t.", 16) == 0)
3481 size_t len
= strlen (fun
->sec
->name
) + 1;
3482 name
= bfd_malloc (len
);
3485 memcpy (name
, fun
->sec
->name
, len
);
3491 asection
*rodata
= NULL
;
3492 asection
*group_sec
= elf_section_data (fun
->sec
)->next_in_group
;
3493 if (group_sec
== NULL
)
3494 rodata
= bfd_get_section_by_name (fun
->sec
->owner
, name
);
3496 while (group_sec
!= NULL
&& group_sec
!= fun
->sec
)
3498 if (strcmp (group_sec
->name
, name
) == 0)
3503 group_sec
= elf_section_data (group_sec
)->next_in_group
;
3505 fun
->rodata
= rodata
;
3508 size
+= fun
->rodata
->size
;
3509 if (htab
->params
->line_size
!= 0
3510 && size
> htab
->params
->line_size
)
3512 size
-= fun
->rodata
->size
;
3517 fun
->rodata
->linker_mark
= 1;
3518 fun
->rodata
->gc_mark
= 1;
3519 fun
->rodata
->flags
&= ~SEC_CODE
;
3525 if (mos_param
->max_overlay_size
< size
)
3526 mos_param
->max_overlay_size
= size
;
3529 for (count
= 0, call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3534 struct call_info
**calls
= bfd_malloc (count
* sizeof (*calls
));
3538 for (count
= 0, call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3539 calls
[count
++] = call
;
3541 qsort (calls
, count
, sizeof (*calls
), sort_calls
);
3543 fun
->call_list
= NULL
;
3547 calls
[count
]->next
= fun
->call_list
;
3548 fun
->call_list
= calls
[count
];
3553 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3555 if (call
->is_pasted
)
3557 /* There can only be one is_pasted call per function_info. */
3558 BFD_ASSERT (!fun
->sec
->segment_mark
);
3559 fun
->sec
->segment_mark
= 1;
3561 if (!call
->broken_cycle
3562 && !mark_overlay_section (call
->fun
, info
, param
))
3566 /* Don't put entry code into an overlay. The overlay manager needs
3567 a stack! Also, don't mark .ovl.init as an overlay. */
3568 if (fun
->lo
+ fun
->sec
->output_offset
+ fun
->sec
->output_section
->vma
3569 == info
->output_bfd
->start_address
3570 || strncmp (fun
->sec
->output_section
->name
, ".ovl.init", 9) == 0)
3572 fun
->sec
->linker_mark
= 0;
3573 if (fun
->rodata
!= NULL
)
3574 fun
->rodata
->linker_mark
= 0;
3579 /* If non-zero then unmark functions called from those within sections
3580 that we need to unmark. Unfortunately this isn't reliable since the
3581 call graph cannot know the destination of function pointer calls. */
3582 #define RECURSE_UNMARK 0
3585 asection
*exclude_input_section
;
3586 asection
*exclude_output_section
;
3587 unsigned long clearing
;
3590 /* Undo some of mark_overlay_section's work. */
3593 unmark_overlay_section (struct function_info
*fun
,
3594 struct bfd_link_info
*info
,
3597 struct call_info
*call
;
3598 struct _uos_param
*uos_param
= param
;
3599 unsigned int excluded
= 0;
3607 if (fun
->sec
== uos_param
->exclude_input_section
3608 || fun
->sec
->output_section
== uos_param
->exclude_output_section
)
3612 uos_param
->clearing
+= excluded
;
3614 if (RECURSE_UNMARK
? uos_param
->clearing
: excluded
)
3616 fun
->sec
->linker_mark
= 0;
3618 fun
->rodata
->linker_mark
= 0;
3621 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3622 if (!call
->broken_cycle
3623 && !unmark_overlay_section (call
->fun
, info
, param
))
3627 uos_param
->clearing
-= excluded
;
3632 unsigned int lib_size
;
3633 asection
**lib_sections
;
3636 /* Add sections we have marked as belonging to overlays to an array
3637 for consideration as non-overlay sections. The array consist of
3638 pairs of sections, (text,rodata), for functions in the call graph. */
3641 collect_lib_sections (struct function_info
*fun
,
3642 struct bfd_link_info
*info
,
3645 struct _cl_param
*lib_param
= param
;
3646 struct call_info
*call
;
3653 if (!fun
->sec
->linker_mark
|| !fun
->sec
->gc_mark
|| fun
->sec
->segment_mark
)
3656 size
= fun
->sec
->size
;
3658 size
+= fun
->rodata
->size
;
3660 if (size
<= lib_param
->lib_size
)
3662 *lib_param
->lib_sections
++ = fun
->sec
;
3663 fun
->sec
->gc_mark
= 0;
3664 if (fun
->rodata
&& fun
->rodata
->linker_mark
&& fun
->rodata
->gc_mark
)
3666 *lib_param
->lib_sections
++ = fun
->rodata
;
3667 fun
->rodata
->gc_mark
= 0;
3670 *lib_param
->lib_sections
++ = NULL
;
3673 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3674 if (!call
->broken_cycle
)
3675 collect_lib_sections (call
->fun
, info
, param
);
3680 /* qsort predicate to sort sections by call count. */
3683 sort_lib (const void *a
, const void *b
)
3685 asection
*const *s1
= a
;
3686 asection
*const *s2
= b
;
3687 struct _spu_elf_section_data
*sec_data
;
3688 struct spu_elf_stack_info
*sinfo
;
3692 if ((sec_data
= spu_elf_section_data (*s1
)) != NULL
3693 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3696 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3697 delta
-= sinfo
->fun
[i
].call_count
;
3700 if ((sec_data
= spu_elf_section_data (*s2
)) != NULL
3701 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3704 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3705 delta
+= sinfo
->fun
[i
].call_count
;
3714 /* Remove some sections from those marked to be in overlays. Choose
3715 those that are called from many places, likely library functions. */
3718 auto_ovl_lib_functions (struct bfd_link_info
*info
, unsigned int lib_size
)
3721 asection
**lib_sections
;
3722 unsigned int i
, lib_count
;
3723 struct _cl_param collect_lib_param
;
3724 struct function_info dummy_caller
;
3725 struct spu_link_hash_table
*htab
;
3727 memset (&dummy_caller
, 0, sizeof (dummy_caller
));
3729 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
3731 extern const bfd_target spu_elf32_vec
;
3734 if (ibfd
->xvec
!= &spu_elf32_vec
)
3737 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3738 if (sec
->linker_mark
3739 && sec
->size
< lib_size
3740 && (sec
->flags
& SEC_CODE
) != 0)
3743 lib_sections
= bfd_malloc (lib_count
* 2 * sizeof (*lib_sections
));
3744 if (lib_sections
== NULL
)
3745 return (unsigned int) -1;
3746 collect_lib_param
.lib_size
= lib_size
;
3747 collect_lib_param
.lib_sections
= lib_sections
;
3748 if (!for_each_node (collect_lib_sections
, info
, &collect_lib_param
,
3750 return (unsigned int) -1;
3751 lib_count
= (collect_lib_param
.lib_sections
- lib_sections
) / 2;
3753 /* Sort sections so that those with the most calls are first. */
3755 qsort (lib_sections
, lib_count
, 2 * sizeof (*lib_sections
), sort_lib
);
3757 htab
= spu_hash_table (info
);
3758 for (i
= 0; i
< lib_count
; i
++)
3760 unsigned int tmp
, stub_size
;
3762 struct _spu_elf_section_data
*sec_data
;
3763 struct spu_elf_stack_info
*sinfo
;
3765 sec
= lib_sections
[2 * i
];
3766 /* If this section is OK, its size must be less than lib_size. */
3768 /* If it has a rodata section, then add that too. */
3769 if (lib_sections
[2 * i
+ 1])
3770 tmp
+= lib_sections
[2 * i
+ 1]->size
;
3771 /* Add any new overlay call stubs needed by the section. */
3774 && (sec_data
= spu_elf_section_data (sec
)) != NULL
3775 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3778 struct call_info
*call
;
3780 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3781 for (call
= sinfo
->fun
[k
].call_list
; call
; call
= call
->next
)
3782 if (call
->fun
->sec
->linker_mark
)
3784 struct call_info
*p
;
3785 for (p
= dummy_caller
.call_list
; p
; p
= p
->next
)
3786 if (p
->fun
== call
->fun
)
3789 stub_size
+= ovl_stub_size (htab
->params
);
3792 if (tmp
+ stub_size
< lib_size
)
3794 struct call_info
**pp
, *p
;
3796 /* This section fits. Mark it as non-overlay. */
3797 lib_sections
[2 * i
]->linker_mark
= 0;
3798 if (lib_sections
[2 * i
+ 1])
3799 lib_sections
[2 * i
+ 1]->linker_mark
= 0;
3800 lib_size
-= tmp
+ stub_size
;
3801 /* Call stubs to the section we just added are no longer
3803 pp
= &dummy_caller
.call_list
;
3804 while ((p
= *pp
) != NULL
)
3805 if (!p
->fun
->sec
->linker_mark
)
3807 lib_size
+= ovl_stub_size (htab
->params
);
3813 /* Add new call stubs to dummy_caller. */
3814 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
3815 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3818 struct call_info
*call
;
3820 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3821 for (call
= sinfo
->fun
[k
].call_list
;
3824 if (call
->fun
->sec
->linker_mark
)
3826 struct call_info
*callee
;
3827 callee
= bfd_malloc (sizeof (*callee
));
3829 return (unsigned int) -1;
3831 if (!insert_callee (&dummy_caller
, callee
))
3837 while (dummy_caller
.call_list
!= NULL
)
3839 struct call_info
*call
= dummy_caller
.call_list
;
3840 dummy_caller
.call_list
= call
->next
;
3843 for (i
= 0; i
< 2 * lib_count
; i
++)
3844 if (lib_sections
[i
])
3845 lib_sections
[i
]->gc_mark
= 1;
3846 free (lib_sections
);
3850 /* Build an array of overlay sections. The deepest node's section is
3851 added first, then its parent node's section, then everything called
3852 from the parent section. The idea being to group sections to
3853 minimise calls between different overlays. */
3856 collect_overlays (struct function_info
*fun
,
3857 struct bfd_link_info
*info
,
3860 struct call_info
*call
;
3861 bfd_boolean added_fun
;
3862 asection
***ovly_sections
= param
;
3868 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3869 if (!call
->is_pasted
&& !call
->broken_cycle
)
3871 if (!collect_overlays (call
->fun
, info
, ovly_sections
))
3877 if (fun
->sec
->linker_mark
&& fun
->sec
->gc_mark
)
3879 fun
->sec
->gc_mark
= 0;
3880 *(*ovly_sections
)++ = fun
->sec
;
3881 if (fun
->rodata
&& fun
->rodata
->linker_mark
&& fun
->rodata
->gc_mark
)
3883 fun
->rodata
->gc_mark
= 0;
3884 *(*ovly_sections
)++ = fun
->rodata
;
3887 *(*ovly_sections
)++ = NULL
;
3890 /* Pasted sections must stay with the first section. We don't
3891 put pasted sections in the array, just the first section.
3892 Mark subsequent sections as already considered. */
3893 if (fun
->sec
->segment_mark
)
3895 struct function_info
*call_fun
= fun
;
3898 for (call
= call_fun
->call_list
; call
!= NULL
; call
= call
->next
)
3899 if (call
->is_pasted
)
3901 call_fun
= call
->fun
;
3902 call_fun
->sec
->gc_mark
= 0;
3903 if (call_fun
->rodata
)
3904 call_fun
->rodata
->gc_mark
= 0;
3910 while (call_fun
->sec
->segment_mark
);
3914 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3915 if (!call
->broken_cycle
3916 && !collect_overlays (call
->fun
, info
, ovly_sections
))
3921 struct _spu_elf_section_data
*sec_data
;
3922 struct spu_elf_stack_info
*sinfo
;
3924 if ((sec_data
= spu_elf_section_data (fun
->sec
)) != NULL
3925 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3928 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3929 if (!collect_overlays (&sinfo
->fun
[i
], info
, ovly_sections
))
3937 struct _sum_stack_param
{
3939 size_t overall_stack
;
3940 bfd_boolean emit_stack_syms
;
3943 /* Descend the call graph for FUN, accumulating total stack required. */
3946 sum_stack (struct function_info
*fun
,
3947 struct bfd_link_info
*info
,
3950 struct call_info
*call
;
3951 struct function_info
*max
;
3952 size_t stack
, cum_stack
;
3954 bfd_boolean has_call
;
3955 struct _sum_stack_param
*sum_stack_param
= param
;
3956 struct spu_link_hash_table
*htab
;
3958 cum_stack
= fun
->stack
;
3959 sum_stack_param
->cum_stack
= cum_stack
;
3965 for (call
= fun
->call_list
; call
; call
= call
->next
)
3967 if (call
->broken_cycle
)
3969 if (!call
->is_pasted
)
3971 if (!sum_stack (call
->fun
, info
, sum_stack_param
))
3973 stack
= sum_stack_param
->cum_stack
;
3974 /* Include caller stack for normal calls, don't do so for
3975 tail calls. fun->stack here is local stack usage for
3977 if (!call
->is_tail
|| call
->is_pasted
|| call
->fun
->start
!= NULL
)
3978 stack
+= fun
->stack
;
3979 if (cum_stack
< stack
)
3986 sum_stack_param
->cum_stack
= cum_stack
;
3988 /* Now fun->stack holds cumulative stack. */
3989 fun
->stack
= cum_stack
;
3993 && sum_stack_param
->overall_stack
< cum_stack
)
3994 sum_stack_param
->overall_stack
= cum_stack
;
3996 htab
= spu_hash_table (info
);
3997 if (htab
->params
->auto_overlay
)
4000 f1
= func_name (fun
);
4001 if (htab
->params
->stack_analysis
)
4004 info
->callbacks
->info (_(" %s: 0x%v\n"), f1
, (bfd_vma
) cum_stack
);
4005 info
->callbacks
->minfo (_("%s: 0x%v 0x%v\n"),
4006 f1
, (bfd_vma
) stack
, (bfd_vma
) cum_stack
);
4010 info
->callbacks
->minfo (_(" calls:\n"));
4011 for (call
= fun
->call_list
; call
; call
= call
->next
)
4012 if (!call
->is_pasted
&& !call
->broken_cycle
)
4014 const char *f2
= func_name (call
->fun
);
4015 const char *ann1
= call
->fun
== max
? "*" : " ";
4016 const char *ann2
= call
->is_tail
? "t" : " ";
4018 info
->callbacks
->minfo (_(" %s%s %s\n"), ann1
, ann2
, f2
);
4023 if (sum_stack_param
->emit_stack_syms
)
4025 char *name
= bfd_malloc (18 + strlen (f1
));
4026 struct elf_link_hash_entry
*h
;
4031 if (fun
->global
|| ELF_ST_BIND (fun
->u
.sym
->st_info
) == STB_GLOBAL
)
4032 sprintf (name
, "__stack_%s", f1
);
4034 sprintf (name
, "__stack_%x_%s", fun
->sec
->id
& 0xffffffff, f1
);
4036 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
4039 && (h
->root
.type
== bfd_link_hash_new
4040 || h
->root
.type
== bfd_link_hash_undefined
4041 || h
->root
.type
== bfd_link_hash_undefweak
))
4043 h
->root
.type
= bfd_link_hash_defined
;
4044 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
4045 h
->root
.u
.def
.value
= cum_stack
;
4050 h
->ref_regular_nonweak
= 1;
4051 h
->forced_local
= 1;
4059 /* SEC is part of a pasted function. Return the call_info for the
4060 next section of this function. */
4062 static struct call_info
*
4063 find_pasted_call (asection
*sec
)
4065 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
4066 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
4067 struct call_info
*call
;
4070 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
4071 for (call
= sinfo
->fun
[k
].call_list
; call
!= NULL
; call
= call
->next
)
4072 if (call
->is_pasted
)
4078 /* qsort predicate to sort bfds by file name. */
4081 sort_bfds (const void *a
, const void *b
)
4083 bfd
*const *abfd1
= a
;
4084 bfd
*const *abfd2
= b
;
4086 return filename_cmp ((*abfd1
)->filename
, (*abfd2
)->filename
);
4090 print_one_overlay_section (FILE *script
,
4093 unsigned int ovlynum
,
4094 unsigned int *ovly_map
,
4095 asection
**ovly_sections
,
4096 struct bfd_link_info
*info
)
4100 for (j
= base
; j
< count
&& ovly_map
[j
] == ovlynum
; j
++)
4102 asection
*sec
= ovly_sections
[2 * j
];
4104 if (fprintf (script
, " %s%c%s (%s)\n",
4105 (sec
->owner
->my_archive
!= NULL
4106 ? sec
->owner
->my_archive
->filename
: ""),
4107 info
->path_separator
,
4108 sec
->owner
->filename
,
4111 if (sec
->segment_mark
)
4113 struct call_info
*call
= find_pasted_call (sec
);
4114 while (call
!= NULL
)
4116 struct function_info
*call_fun
= call
->fun
;
4117 sec
= call_fun
->sec
;
4118 if (fprintf (script
, " %s%c%s (%s)\n",
4119 (sec
->owner
->my_archive
!= NULL
4120 ? sec
->owner
->my_archive
->filename
: ""),
4121 info
->path_separator
,
4122 sec
->owner
->filename
,
4125 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
4126 if (call
->is_pasted
)
4132 for (j
= base
; j
< count
&& ovly_map
[j
] == ovlynum
; j
++)
4134 asection
*sec
= ovly_sections
[2 * j
+ 1];
4136 && fprintf (script
, " %s%c%s (%s)\n",
4137 (sec
->owner
->my_archive
!= NULL
4138 ? sec
->owner
->my_archive
->filename
: ""),
4139 info
->path_separator
,
4140 sec
->owner
->filename
,
4144 sec
= ovly_sections
[2 * j
];
4145 if (sec
->segment_mark
)
4147 struct call_info
*call
= find_pasted_call (sec
);
4148 while (call
!= NULL
)
4150 struct function_info
*call_fun
= call
->fun
;
4151 sec
= call_fun
->rodata
;
4153 && fprintf (script
, " %s%c%s (%s)\n",
4154 (sec
->owner
->my_archive
!= NULL
4155 ? sec
->owner
->my_archive
->filename
: ""),
4156 info
->path_separator
,
4157 sec
->owner
->filename
,
4160 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
4161 if (call
->is_pasted
)
4170 /* Handle --auto-overlay. */
4173 spu_elf_auto_overlay (struct bfd_link_info
*info
)
4177 struct elf_segment_map
*m
;
4178 unsigned int fixed_size
, lo
, hi
;
4179 unsigned int reserved
;
4180 struct spu_link_hash_table
*htab
;
4181 unsigned int base
, i
, count
, bfd_count
;
4182 unsigned int region
, ovlynum
;
4183 asection
**ovly_sections
, **ovly_p
;
4184 unsigned int *ovly_map
;
4186 unsigned int total_overlay_size
, overlay_size
;
4187 const char *ovly_mgr_entry
;
4188 struct elf_link_hash_entry
*h
;
4189 struct _mos_param mos_param
;
4190 struct _uos_param uos_param
;
4191 struct function_info dummy_caller
;
4193 /* Find the extents of our loadable image. */
4194 lo
= (unsigned int) -1;
4196 for (m
= elf_seg_map (info
->output_bfd
); m
!= NULL
; m
= m
->next
)
4197 if (m
->p_type
== PT_LOAD
)
4198 for (i
= 0; i
< m
->count
; i
++)
4199 if (m
->sections
[i
]->size
!= 0)
4201 if (m
->sections
[i
]->vma
< lo
)
4202 lo
= m
->sections
[i
]->vma
;
4203 if (m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1 > hi
)
4204 hi
= m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1;
4206 fixed_size
= hi
+ 1 - lo
;
4208 if (!discover_functions (info
))
4211 if (!build_call_tree (info
))
4214 htab
= spu_hash_table (info
);
4215 reserved
= htab
->params
->auto_overlay_reserved
;
4218 struct _sum_stack_param sum_stack_param
;
4220 sum_stack_param
.emit_stack_syms
= 0;
4221 sum_stack_param
.overall_stack
= 0;
4222 if (!for_each_node (sum_stack
, info
, &sum_stack_param
, TRUE
))
4224 reserved
= (sum_stack_param
.overall_stack
4225 + htab
->params
->extra_stack_space
);
4228 /* No need for overlays if everything already fits. */
4229 if (fixed_size
+ reserved
<= htab
->local_store
4230 && htab
->params
->ovly_flavour
!= ovly_soft_icache
)
4232 htab
->params
->auto_overlay
= 0;
4236 uos_param
.exclude_input_section
= 0;
4237 uos_param
.exclude_output_section
4238 = bfd_get_section_by_name (info
->output_bfd
, ".interrupt");
4240 ovly_mgr_entry
= "__ovly_load";
4241 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4242 ovly_mgr_entry
= "__icache_br_handler";
4243 h
= elf_link_hash_lookup (&htab
->elf
, ovly_mgr_entry
,
4244 FALSE
, FALSE
, FALSE
);
4246 && (h
->root
.type
== bfd_link_hash_defined
4247 || h
->root
.type
== bfd_link_hash_defweak
)
4250 /* We have a user supplied overlay manager. */
4251 uos_param
.exclude_input_section
= h
->root
.u
.def
.section
;
4255 /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
4256 builtin version to .text, and will adjust .text size. */
4257 fixed_size
+= (*htab
->params
->spu_elf_load_ovl_mgr
) ();
4260 /* Mark overlay sections, and find max overlay section size. */
4261 mos_param
.max_overlay_size
= 0;
4262 if (!for_each_node (mark_overlay_section
, info
, &mos_param
, TRUE
))
4265 /* We can't put the overlay manager or interrupt routines in
4267 uos_param
.clearing
= 0;
4268 if ((uos_param
.exclude_input_section
4269 || uos_param
.exclude_output_section
)
4270 && !for_each_node (unmark_overlay_section
, info
, &uos_param
, TRUE
))
4274 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
4276 bfd_arr
= bfd_malloc (bfd_count
* sizeof (*bfd_arr
));
4277 if (bfd_arr
== NULL
)
4280 /* Count overlay sections, and subtract their sizes from "fixed_size". */
4283 total_overlay_size
= 0;
4284 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
4286 extern const bfd_target spu_elf32_vec
;
4288 unsigned int old_count
;
4290 if (ibfd
->xvec
!= &spu_elf32_vec
)
4294 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
4295 if (sec
->linker_mark
)
4297 if ((sec
->flags
& SEC_CODE
) != 0)
4299 fixed_size
-= sec
->size
;
4300 total_overlay_size
+= sec
->size
;
4302 else if ((sec
->flags
& (SEC_ALLOC
| SEC_LOAD
)) == (SEC_ALLOC
| SEC_LOAD
)
4303 && sec
->output_section
->owner
== info
->output_bfd
4304 && strncmp (sec
->output_section
->name
, ".ovl.init", 9) == 0)
4305 fixed_size
-= sec
->size
;
4306 if (count
!= old_count
)
4307 bfd_arr
[bfd_count
++] = ibfd
;
4310 /* Since the overlay link script selects sections by file name and
4311 section name, ensure that file names are unique. */
4314 bfd_boolean ok
= TRUE
;
4316 qsort (bfd_arr
, bfd_count
, sizeof (*bfd_arr
), sort_bfds
);
4317 for (i
= 1; i
< bfd_count
; ++i
)
4318 if (filename_cmp (bfd_arr
[i
- 1]->filename
, bfd_arr
[i
]->filename
) == 0)
4320 if (bfd_arr
[i
- 1]->my_archive
== bfd_arr
[i
]->my_archive
)
4322 if (bfd_arr
[i
- 1]->my_archive
&& bfd_arr
[i
]->my_archive
)
4323 info
->callbacks
->einfo (_("%s duplicated in %s\n"),
4324 bfd_arr
[i
]->filename
,
4325 bfd_arr
[i
]->my_archive
->filename
);
4327 info
->callbacks
->einfo (_("%s duplicated\n"),
4328 bfd_arr
[i
]->filename
);
4334 info
->callbacks
->einfo (_("sorry, no support for duplicate "
4335 "object files in auto-overlay script\n"));
4336 bfd_set_error (bfd_error_bad_value
);
4342 fixed_size
+= reserved
;
4343 fixed_size
+= htab
->non_ovly_stub
* ovl_stub_size (htab
->params
);
4344 if (fixed_size
+ mos_param
.max_overlay_size
<= htab
->local_store
)
4346 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4348 /* Stubs in the non-icache area are bigger. */
4349 fixed_size
+= htab
->non_ovly_stub
* 16;
4350 /* Space for icache manager tables.
4351 a) Tag array, one quadword per cache line.
4352 - word 0: ia address of present line, init to zero. */
4353 fixed_size
+= 16 << htab
->num_lines_log2
;
4354 /* b) Rewrite "to" list, one quadword per cache line. */
4355 fixed_size
+= 16 << htab
->num_lines_log2
;
4356 /* c) Rewrite "from" list, one byte per outgoing branch (rounded up
4357 to a power-of-two number of full quadwords) per cache line. */
4358 fixed_size
+= 16 << (htab
->fromelem_size_log2
4359 + htab
->num_lines_log2
);
4360 /* d) Pointer to __ea backing store (toe), 1 quadword. */
4365 /* Guess number of overlays. Assuming overlay buffer is on
4366 average only half full should be conservative. */
4367 ovlynum
= (total_overlay_size
* 2 * htab
->params
->num_lines
4368 / (htab
->local_store
- fixed_size
));
4369 /* Space for _ovly_table[], _ovly_buf_table[] and toe. */
4370 fixed_size
+= ovlynum
* 16 + 16 + 4 + 16;
4374 if (fixed_size
+ mos_param
.max_overlay_size
> htab
->local_store
)
4375 info
->callbacks
->einfo (_("non-overlay size of 0x%v plus maximum overlay "
4376 "size of 0x%v exceeds local store\n"),
4377 (bfd_vma
) fixed_size
,
4378 (bfd_vma
) mos_param
.max_overlay_size
);
4380 /* Now see if we should put some functions in the non-overlay area. */
4381 else if (fixed_size
< htab
->params
->auto_overlay_fixed
)
4383 unsigned int max_fixed
, lib_size
;
4385 max_fixed
= htab
->local_store
- mos_param
.max_overlay_size
;
4386 if (max_fixed
> htab
->params
->auto_overlay_fixed
)
4387 max_fixed
= htab
->params
->auto_overlay_fixed
;
4388 lib_size
= max_fixed
- fixed_size
;
4389 lib_size
= auto_ovl_lib_functions (info
, lib_size
);
4390 if (lib_size
== (unsigned int) -1)
4392 fixed_size
= max_fixed
- lib_size
;
4395 /* Build an array of sections, suitably sorted to place into
4397 ovly_sections
= bfd_malloc (2 * count
* sizeof (*ovly_sections
));
4398 if (ovly_sections
== NULL
)
4400 ovly_p
= ovly_sections
;
4401 if (!for_each_node (collect_overlays
, info
, &ovly_p
, TRUE
))
4403 count
= (size_t) (ovly_p
- ovly_sections
) / 2;
4404 ovly_map
= bfd_malloc (count
* sizeof (*ovly_map
));
4405 if (ovly_map
== NULL
)
4408 memset (&dummy_caller
, 0, sizeof (dummy_caller
));
4409 overlay_size
= (htab
->local_store
- fixed_size
) / htab
->params
->num_lines
;
4410 if (htab
->params
->line_size
!= 0)
4411 overlay_size
= htab
->params
->line_size
;
4414 while (base
< count
)
4416 unsigned int size
= 0, rosize
= 0, roalign
= 0;
4418 for (i
= base
; i
< count
; i
++)
4420 asection
*sec
, *rosec
;
4421 unsigned int tmp
, rotmp
;
4422 unsigned int num_stubs
;
4423 struct call_info
*call
, *pasty
;
4424 struct _spu_elf_section_data
*sec_data
;
4425 struct spu_elf_stack_info
*sinfo
;
4428 /* See whether we can add this section to the current
4429 overlay without overflowing our overlay buffer. */
4430 sec
= ovly_sections
[2 * i
];
4431 tmp
= align_power (size
, sec
->alignment_power
) + sec
->size
;
4433 rosec
= ovly_sections
[2 * i
+ 1];
4436 rotmp
= align_power (rotmp
, rosec
->alignment_power
) + rosec
->size
;
4437 if (roalign
< rosec
->alignment_power
)
4438 roalign
= rosec
->alignment_power
;
4440 if (align_power (tmp
, roalign
) + rotmp
> overlay_size
)
4442 if (sec
->segment_mark
)
4444 /* Pasted sections must stay together, so add their
4446 pasty
= find_pasted_call (sec
);
4447 while (pasty
!= NULL
)
4449 struct function_info
*call_fun
= pasty
->fun
;
4450 tmp
= (align_power (tmp
, call_fun
->sec
->alignment_power
)
4451 + call_fun
->sec
->size
);
4452 if (call_fun
->rodata
)
4454 rotmp
= (align_power (rotmp
,
4455 call_fun
->rodata
->alignment_power
)
4456 + call_fun
->rodata
->size
);
4457 if (roalign
< rosec
->alignment_power
)
4458 roalign
= rosec
->alignment_power
;
4460 for (pasty
= call_fun
->call_list
; pasty
; pasty
= pasty
->next
)
4461 if (pasty
->is_pasted
)
4465 if (align_power (tmp
, roalign
) + rotmp
> overlay_size
)
4468 /* If we add this section, we might need new overlay call
4469 stubs. Add any overlay section calls to dummy_call. */
4471 sec_data
= spu_elf_section_data (sec
);
4472 sinfo
= sec_data
->u
.i
.stack_info
;
4473 for (k
= 0; k
< (unsigned) sinfo
->num_fun
; ++k
)
4474 for (call
= sinfo
->fun
[k
].call_list
; call
; call
= call
->next
)
4475 if (call
->is_pasted
)
4477 BFD_ASSERT (pasty
== NULL
);
4480 else if (call
->fun
->sec
->linker_mark
)
4482 if (!copy_callee (&dummy_caller
, call
))
4485 while (pasty
!= NULL
)
4487 struct function_info
*call_fun
= pasty
->fun
;
4489 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
4490 if (call
->is_pasted
)
4492 BFD_ASSERT (pasty
== NULL
);
4495 else if (!copy_callee (&dummy_caller
, call
))
4499 /* Calculate call stub size. */
4501 for (call
= dummy_caller
.call_list
; call
; call
= call
->next
)
4503 unsigned int stub_delta
= 1;
4505 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4506 stub_delta
= call
->count
;
4507 num_stubs
+= stub_delta
;
4509 /* If the call is within this overlay, we won't need a
4511 for (k
= base
; k
< i
+ 1; k
++)
4512 if (call
->fun
->sec
== ovly_sections
[2 * k
])
4514 num_stubs
-= stub_delta
;
4518 if (htab
->params
->ovly_flavour
== ovly_soft_icache
4519 && num_stubs
> htab
->params
->max_branch
)
4521 if (align_power (tmp
, roalign
) + rotmp
4522 + num_stubs
* ovl_stub_size (htab
->params
) > overlay_size
)
4530 info
->callbacks
->einfo (_("%B:%A%s exceeds overlay size\n"),
4531 ovly_sections
[2 * i
]->owner
,
4532 ovly_sections
[2 * i
],
4533 ovly_sections
[2 * i
+ 1] ? " + rodata" : "");
4534 bfd_set_error (bfd_error_bad_value
);
4538 while (dummy_caller
.call_list
!= NULL
)
4540 struct call_info
*call
= dummy_caller
.call_list
;
4541 dummy_caller
.call_list
= call
->next
;
4547 ovly_map
[base
++] = ovlynum
;
4550 script
= htab
->params
->spu_elf_open_overlay_script ();
4552 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4554 if (fprintf (script
, "SECTIONS\n{\n") <= 0)
4557 if (fprintf (script
,
4558 " . = ALIGN (%u);\n"
4559 " .ovl.init : { *(.ovl.init) }\n"
4560 " . = ABSOLUTE (ADDR (.ovl.init));\n",
4561 htab
->params
->line_size
) <= 0)
4566 while (base
< count
)
4568 unsigned int indx
= ovlynum
- 1;
4569 unsigned int vma
, lma
;
4571 vma
= (indx
& (htab
->params
->num_lines
- 1)) << htab
->line_size_log2
;
4572 lma
= vma
+ (((indx
>> htab
->num_lines_log2
) + 1) << 18);
4574 if (fprintf (script
, " .ovly%u ABSOLUTE (ADDR (.ovl.init)) + %u "
4575 ": AT (LOADADDR (.ovl.init) + %u) {\n",
4576 ovlynum
, vma
, lma
) <= 0)
4579 base
= print_one_overlay_section (script
, base
, count
, ovlynum
,
4580 ovly_map
, ovly_sections
, info
);
4581 if (base
== (unsigned) -1)
4584 if (fprintf (script
, " }\n") <= 0)
4590 if (fprintf (script
, " . = ABSOLUTE (ADDR (.ovl.init)) + %u;\n",
4591 1 << (htab
->num_lines_log2
+ htab
->line_size_log2
)) <= 0)
4594 if (fprintf (script
, "}\nINSERT AFTER .toe;\n") <= 0)
4599 if (fprintf (script
, "SECTIONS\n{\n") <= 0)
4602 if (fprintf (script
,
4603 " . = ALIGN (16);\n"
4604 " .ovl.init : { *(.ovl.init) }\n"
4605 " . = ABSOLUTE (ADDR (.ovl.init));\n") <= 0)
4608 for (region
= 1; region
<= htab
->params
->num_lines
; region
++)
4612 while (base
< count
&& ovly_map
[base
] < ovlynum
)
4620 /* We need to set lma since we are overlaying .ovl.init. */
4621 if (fprintf (script
,
4622 " OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))\n {\n") <= 0)
4627 if (fprintf (script
, " OVERLAY :\n {\n") <= 0)
4631 while (base
< count
)
4633 if (fprintf (script
, " .ovly%u {\n", ovlynum
) <= 0)
4636 base
= print_one_overlay_section (script
, base
, count
, ovlynum
,
4637 ovly_map
, ovly_sections
, info
);
4638 if (base
== (unsigned) -1)
4641 if (fprintf (script
, " }\n") <= 0)
4644 ovlynum
+= htab
->params
->num_lines
;
4645 while (base
< count
&& ovly_map
[base
] < ovlynum
)
4649 if (fprintf (script
, " }\n") <= 0)
4653 if (fprintf (script
, "}\nINSERT BEFORE .text;\n") <= 0)
4658 free (ovly_sections
);
4660 if (fclose (script
) != 0)
4663 if (htab
->params
->auto_overlay
& AUTO_RELINK
)
4664 (*htab
->params
->spu_elf_relink
) ();
4669 bfd_set_error (bfd_error_system_call
);
4671 info
->callbacks
->einfo ("%F%P: auto overlay error: %E\n");
4675 /* Provide an estimate of total stack required. */
4678 spu_elf_stack_analysis (struct bfd_link_info
*info
)
4680 struct spu_link_hash_table
*htab
;
4681 struct _sum_stack_param sum_stack_param
;
4683 if (!discover_functions (info
))
4686 if (!build_call_tree (info
))
4689 htab
= spu_hash_table (info
);
4690 if (htab
->params
->stack_analysis
)
4692 info
->callbacks
->info (_("Stack size for call graph root nodes.\n"));
4693 info
->callbacks
->minfo (_("\nStack size for functions. "
4694 "Annotations: '*' max stack, 't' tail call\n"));
4697 sum_stack_param
.emit_stack_syms
= htab
->params
->emit_stack_syms
;
4698 sum_stack_param
.overall_stack
= 0;
4699 if (!for_each_node (sum_stack
, info
, &sum_stack_param
, TRUE
))
4702 if (htab
->params
->stack_analysis
)
4703 info
->callbacks
->info (_("Maximum stack required is 0x%v\n"),
4704 (bfd_vma
) sum_stack_param
.overall_stack
);
4708 /* Perform a final link. */
4711 spu_elf_final_link (bfd
*output_bfd
, struct bfd_link_info
*info
)
4713 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
4715 if (htab
->params
->auto_overlay
)
4716 spu_elf_auto_overlay (info
);
4718 if ((htab
->params
->stack_analysis
4719 || (htab
->params
->ovly_flavour
== ovly_soft_icache
4720 && htab
->params
->lrlive_analysis
))
4721 && !spu_elf_stack_analysis (info
))
4722 info
->callbacks
->einfo ("%X%P: stack/lrlive analysis error: %E\n");
4724 if (!spu_elf_build_stubs (info
))
4725 info
->callbacks
->einfo ("%F%P: can not build overlay stubs: %E\n");
4727 return bfd_elf_final_link (output_bfd
, info
);
4730 /* Called when not normally emitting relocs, ie. !info->relocatable
4731 and !info->emitrelocations. Returns a count of special relocs
4732 that need to be emitted. */
4735 spu_elf_count_relocs (struct bfd_link_info
*info
, asection
*sec
)
4737 Elf_Internal_Rela
*relocs
;
4738 unsigned int count
= 0;
4740 relocs
= _bfd_elf_link_read_relocs (sec
->owner
, sec
, NULL
, NULL
,
4744 Elf_Internal_Rela
*rel
;
4745 Elf_Internal_Rela
*relend
= relocs
+ sec
->reloc_count
;
4747 for (rel
= relocs
; rel
< relend
; rel
++)
4749 int r_type
= ELF32_R_TYPE (rel
->r_info
);
4750 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
4754 if (elf_section_data (sec
)->relocs
!= relocs
)
4761 /* Functions for adding fixup records to .fixup */
4763 #define FIXUP_RECORD_SIZE 4
4765 #define FIXUP_PUT(output_bfd,htab,index,addr) \
4766 bfd_put_32 (output_bfd, addr, \
4767 htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
4768 #define FIXUP_GET(output_bfd,htab,index) \
4769 bfd_get_32 (output_bfd, \
4770 htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
4772 /* Store OFFSET in .fixup. This assumes it will be called with an
4773 increasing OFFSET. When this OFFSET fits with the last base offset,
4774 it just sets a bit, otherwise it adds a new fixup record. */
4776 spu_elf_emit_fixup (bfd
* output_bfd
, struct bfd_link_info
*info
,
4779 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
4780 asection
*sfixup
= htab
->sfixup
;
4781 bfd_vma qaddr
= offset
& ~(bfd_vma
) 15;
4782 bfd_vma bit
= ((bfd_vma
) 8) >> ((offset
& 15) >> 2);
4783 if (sfixup
->reloc_count
== 0)
4785 FIXUP_PUT (output_bfd
, htab
, 0, qaddr
| bit
);
4786 sfixup
->reloc_count
++;
4790 bfd_vma base
= FIXUP_GET (output_bfd
, htab
, sfixup
->reloc_count
- 1);
4791 if (qaddr
!= (base
& ~(bfd_vma
) 15))
4793 if ((sfixup
->reloc_count
+ 1) * FIXUP_RECORD_SIZE
> sfixup
->size
)
4794 (*_bfd_error_handler
) (_("fatal error while creating .fixup"));
4795 FIXUP_PUT (output_bfd
, htab
, sfixup
->reloc_count
, qaddr
| bit
);
4796 sfixup
->reloc_count
++;
4799 FIXUP_PUT (output_bfd
, htab
, sfixup
->reloc_count
- 1, base
| bit
);
4803 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
4806 spu_elf_relocate_section (bfd
*output_bfd
,
4807 struct bfd_link_info
*info
,
4809 asection
*input_section
,
4811 Elf_Internal_Rela
*relocs
,
4812 Elf_Internal_Sym
*local_syms
,
4813 asection
**local_sections
)
4815 Elf_Internal_Shdr
*symtab_hdr
;
4816 struct elf_link_hash_entry
**sym_hashes
;
4817 Elf_Internal_Rela
*rel
, *relend
;
4818 struct spu_link_hash_table
*htab
;
4821 bfd_boolean emit_these_relocs
= FALSE
;
4822 bfd_boolean is_ea_sym
;
4824 unsigned int iovl
= 0;
4826 htab
= spu_hash_table (info
);
4827 stubs
= (htab
->stub_sec
!= NULL
4828 && maybe_needs_stubs (input_section
));
4829 iovl
= overlay_index (input_section
);
4830 ea
= bfd_get_section_by_name (output_bfd
, "._ea");
4831 symtab_hdr
= &elf_tdata (input_bfd
)->symtab_hdr
;
4832 sym_hashes
= (struct elf_link_hash_entry
**) (elf_sym_hashes (input_bfd
));
4835 relend
= relocs
+ input_section
->reloc_count
;
4836 for (; rel
< relend
; rel
++)
4839 reloc_howto_type
*howto
;
4840 unsigned int r_symndx
;
4841 Elf_Internal_Sym
*sym
;
4843 struct elf_link_hash_entry
*h
;
4844 const char *sym_name
;
4847 bfd_reloc_status_type r
;
4848 bfd_boolean unresolved_reloc
;
4849 enum _stub_type stub_type
;
4851 r_symndx
= ELF32_R_SYM (rel
->r_info
);
4852 r_type
= ELF32_R_TYPE (rel
->r_info
);
4853 howto
= elf_howto_table
+ r_type
;
4854 unresolved_reloc
= FALSE
;
4858 if (r_symndx
< symtab_hdr
->sh_info
)
4860 sym
= local_syms
+ r_symndx
;
4861 sec
= local_sections
[r_symndx
];
4862 sym_name
= bfd_elf_sym_name (input_bfd
, symtab_hdr
, sym
, sec
);
4863 relocation
= _bfd_elf_rela_local_sym (output_bfd
, sym
, &sec
, rel
);
4867 if (sym_hashes
== NULL
)
4870 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
4872 if (info
->wrap_hash
!= NULL
4873 && (input_section
->flags
& SEC_DEBUGGING
) != 0)
4874 h
= ((struct elf_link_hash_entry
*)
4875 unwrap_hash_lookup (info
, input_bfd
, &h
->root
));
4877 while (h
->root
.type
== bfd_link_hash_indirect
4878 || h
->root
.type
== bfd_link_hash_warning
)
4879 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
4882 if (h
->root
.type
== bfd_link_hash_defined
4883 || h
->root
.type
== bfd_link_hash_defweak
)
4885 sec
= h
->root
.u
.def
.section
;
4887 || sec
->output_section
== NULL
)
4888 /* Set a flag that will be cleared later if we find a
4889 relocation value for this symbol. output_section
4890 is typically NULL for symbols satisfied by a shared
4892 unresolved_reloc
= TRUE
;
4894 relocation
= (h
->root
.u
.def
.value
4895 + sec
->output_section
->vma
4896 + sec
->output_offset
);
4898 else if (h
->root
.type
== bfd_link_hash_undefweak
)
4900 else if (info
->unresolved_syms_in_objects
== RM_IGNORE
4901 && ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
)
4903 else if (!info
->relocatable
4904 && !(r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
))
4907 err
= (info
->unresolved_syms_in_objects
== RM_GENERATE_ERROR
4908 || ELF_ST_VISIBILITY (h
->other
) != STV_DEFAULT
);
4909 if (!info
->callbacks
->undefined_symbol (info
,
4910 h
->root
.root
.string
,
4913 rel
->r_offset
, err
))
4916 sym_name
= h
->root
.root
.string
;
4919 if (sec
!= NULL
&& discarded_section (sec
))
4920 RELOC_AGAINST_DISCARDED_SECTION (info
, input_bfd
, input_section
,
4921 rel
, 1, relend
, howto
, 0, contents
);
4923 if (info
->relocatable
)
4926 /* Change "a rt,ra,rb" to "ai rt,ra,0". */
4927 if (r_type
== R_SPU_ADD_PIC
4929 && !(h
->def_regular
|| ELF_COMMON_DEF_P (h
)))
4931 bfd_byte
*loc
= contents
+ rel
->r_offset
;
4937 is_ea_sym
= (ea
!= NULL
4939 && sec
->output_section
== ea
);
4941 /* If this symbol is in an overlay area, we may need to relocate
4942 to the overlay stub. */
4943 addend
= rel
->r_addend
;
4946 && (stub_type
= needs_ovl_stub (h
, sym
, sec
, input_section
, rel
,
4947 contents
, info
)) != no_stub
)
4949 unsigned int ovl
= 0;
4950 struct got_entry
*g
, **head
;
4952 if (stub_type
!= nonovl_stub
)
4956 head
= &h
->got
.glist
;
4958 head
= elf_local_got_ents (input_bfd
) + r_symndx
;
4960 for (g
= *head
; g
!= NULL
; g
= g
->next
)
4961 if (htab
->params
->ovly_flavour
== ovly_soft_icache
4963 && g
->br_addr
== (rel
->r_offset
4964 + input_section
->output_offset
4965 + input_section
->output_section
->vma
))
4966 : g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
4971 relocation
= g
->stub_addr
;
4976 /* For soft icache, encode the overlay index into addresses. */
4977 if (htab
->params
->ovly_flavour
== ovly_soft_icache
4978 && (r_type
== R_SPU_ADDR16_HI
4979 || r_type
== R_SPU_ADDR32
|| r_type
== R_SPU_REL32
)
4982 unsigned int ovl
= overlay_index (sec
);
4985 unsigned int set_id
= ((ovl
- 1) >> htab
->num_lines_log2
) + 1;
4986 relocation
+= set_id
<< 18;
4991 if (htab
->params
->emit_fixups
&& !info
->relocatable
4992 && (input_section
->flags
& SEC_ALLOC
) != 0
4993 && r_type
== R_SPU_ADDR32
)
4996 offset
= rel
->r_offset
+ input_section
->output_section
->vma
4997 + input_section
->output_offset
;
4998 spu_elf_emit_fixup (output_bfd
, info
, offset
);
5001 if (unresolved_reloc
)
5003 else if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
5007 /* ._ea is a special section that isn't allocated in SPU
5008 memory, but rather occupies space in PPU memory as
5009 part of an embedded ELF image. If this reloc is
5010 against a symbol defined in ._ea, then transform the
5011 reloc into an equivalent one without a symbol
5012 relative to the start of the ELF image. */
5013 rel
->r_addend
+= (relocation
5015 + elf_section_data (ea
)->this_hdr
.sh_offset
);
5016 rel
->r_info
= ELF32_R_INFO (0, r_type
);
5018 emit_these_relocs
= TRUE
;
5022 unresolved_reloc
= TRUE
;
5024 if (unresolved_reloc
5025 && _bfd_elf_section_offset (output_bfd
, info
, input_section
,
5026 rel
->r_offset
) != (bfd_vma
) -1)
5028 (*_bfd_error_handler
)
5029 (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
5031 bfd_get_section_name (input_bfd
, input_section
),
5032 (long) rel
->r_offset
,
5038 r
= _bfd_final_link_relocate (howto
,
5042 rel
->r_offset
, relocation
, addend
);
5044 if (r
!= bfd_reloc_ok
)
5046 const char *msg
= (const char *) 0;
5050 case bfd_reloc_overflow
:
5051 if (!((*info
->callbacks
->reloc_overflow
)
5052 (info
, (h
? &h
->root
: NULL
), sym_name
, howto
->name
,
5053 (bfd_vma
) 0, input_bfd
, input_section
, rel
->r_offset
)))
5057 case bfd_reloc_undefined
:
5058 if (!((*info
->callbacks
->undefined_symbol
)
5059 (info
, sym_name
, input_bfd
, input_section
,
5060 rel
->r_offset
, TRUE
)))
5064 case bfd_reloc_outofrange
:
5065 msg
= _("internal error: out of range error");
5068 case bfd_reloc_notsupported
:
5069 msg
= _("internal error: unsupported relocation error");
5072 case bfd_reloc_dangerous
:
5073 msg
= _("internal error: dangerous error");
5077 msg
= _("internal error: unknown error");
5082 if (!((*info
->callbacks
->warning
)
5083 (info
, msg
, sym_name
, input_bfd
, input_section
,
5092 && emit_these_relocs
5093 && !info
->emitrelocations
)
5095 Elf_Internal_Rela
*wrel
;
5096 Elf_Internal_Shdr
*rel_hdr
;
5098 wrel
= rel
= relocs
;
5099 relend
= relocs
+ input_section
->reloc_count
;
5100 for (; rel
< relend
; rel
++)
5104 r_type
= ELF32_R_TYPE (rel
->r_info
);
5105 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
5108 input_section
->reloc_count
= wrel
- relocs
;
5109 /* Backflips for _bfd_elf_link_output_relocs. */
5110 rel_hdr
= _bfd_elf_single_rel_hdr (input_section
);
5111 rel_hdr
->sh_size
= input_section
->reloc_count
* rel_hdr
->sh_entsize
;
5119 spu_elf_finish_dynamic_sections (bfd
*output_bfd ATTRIBUTE_UNUSED
,
5120 struct bfd_link_info
*info ATTRIBUTE_UNUSED
)
5125 /* Adjust _SPUEAR_ syms to point at their overlay stubs. */
5128 spu_elf_output_symbol_hook (struct bfd_link_info
*info
,
5129 const char *sym_name ATTRIBUTE_UNUSED
,
5130 Elf_Internal_Sym
*sym
,
5131 asection
*sym_sec ATTRIBUTE_UNUSED
,
5132 struct elf_link_hash_entry
*h
)
5134 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
5136 if (!info
->relocatable
5137 && htab
->stub_sec
!= NULL
5139 && (h
->root
.type
== bfd_link_hash_defined
5140 || h
->root
.type
== bfd_link_hash_defweak
)
5142 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0)
5144 struct got_entry
*g
;
5146 for (g
= h
->got
.glist
; g
!= NULL
; g
= g
->next
)
5147 if (htab
->params
->ovly_flavour
== ovly_soft_icache
5148 ? g
->br_addr
== g
->stub_addr
5149 : g
->addend
== 0 && g
->ovl
== 0)
5151 sym
->st_shndx
= (_bfd_elf_section_from_bfd_section
5152 (htab
->stub_sec
[0]->output_section
->owner
,
5153 htab
->stub_sec
[0]->output_section
));
5154 sym
->st_value
= g
->stub_addr
;
/* Non-zero when linking a CELL SPU plugin, emitted as ET_DYN by
   spu_elf_post_process_headers.  */
static int spu_plugin = 0;

/* Record whether we are producing a plugin.  */

void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}
5170 /* Set ELF header e_type for plugins. */
5173 spu_elf_post_process_headers (bfd
*abfd
, struct bfd_link_info
*info
)
5177 Elf_Internal_Ehdr
*i_ehdrp
= elf_elfheader (abfd
);
5179 i_ehdrp
->e_type
= ET_DYN
;
5182 _bfd_elf_post_process_headers (abfd
, info
);
5185 /* We may add an extra PT_LOAD segment for .toe. We also need extra
5186 segments for overlays. */
5189 spu_elf_additional_program_headers (bfd
*abfd
, struct bfd_link_info
*info
)
5196 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
5197 extra
= htab
->num_overlays
;
5203 sec
= bfd_get_section_by_name (abfd
, ".toe");
5204 if (sec
!= NULL
&& (sec
->flags
& SEC_LOAD
) != 0)
5210 /* Remove .toe section from other PT_LOAD segments and put it in
5211 a segment of its own. Put overlays in separate segments too. */
5214 spu_elf_modify_segment_map (bfd
*abfd
, struct bfd_link_info
*info
)
5217 struct elf_segment_map
*m
, *m_overlay
;
5218 struct elf_segment_map
**p
, **p_overlay
;
5224 toe
= bfd_get_section_by_name (abfd
, ".toe");
5225 for (m
= elf_seg_map (abfd
); m
!= NULL
; m
= m
->next
)
5226 if (m
->p_type
== PT_LOAD
&& m
->count
> 1)
5227 for (i
= 0; i
< m
->count
; i
++)
5228 if ((s
= m
->sections
[i
]) == toe
5229 || spu_elf_section_data (s
)->u
.o
.ovl_index
!= 0)
5231 struct elf_segment_map
*m2
;
5234 if (i
+ 1 < m
->count
)
5236 amt
= sizeof (struct elf_segment_map
);
5237 amt
+= (m
->count
- (i
+ 2)) * sizeof (m
->sections
[0]);
5238 m2
= bfd_zalloc (abfd
, amt
);
5241 m2
->count
= m
->count
- (i
+ 1);
5242 memcpy (m2
->sections
, m
->sections
+ i
+ 1,
5243 m2
->count
* sizeof (m
->sections
[0]));
5244 m2
->p_type
= PT_LOAD
;
5252 amt
= sizeof (struct elf_segment_map
);
5253 m2
= bfd_zalloc (abfd
, amt
);
5256 m2
->p_type
= PT_LOAD
;
5258 m2
->sections
[0] = s
;
5266 /* Some SPU ELF loaders ignore the PF_OVERLAY flag and just load all
5267 PT_LOAD segments. This can cause the .ovl.init section to be
5268 overwritten with the contents of some overlay segment. To work
5269 around this issue, we ensure that all PF_OVERLAY segments are
5270 sorted first amongst the program headers; this ensures that even
5271 with a broken loader, the .ovl.init section (which is not marked
5272 as PF_OVERLAY) will be placed into SPU local store on startup. */
5274 /* Move all overlay segments onto a separate list. */
5275 p
= &elf_seg_map (abfd
);
5276 p_overlay
= &m_overlay
;
5279 if ((*p
)->p_type
== PT_LOAD
&& (*p
)->count
== 1
5280 && spu_elf_section_data ((*p
)->sections
[0])->u
.o
.ovl_index
!= 0)
5285 p_overlay
= &m
->next
;
5292 /* Re-insert overlay segments at the head of the segment map. */
5293 *p_overlay
= elf_seg_map (abfd
);
5294 elf_seg_map (abfd
) = m_overlay
;
5299 /* Tweak the section type of .note.spu_name. */
5302 spu_elf_fake_sections (bfd
*obfd ATTRIBUTE_UNUSED
,
5303 Elf_Internal_Shdr
*hdr
,
5306 if (strcmp (sec
->name
, SPU_PTNOTE_SPUNAME
) == 0)
5307 hdr
->sh_type
= SHT_NOTE
;
5311 /* Tweak phdrs before writing them out. */
5314 spu_elf_modify_program_headers (bfd
*abfd
, struct bfd_link_info
*info
)
5316 const struct elf_backend_data
*bed
;
5317 struct elf_obj_tdata
*tdata
;
5318 Elf_Internal_Phdr
*phdr
, *last
;
5319 struct spu_link_hash_table
*htab
;
5326 bed
= get_elf_backend_data (abfd
);
5327 tdata
= elf_tdata (abfd
);
5329 count
= elf_program_header_size (abfd
) / bed
->s
->sizeof_phdr
;
5330 htab
= spu_hash_table (info
);
5331 if (htab
->num_overlays
!= 0)
5333 struct elf_segment_map
*m
;
5336 for (i
= 0, m
= elf_seg_map (abfd
); m
; ++i
, m
= m
->next
)
5338 && (o
= spu_elf_section_data (m
->sections
[0])->u
.o
.ovl_index
) != 0)
5340 /* Mark this as an overlay header. */
5341 phdr
[i
].p_flags
|= PF_OVERLAY
;
5343 if (htab
->ovtab
!= NULL
&& htab
->ovtab
->size
!= 0
5344 && htab
->params
->ovly_flavour
!= ovly_soft_icache
)
5346 bfd_byte
*p
= htab
->ovtab
->contents
;
5347 unsigned int off
= o
* 16 + 8;
5349 /* Write file_off into _ovly_table. */
5350 bfd_put_32 (htab
->ovtab
->owner
, phdr
[i
].p_offset
, p
+ off
);
5353 /* Soft-icache has its file offset put in .ovl.init. */
5354 if (htab
->init
!= NULL
&& htab
->init
->size
!= 0)
5356 bfd_vma val
= elf_section_data (htab
->ovl_sec
[0])->this_hdr
.sh_offset
;
5358 bfd_put_32 (htab
->init
->owner
, val
, htab
->init
->contents
+ 4);
5362 /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
5363 of 16. This should always be possible when using the standard
5364 linker scripts, but don't create overlapping segments if
5365 someone is playing games with linker scripts. */
5367 for (i
= count
; i
-- != 0; )
5368 if (phdr
[i
].p_type
== PT_LOAD
)
5372 adjust
= -phdr
[i
].p_filesz
& 15;
5375 && phdr
[i
].p_offset
+ phdr
[i
].p_filesz
> last
->p_offset
- adjust
)
5378 adjust
= -phdr
[i
].p_memsz
& 15;
5381 && phdr
[i
].p_filesz
!= 0
5382 && phdr
[i
].p_vaddr
+ phdr
[i
].p_memsz
> last
->p_vaddr
- adjust
5383 && phdr
[i
].p_vaddr
+ phdr
[i
].p_memsz
<= last
->p_vaddr
)
5386 if (phdr
[i
].p_filesz
!= 0)
5390 if (i
== (unsigned int) -1)
5391 for (i
= count
; i
-- != 0; )
5392 if (phdr
[i
].p_type
== PT_LOAD
)
5396 adjust
= -phdr
[i
].p_filesz
& 15;
5397 phdr
[i
].p_filesz
+= adjust
;
5399 adjust
= -phdr
[i
].p_memsz
& 15;
5400 phdr
[i
].p_memsz
+= adjust
;
5407 spu_elf_size_sections (bfd
* output_bfd
, struct bfd_link_info
*info
)
5409 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
5410 if (htab
->params
->emit_fixups
)
5412 asection
*sfixup
= htab
->sfixup
;
5413 int fixup_count
= 0;
5417 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link
.next
)
5421 if (bfd_get_flavour (ibfd
) != bfd_target_elf_flavour
)
5424 /* Walk over each section attached to the input bfd. */
5425 for (isec
= ibfd
->sections
; isec
!= NULL
; isec
= isec
->next
)
5427 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
5430 /* If there aren't any relocs, then there's nothing more
5432 if ((isec
->flags
& SEC_ALLOC
) == 0
5433 || (isec
->flags
& SEC_RELOC
) == 0
5434 || isec
->reloc_count
== 0)
5437 /* Get the relocs. */
5439 _bfd_elf_link_read_relocs (ibfd
, isec
, NULL
, NULL
,
5441 if (internal_relocs
== NULL
)
5444 /* 1 quadword can contain up to 4 R_SPU_ADDR32
5445 relocations. They are stored in a single word by
5446 saving the upper 28 bits of the address and setting the
5447 lower 4 bits to a bit mask of the words that have the
5448 relocation. BASE_END keeps track of the next quadword. */
5449 irela
= internal_relocs
;
5450 irelaend
= irela
+ isec
->reloc_count
;
5452 for (; irela
< irelaend
; irela
++)
5453 if (ELF32_R_TYPE (irela
->r_info
) == R_SPU_ADDR32
5454 && irela
->r_offset
>= base_end
)
5456 base_end
= (irela
->r_offset
& ~(bfd_vma
) 15) + 16;
5462 /* We always have a NULL fixup as a sentinel */
5463 size
= (fixup_count
+ 1) * FIXUP_RECORD_SIZE
;
5464 if (!bfd_set_section_size (output_bfd
, sfixup
, size
))
5466 sfixup
->contents
= (bfd_byte
*) bfd_zalloc (info
->input_bfds
, size
);
5467 if (sfixup
->contents
== NULL
)
5473 #define TARGET_BIG_SYM spu_elf32_vec
5474 #define TARGET_BIG_NAME "elf32-spu"
5475 #define ELF_ARCH bfd_arch_spu
5476 #define ELF_TARGET_ID SPU_ELF_DATA
5477 #define ELF_MACHINE_CODE EM_SPU
5478 /* This matches the alignment need for DMA. */
5479 #define ELF_MAXPAGESIZE 0x80
5480 #define elf_backend_rela_normal 1
5481 #define elf_backend_can_gc_sections 1
5483 #define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
5484 #define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
5485 #define elf_info_to_howto spu_elf_info_to_howto
5486 #define elf_backend_count_relocs spu_elf_count_relocs
5487 #define elf_backend_relocate_section spu_elf_relocate_section
5488 #define elf_backend_finish_dynamic_sections spu_elf_finish_dynamic_sections
5489 #define elf_backend_symbol_processing spu_elf_backend_symbol_processing
5490 #define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
5491 #define elf_backend_object_p spu_elf_object_p
5492 #define bfd_elf32_new_section_hook spu_elf_new_section_hook
5493 #define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create
5495 #define elf_backend_additional_program_headers spu_elf_additional_program_headers
5496 #define elf_backend_modify_segment_map spu_elf_modify_segment_map
5497 #define elf_backend_modify_program_headers spu_elf_modify_program_headers
5498 #define elf_backend_post_process_headers spu_elf_post_process_headers
5499 #define elf_backend_fake_sections spu_elf_fake_sections
5500 #define elf_backend_special_sections spu_elf_special_sections
5501 #define bfd_elf32_bfd_final_link spu_elf_final_link
5503 #include "elf32-target.h"