1 /* SPU specific support for 32-bit ELF
3 Copyright 2006, 2007, 2008 Free Software Foundation, Inc.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
22 #include "libiberty.h"
28 #include "elf32-spu.h"
30 /* We use RELA style relocs. Don't define USE_REL. */
32 static bfd_reloc_status_type
spu_elf_rel9 (bfd
*, arelent
*, asymbol
*,
36 /* Values of type 'enum elf_spu_reloc_type' are used to index this
37 array, so it must be declared in the order of that type. */
39 static reloc_howto_type elf_howto_table
[] = {
40 HOWTO (R_SPU_NONE
, 0, 0, 0, FALSE
, 0, complain_overflow_dont
,
41 bfd_elf_generic_reloc
, "SPU_NONE",
42 FALSE
, 0, 0x00000000, FALSE
),
43 HOWTO (R_SPU_ADDR10
, 4, 2, 10, FALSE
, 14, complain_overflow_bitfield
,
44 bfd_elf_generic_reloc
, "SPU_ADDR10",
45 FALSE
, 0, 0x00ffc000, FALSE
),
46 HOWTO (R_SPU_ADDR16
, 2, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
47 bfd_elf_generic_reloc
, "SPU_ADDR16",
48 FALSE
, 0, 0x007fff80, FALSE
),
49 HOWTO (R_SPU_ADDR16_HI
, 16, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
50 bfd_elf_generic_reloc
, "SPU_ADDR16_HI",
51 FALSE
, 0, 0x007fff80, FALSE
),
52 HOWTO (R_SPU_ADDR16_LO
, 0, 2, 16, FALSE
, 7, complain_overflow_dont
,
53 bfd_elf_generic_reloc
, "SPU_ADDR16_LO",
54 FALSE
, 0, 0x007fff80, FALSE
),
55 HOWTO (R_SPU_ADDR18
, 0, 2, 18, FALSE
, 7, complain_overflow_bitfield
,
56 bfd_elf_generic_reloc
, "SPU_ADDR18",
57 FALSE
, 0, 0x01ffff80, FALSE
),
58 HOWTO (R_SPU_ADDR32
, 0, 2, 32, FALSE
, 0, complain_overflow_dont
,
59 bfd_elf_generic_reloc
, "SPU_ADDR32",
60 FALSE
, 0, 0xffffffff, FALSE
),
61 HOWTO (R_SPU_REL16
, 2, 2, 16, TRUE
, 7, complain_overflow_bitfield
,
62 bfd_elf_generic_reloc
, "SPU_REL16",
63 FALSE
, 0, 0x007fff80, TRUE
),
64 HOWTO (R_SPU_ADDR7
, 0, 2, 7, FALSE
, 14, complain_overflow_dont
,
65 bfd_elf_generic_reloc
, "SPU_ADDR7",
66 FALSE
, 0, 0x001fc000, FALSE
),
67 HOWTO (R_SPU_REL9
, 2, 2, 9, TRUE
, 0, complain_overflow_signed
,
68 spu_elf_rel9
, "SPU_REL9",
69 FALSE
, 0, 0x0180007f, TRUE
),
70 HOWTO (R_SPU_REL9I
, 2, 2, 9, TRUE
, 0, complain_overflow_signed
,
71 spu_elf_rel9
, "SPU_REL9I",
72 FALSE
, 0, 0x0000c07f, TRUE
),
73 HOWTO (R_SPU_ADDR10I
, 0, 2, 10, FALSE
, 14, complain_overflow_signed
,
74 bfd_elf_generic_reloc
, "SPU_ADDR10I",
75 FALSE
, 0, 0x00ffc000, FALSE
),
76 HOWTO (R_SPU_ADDR16I
, 0, 2, 16, FALSE
, 7, complain_overflow_signed
,
77 bfd_elf_generic_reloc
, "SPU_ADDR16I",
78 FALSE
, 0, 0x007fff80, FALSE
),
79 HOWTO (R_SPU_REL32
, 0, 2, 32, TRUE
, 0, complain_overflow_dont
,
80 bfd_elf_generic_reloc
, "SPU_REL32",
81 FALSE
, 0, 0xffffffff, TRUE
),
82 HOWTO (R_SPU_ADDR16X
, 0, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
83 bfd_elf_generic_reloc
, "SPU_ADDR16X",
84 FALSE
, 0, 0x007fff80, FALSE
),
85 HOWTO (R_SPU_PPU32
, 0, 2, 32, FALSE
, 0, complain_overflow_dont
,
86 bfd_elf_generic_reloc
, "SPU_PPU32",
87 FALSE
, 0, 0xffffffff, FALSE
),
88 HOWTO (R_SPU_PPU64
, 0, 4, 64, FALSE
, 0, complain_overflow_dont
,
89 bfd_elf_generic_reloc
, "SPU_PPU64",
93 static struct bfd_elf_special_section
const spu_elf_special_sections
[] = {
94 { "._ea", 4, 0, SHT_PROGBITS
, SHF_WRITE
},
95 { ".toe", 4, 0, SHT_NOBITS
, SHF_ALLOC
},
99 static enum elf_spu_reloc_type
100 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code
)
106 case BFD_RELOC_SPU_IMM10W
:
108 case BFD_RELOC_SPU_IMM16W
:
110 case BFD_RELOC_SPU_LO16
:
111 return R_SPU_ADDR16_LO
;
112 case BFD_RELOC_SPU_HI16
:
113 return R_SPU_ADDR16_HI
;
114 case BFD_RELOC_SPU_IMM18
:
116 case BFD_RELOC_SPU_PCREL16
:
118 case BFD_RELOC_SPU_IMM7
:
120 case BFD_RELOC_SPU_IMM8
:
122 case BFD_RELOC_SPU_PCREL9a
:
124 case BFD_RELOC_SPU_PCREL9b
:
126 case BFD_RELOC_SPU_IMM10
:
127 return R_SPU_ADDR10I
;
128 case BFD_RELOC_SPU_IMM16
:
129 return R_SPU_ADDR16I
;
132 case BFD_RELOC_32_PCREL
:
134 case BFD_RELOC_SPU_PPU32
:
136 case BFD_RELOC_SPU_PPU64
:
142 spu_elf_info_to_howto (bfd
*abfd ATTRIBUTE_UNUSED
,
144 Elf_Internal_Rela
*dst
)
146 enum elf_spu_reloc_type r_type
;
148 r_type
= (enum elf_spu_reloc_type
) ELF32_R_TYPE (dst
->r_info
);
149 BFD_ASSERT (r_type
< R_SPU_max
);
150 cache_ptr
->howto
= &elf_howto_table
[(int) r_type
];
153 static reloc_howto_type
*
154 spu_elf_reloc_type_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
155 bfd_reloc_code_real_type code
)
157 enum elf_spu_reloc_type r_type
= spu_elf_bfd_to_reloc_type (code
);
159 if (r_type
== R_SPU_NONE
)
162 return elf_howto_table
+ r_type
;
165 static reloc_howto_type
*
166 spu_elf_reloc_name_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
171 for (i
= 0; i
< sizeof (elf_howto_table
) / sizeof (elf_howto_table
[0]); i
++)
172 if (elf_howto_table
[i
].name
!= NULL
173 && strcasecmp (elf_howto_table
[i
].name
, r_name
) == 0)
174 return &elf_howto_table
[i
];
179 /* Apply R_SPU_REL9 and R_SPU_REL9I relocs. */
181 static bfd_reloc_status_type
182 spu_elf_rel9 (bfd
*abfd
, arelent
*reloc_entry
, asymbol
*symbol
,
183 void *data
, asection
*input_section
,
184 bfd
*output_bfd
, char **error_message
)
186 bfd_size_type octets
;
190 /* If this is a relocatable link (output_bfd test tells us), just
191 call the generic function. Any adjustment will be done at final
193 if (output_bfd
!= NULL
)
194 return bfd_elf_generic_reloc (abfd
, reloc_entry
, symbol
, data
,
195 input_section
, output_bfd
, error_message
);
197 if (reloc_entry
->address
> bfd_get_section_limit (abfd
, input_section
))
198 return bfd_reloc_outofrange
;
199 octets
= reloc_entry
->address
* bfd_octets_per_byte (abfd
);
201 /* Get symbol value. */
203 if (!bfd_is_com_section (symbol
->section
))
205 if (symbol
->section
->output_section
)
206 val
+= symbol
->section
->output_section
->vma
;
208 val
+= reloc_entry
->addend
;
210 /* Make it pc-relative. */
211 val
-= input_section
->output_section
->vma
+ input_section
->output_offset
;
214 if (val
+ 256 >= 512)
215 return bfd_reloc_overflow
;
217 insn
= bfd_get_32 (abfd
, (bfd_byte
*) data
+ octets
);
219 /* Move two high bits of value to REL9I and REL9 position.
220 The mask will take care of selecting the right field. */
221 val
= (val
& 0x7f) | ((val
& 0x180) << 7) | ((val
& 0x180) << 16);
222 insn
&= ~reloc_entry
->howto
->dst_mask
;
223 insn
|= val
& reloc_entry
->howto
->dst_mask
;
224 bfd_put_32 (abfd
, insn
, (bfd_byte
*) data
+ octets
);
229 spu_elf_new_section_hook (bfd
*abfd
, asection
*sec
)
231 if (!sec
->used_by_bfd
)
233 struct _spu_elf_section_data
*sdata
;
235 sdata
= bfd_zalloc (abfd
, sizeof (*sdata
));
238 sec
->used_by_bfd
= sdata
;
241 return _bfd_elf_new_section_hook (abfd
, sec
);
244 /* Set up overlay info for executables. */
247 spu_elf_object_p (bfd
*abfd
)
249 if ((abfd
->flags
& (EXEC_P
| DYNAMIC
)) != 0)
251 unsigned int i
, num_ovl
, num_buf
;
252 Elf_Internal_Phdr
*phdr
= elf_tdata (abfd
)->phdr
;
253 Elf_Internal_Ehdr
*ehdr
= elf_elfheader (abfd
);
254 Elf_Internal_Phdr
*last_phdr
= NULL
;
256 for (num_buf
= 0, num_ovl
= 0, i
= 0; i
< ehdr
->e_phnum
; i
++, phdr
++)
257 if (phdr
->p_type
== PT_LOAD
&& (phdr
->p_flags
& PF_OVERLAY
) != 0)
262 if (last_phdr
== NULL
263 || ((last_phdr
->p_vaddr
^ phdr
->p_vaddr
) & 0x3ffff) != 0)
266 for (j
= 1; j
< elf_numsections (abfd
); j
++)
268 Elf_Internal_Shdr
*shdr
= elf_elfsections (abfd
)[j
];
270 if (ELF_IS_SECTION_IN_SEGMENT_MEMORY (shdr
, phdr
))
272 asection
*sec
= shdr
->bfd_section
;
273 spu_elf_section_data (sec
)->u
.o
.ovl_index
= num_ovl
;
274 spu_elf_section_data (sec
)->u
.o
.ovl_buf
= num_buf
;
282 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
283 strip --strip-unneeded will not remove them. */
286 spu_elf_backend_symbol_processing (bfd
*abfd ATTRIBUTE_UNUSED
, asymbol
*sym
)
288 if (sym
->name
!= NULL
289 && sym
->section
!= bfd_abs_section_ptr
290 && strncmp (sym
->name
, "_EAR_", 5) == 0)
291 sym
->flags
|= BSF_KEEP
;
294 /* SPU ELF linker hash table. */
296 struct spu_link_hash_table
298 struct elf_link_hash_table elf
;
300 struct spu_elf_params
*params
;
302 /* Shortcuts to overlay sections. */
307 /* Count of stubs in each overlay section. */
308 unsigned int *stub_count
;
310 /* The stub section for each overlay section. */
313 struct elf_link_hash_entry
*ovly_load
;
314 struct elf_link_hash_entry
*ovly_return
;
315 unsigned long ovly_load_r_symndx
;
317 /* Number of overlay buffers. */
318 unsigned int num_buf
;
320 /* Total number of overlays. */
321 unsigned int num_overlays
;
323 /* How much memory we have. */
324 unsigned int local_store
;
325 /* Local store --auto-overlay should reserve for non-overlay
326 functions and data. */
327 unsigned int overlay_fixed
;
328 /* Local store --auto-overlay should reserve for stack and heap. */
329 unsigned int reserved
;
330 /* If reserved is not specified, stack analysis will calculate a value
331 for the stack. This parameter adjusts that value to allow for
332 negative sp access (the ABI says 2000 bytes below sp are valid,
333 and the overlay manager uses some of this area). */
334 int extra_stack_space
;
335 /* Count of overlay stubs needed in non-overlay area. */
336 unsigned int non_ovly_stub
;
339 unsigned int stub_err
: 1;
342 /* Hijack the generic got fields for overlay stub accounting. */
346 struct got_entry
*next
;
352 #define spu_hash_table(p) \
353 ((struct spu_link_hash_table *) ((p)->hash))
357 struct function_info
*fun
;
358 struct call_info
*next
;
360 unsigned int max_depth
;
361 unsigned int is_tail
: 1;
362 unsigned int is_pasted
: 1;
367 /* List of functions called. Also branches to hot/cold part of
369 struct call_info
*call_list
;
370 /* For hot/cold part of function, point to owner. */
371 struct function_info
*start
;
372 /* Symbol at start of function. */
374 Elf_Internal_Sym
*sym
;
375 struct elf_link_hash_entry
*h
;
377 /* Function section. */
380 /* Where last called from, and number of sections called from. */
381 asection
*last_caller
;
382 unsigned int call_count
;
383 /* Address range of (this part of) function. */
387 /* Distance from root of call tree. Tail and hot/cold branches
388 count as one deeper. We aren't counting stack frames here. */
390 /* Set if global symbol. */
391 unsigned int global
: 1;
392 /* Set if known to be start of function (as distinct from a hunk
393 in hot/cold section. */
394 unsigned int is_func
: 1;
395 /* Set if not a root node. */
396 unsigned int non_root
: 1;
397 /* Flags used during call tree traversal. It's cheaper to replicate
398 the visit flags than have one which needs clearing after a traversal. */
399 unsigned int visit1
: 1;
400 unsigned int visit2
: 1;
401 unsigned int marking
: 1;
402 unsigned int visit3
: 1;
403 unsigned int visit4
: 1;
404 unsigned int visit5
: 1;
405 unsigned int visit6
: 1;
406 unsigned int visit7
: 1;
409 struct spu_elf_stack_info
413 /* Variable size array describing functions, one per contiguous
414 address range belonging to a function. */
415 struct function_info fun
[1];
418 /* Create a spu ELF linker hash table. */
420 static struct bfd_link_hash_table
*
421 spu_elf_link_hash_table_create (bfd
*abfd
)
423 struct spu_link_hash_table
*htab
;
425 htab
= bfd_malloc (sizeof (*htab
));
429 if (!_bfd_elf_link_hash_table_init (&htab
->elf
, abfd
,
430 _bfd_elf_link_hash_newfunc
,
431 sizeof (struct elf_link_hash_entry
)))
437 memset (&htab
->ovtab
, 0,
438 sizeof (*htab
) - offsetof (struct spu_link_hash_table
, ovtab
));
440 htab
->elf
.init_got_refcount
.refcount
= 0;
441 htab
->elf
.init_got_refcount
.glist
= NULL
;
442 htab
->elf
.init_got_offset
.offset
= 0;
443 htab
->elf
.init_got_offset
.glist
= NULL
;
444 return &htab
->elf
.root
;
448 spu_elf_setup (struct bfd_link_info
*info
, struct spu_elf_params
*params
)
450 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
451 htab
->params
= params
;
454 /* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
455 to (hash, NULL) for global symbols, and (NULL, sym) for locals. Set
456 *SYMSECP to the symbol's section. *LOCSYMSP caches local syms. */
459 get_sym_h (struct elf_link_hash_entry
**hp
,
460 Elf_Internal_Sym
**symp
,
462 Elf_Internal_Sym
**locsymsp
,
463 unsigned long r_symndx
,
466 Elf_Internal_Shdr
*symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
468 if (r_symndx
>= symtab_hdr
->sh_info
)
470 struct elf_link_hash_entry
**sym_hashes
= elf_sym_hashes (ibfd
);
471 struct elf_link_hash_entry
*h
;
473 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
474 while (h
->root
.type
== bfd_link_hash_indirect
475 || h
->root
.type
== bfd_link_hash_warning
)
476 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
486 asection
*symsec
= NULL
;
487 if (h
->root
.type
== bfd_link_hash_defined
488 || h
->root
.type
== bfd_link_hash_defweak
)
489 symsec
= h
->root
.u
.def
.section
;
495 Elf_Internal_Sym
*sym
;
496 Elf_Internal_Sym
*locsyms
= *locsymsp
;
500 locsyms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
502 locsyms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
,
504 0, NULL
, NULL
, NULL
);
509 sym
= locsyms
+ r_symndx
;
518 *symsecp
= bfd_section_from_elf_index (ibfd
, sym
->st_shndx
);
524 /* Create the note section if not already present. This is done early so
525 that the linker maps the sections to the right place in the output. */
528 spu_elf_create_sections (struct bfd_link_info
*info
)
532 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
533 if (bfd_get_section_by_name (ibfd
, SPU_PTNOTE_SPUNAME
) != NULL
)
538 /* Make SPU_PTNOTE_SPUNAME section. */
545 ibfd
= info
->input_bfds
;
546 flags
= SEC_LOAD
| SEC_READONLY
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
547 s
= bfd_make_section_anyway_with_flags (ibfd
, SPU_PTNOTE_SPUNAME
, flags
);
549 || !bfd_set_section_alignment (ibfd
, s
, 4))
552 name_len
= strlen (bfd_get_filename (info
->output_bfd
)) + 1;
553 size
= 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4);
554 size
+= (name_len
+ 3) & -4;
556 if (!bfd_set_section_size (ibfd
, s
, size
))
559 data
= bfd_zalloc (ibfd
, size
);
563 bfd_put_32 (ibfd
, sizeof (SPU_PLUGIN_NAME
), data
+ 0);
564 bfd_put_32 (ibfd
, name_len
, data
+ 4);
565 bfd_put_32 (ibfd
, 1, data
+ 8);
566 memcpy (data
+ 12, SPU_PLUGIN_NAME
, sizeof (SPU_PLUGIN_NAME
));
567 memcpy (data
+ 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4),
568 bfd_get_filename (info
->output_bfd
), name_len
);
575 /* qsort predicate to sort sections by vma. */
578 sort_sections (const void *a
, const void *b
)
580 const asection
*const *s1
= a
;
581 const asection
*const *s2
= b
;
582 bfd_signed_vma delta
= (*s1
)->vma
- (*s2
)->vma
;
585 return delta
< 0 ? -1 : 1;
587 return (*s1
)->index
- (*s2
)->index
;
590 /* Identify overlays in the output bfd, and number them. */
593 spu_elf_find_overlays (struct bfd_link_info
*info
)
595 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
596 asection
**alloc_sec
;
597 unsigned int i
, n
, ovl_index
, num_buf
;
601 if (info
->output_bfd
->section_count
< 2)
605 = bfd_malloc (info
->output_bfd
->section_count
* sizeof (*alloc_sec
));
606 if (alloc_sec
== NULL
)
609 /* Pick out all the alloced sections. */
610 for (n
= 0, s
= info
->output_bfd
->sections
; s
!= NULL
; s
= s
->next
)
611 if ((s
->flags
& SEC_ALLOC
) != 0
612 && (s
->flags
& (SEC_LOAD
| SEC_THREAD_LOCAL
)) != SEC_THREAD_LOCAL
622 /* Sort them by vma. */
623 qsort (alloc_sec
, n
, sizeof (*alloc_sec
), sort_sections
);
625 /* Look for overlapping vmas. Any with overlap must be overlays.
626 Count them. Also count the number of overlay regions. */
627 ovl_end
= alloc_sec
[0]->vma
+ alloc_sec
[0]->size
;
628 for (ovl_index
= 0, num_buf
= 0, i
= 1; i
< n
; i
++)
631 if (s
->vma
< ovl_end
)
633 asection
*s0
= alloc_sec
[i
- 1];
635 if (spu_elf_section_data (s0
)->u
.o
.ovl_index
== 0)
637 alloc_sec
[ovl_index
] = s0
;
638 spu_elf_section_data (s0
)->u
.o
.ovl_index
= ++ovl_index
;
639 spu_elf_section_data (s0
)->u
.o
.ovl_buf
= ++num_buf
;
641 alloc_sec
[ovl_index
] = s
;
642 spu_elf_section_data (s
)->u
.o
.ovl_index
= ++ovl_index
;
643 spu_elf_section_data (s
)->u
.o
.ovl_buf
= num_buf
;
644 if (s0
->vma
!= s
->vma
)
646 info
->callbacks
->einfo (_("%X%P: overlay sections %A and %A "
647 "do not start at the same address.\n"),
651 if (ovl_end
< s
->vma
+ s
->size
)
652 ovl_end
= s
->vma
+ s
->size
;
655 ovl_end
= s
->vma
+ s
->size
;
658 htab
->num_overlays
= ovl_index
;
659 htab
->num_buf
= num_buf
;
660 htab
->ovl_sec
= alloc_sec
;
661 htab
->ovly_load
= elf_link_hash_lookup (&htab
->elf
, "__ovly_load",
662 FALSE
, FALSE
, FALSE
);
663 htab
->ovly_return
= elf_link_hash_lookup (&htab
->elf
, "__ovly_return",
664 FALSE
, FALSE
, FALSE
);
665 return ovl_index
!= 0;
/* SPU instruction opcodes (opcode bits only, operand fields zero),
   used when emitting overlay stubs.  */
#define BRSL	0x33000000	/* branch relative and set link */
#define BR	0x32000000	/* branch relative */
#define NOP	0x40200000	/* execute pipe no-op */
#define LNOP	0x00200000	/* load/store pipe no-op */
#define ILA	0x42000000	/* immediate load address */
674 /* Return true for all relative and absolute branch instructions.
682 brhnz 00100011 0.. */
685 is_branch (const unsigned char *insn
)
687 return (insn
[0] & 0xec) == 0x20 && (insn
[1] & 0x80) == 0;
690 /* Return true for all indirect branch instructions.
698 bihnz 00100101 011 */
701 is_indirect_branch (const unsigned char *insn
)
703 return (insn
[0] & 0xef) == 0x25 && (insn
[1] & 0x80) == 0;
706 /* Return true for branch hint instructions.
711 is_hint (const unsigned char *insn
)
713 return (insn
[0] & 0xfc) == 0x10;
716 /* True if INPUT_SECTION might need overlay stubs. */
719 maybe_needs_stubs (asection
*input_section
)
721 /* No stubs for debug sections and suchlike. */
722 if ((input_section
->flags
& SEC_ALLOC
) == 0)
725 /* No stubs for link-once sections that will be discarded. */
726 if (input_section
->output_section
== bfd_abs_section_ptr
)
729 /* Don't create stubs for .eh_frame references. */
730 if (strcmp (input_section
->name
, ".eh_frame") == 0)
744 /* Return non-zero if this reloc symbol should go via an overlay stub.
745 Return 2 if the stub must be in non-overlay area. */
747 static enum _stub_type
748 needs_ovl_stub (struct elf_link_hash_entry
*h
,
749 Elf_Internal_Sym
*sym
,
751 asection
*input_section
,
752 Elf_Internal_Rela
*irela
,
754 struct bfd_link_info
*info
)
756 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
757 enum elf_spu_reloc_type r_type
;
758 unsigned int sym_type
;
760 enum _stub_type ret
= no_stub
;
763 || sym_sec
->output_section
== bfd_abs_section_ptr
764 || spu_elf_section_data (sym_sec
->output_section
) == NULL
)
769 /* Ensure no stubs for user supplied overlay manager syms. */
770 if (h
== htab
->ovly_load
|| h
== htab
->ovly_return
)
773 /* setjmp always goes via an overlay stub, because then the return
774 and hence the longjmp goes via __ovly_return. That magically
775 makes setjmp/longjmp between overlays work. */
776 if (strncmp (h
->root
.root
.string
, "setjmp", 6) == 0
777 && (h
->root
.root
.string
[6] == '\0' || h
->root
.root
.string
[6] == '@'))
781 /* Usually, symbols in non-overlay sections don't need stubs. */
782 if (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
== 0
783 && !htab
->params
->non_overlay_stubs
)
789 sym_type
= ELF_ST_TYPE (sym
->st_info
);
791 r_type
= ELF32_R_TYPE (irela
->r_info
);
793 if (r_type
== R_SPU_REL16
|| r_type
== R_SPU_ADDR16
)
797 if (contents
== NULL
)
800 if (!bfd_get_section_contents (input_section
->owner
,
807 contents
+= irela
->r_offset
;
809 if (is_branch (contents
) || is_hint (contents
))
812 if ((contents
[0] & 0xfd) == 0x31
813 && sym_type
!= STT_FUNC
816 /* It's common for people to write assembly and forget
817 to give function symbols the right type. Handle
818 calls to such symbols, but warn so that (hopefully)
819 people will fix their code. We need the symbol
820 type to be correct to distinguish function pointer
821 initialisation from other pointer initialisations. */
822 const char *sym_name
;
825 sym_name
= h
->root
.root
.string
;
828 Elf_Internal_Shdr
*symtab_hdr
;
829 symtab_hdr
= &elf_tdata (input_section
->owner
)->symtab_hdr
;
830 sym_name
= bfd_elf_sym_name (input_section
->owner
,
835 (*_bfd_error_handler
) (_("warning: call to non-function"
836 " symbol %s defined in %B"),
837 sym_sec
->owner
, sym_name
);
843 if (sym_type
!= STT_FUNC
845 && (sym_sec
->flags
& SEC_CODE
) == 0)
848 /* A reference from some other section to a symbol in an overlay
849 section needs a stub. */
850 if (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
851 != spu_elf_section_data (input_section
->output_section
)->u
.o
.ovl_index
)
854 /* If this insn isn't a branch then we are possibly taking the
855 address of a function and passing it out somehow. */
856 return !branch
&& sym_type
== STT_FUNC
? nonovl_stub
: ret
;
860 count_stub (struct spu_link_hash_table
*htab
,
863 enum _stub_type stub_type
,
864 struct elf_link_hash_entry
*h
,
865 const Elf_Internal_Rela
*irela
)
867 unsigned int ovl
= 0;
868 struct got_entry
*g
, **head
;
871 /* If this instruction is a branch or call, we need a stub
872 for it. One stub per function per overlay.
873 If it isn't a branch, then we are taking the address of
874 this function so need a stub in the non-overlay area
875 for it. One stub per function. */
876 if (stub_type
!= nonovl_stub
)
877 ovl
= spu_elf_section_data (isec
->output_section
)->u
.o
.ovl_index
;
880 head
= &h
->got
.glist
;
883 if (elf_local_got_ents (ibfd
) == NULL
)
885 bfd_size_type amt
= (elf_tdata (ibfd
)->symtab_hdr
.sh_info
886 * sizeof (*elf_local_got_ents (ibfd
)));
887 elf_local_got_ents (ibfd
) = bfd_zmalloc (amt
);
888 if (elf_local_got_ents (ibfd
) == NULL
)
891 head
= elf_local_got_ents (ibfd
) + ELF32_R_SYM (irela
->r_info
);
896 addend
= irela
->r_addend
;
900 struct got_entry
*gnext
;
902 for (g
= *head
; g
!= NULL
; g
= g
->next
)
903 if (g
->addend
== addend
&& g
->ovl
== 0)
908 /* Need a new non-overlay area stub. Zap other stubs. */
909 for (g
= *head
; g
!= NULL
; g
= gnext
)
912 if (g
->addend
== addend
)
914 htab
->stub_count
[g
->ovl
] -= 1;
922 for (g
= *head
; g
!= NULL
; g
= g
->next
)
923 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
929 g
= bfd_malloc (sizeof *g
);
934 g
->stub_addr
= (bfd_vma
) -1;
938 htab
->stub_count
[ovl
] += 1;
944 /* Support two sizes of overlay stubs, a slower more compact stub of two
945 intructions, and a faster stub of four instructions. */
948 ovl_stub_size (enum _ovly_flavour ovly_flavour
)
950 return 8 << ovly_flavour
;
953 /* Two instruction overlay stubs look like:
956 .word target_ovl_and_address
958 ovl_and_address is a word with the overlay number in the top 14 bits
959 and local store address in the bottom 18 bits.
961 Four instruction overlay stubs look like:
965 ila $79,target_address
969 build_stub (struct spu_link_hash_table
*htab
,
972 enum _stub_type stub_type
,
973 struct elf_link_hash_entry
*h
,
974 const Elf_Internal_Rela
*irela
,
978 unsigned int ovl
, dest_ovl
;
979 struct got_entry
*g
, **head
;
981 bfd_vma addend
, from
, to
;
984 if (stub_type
!= nonovl_stub
)
985 ovl
= spu_elf_section_data (isec
->output_section
)->u
.o
.ovl_index
;
988 head
= &h
->got
.glist
;
990 head
= elf_local_got_ents (ibfd
) + ELF32_R_SYM (irela
->r_info
);
994 addend
= irela
->r_addend
;
996 for (g
= *head
; g
!= NULL
; g
= g
->next
)
997 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
1002 if (g
->ovl
== 0 && ovl
!= 0)
1005 if (g
->stub_addr
!= (bfd_vma
) -1)
1008 sec
= htab
->stub_sec
[ovl
];
1009 dest
+= dest_sec
->output_offset
+ dest_sec
->output_section
->vma
;
1010 from
= sec
->size
+ sec
->output_offset
+ sec
->output_section
->vma
;
1011 g
->stub_addr
= from
;
1012 to
= (htab
->ovly_load
->root
.u
.def
.value
1013 + htab
->ovly_load
->root
.u
.def
.section
->output_offset
1014 + htab
->ovly_load
->root
.u
.def
.section
->output_section
->vma
);
1016 if (((dest
| to
| from
) & 3) != 0)
1021 dest_ovl
= spu_elf_section_data (dest_sec
->output_section
)->u
.o
.ovl_index
;
1023 switch (htab
->params
->ovly_flavour
)
1026 bfd_put_32 (sec
->owner
, ILA
+ ((dest_ovl
<< 7) & 0x01ffff80) + 78,
1027 sec
->contents
+ sec
->size
);
1028 bfd_put_32 (sec
->owner
, LNOP
,
1029 sec
->contents
+ sec
->size
+ 4);
1030 bfd_put_32 (sec
->owner
, ILA
+ ((dest
<< 7) & 0x01ffff80) + 79,
1031 sec
->contents
+ sec
->size
+ 8);
1032 bfd_put_32 (sec
->owner
, BR
+ (((to
- (from
+ 12)) << 5) & 0x007fff80),
1033 sec
->contents
+ sec
->size
+ 12);
1037 bfd_put_32 (sec
->owner
, BRSL
+ (((to
- from
) << 5) & 0x007fff80) + 75,
1038 sec
->contents
+ sec
->size
);
1039 bfd_put_32 (sec
->owner
, (dest
& 0x3ffff) | (dest_ovl
<< 18),
1040 sec
->contents
+ sec
->size
+ 4);
1046 sec
->size
+= ovl_stub_size (htab
->params
->ovly_flavour
);
1048 if (htab
->params
->emit_stub_syms
)
1054 len
= 8 + sizeof (".ovl_call.") - 1;
1056 len
+= strlen (h
->root
.root
.string
);
1061 add
= (int) irela
->r_addend
& 0xffffffff;
1064 name
= bfd_malloc (len
);
1068 sprintf (name
, "%08x.ovl_call.", g
->ovl
);
1070 strcpy (name
+ 8 + sizeof (".ovl_call.") - 1, h
->root
.root
.string
);
1072 sprintf (name
+ 8 + sizeof (".ovl_call.") - 1, "%x:%x",
1073 dest_sec
->id
& 0xffffffff,
1074 (int) ELF32_R_SYM (irela
->r_info
) & 0xffffffff);
1076 sprintf (name
+ len
- 9, "+%x", add
);
1078 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
1082 if (h
->root
.type
== bfd_link_hash_new
)
1084 h
->root
.type
= bfd_link_hash_defined
;
1085 h
->root
.u
.def
.section
= sec
;
1086 h
->size
= ovl_stub_size (htab
->params
->ovly_flavour
);
1087 h
->root
.u
.def
.value
= sec
->size
- h
->size
;
1091 h
->ref_regular_nonweak
= 1;
1092 h
->forced_local
= 1;
1100 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
1104 allocate_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
1106 /* Symbols starting with _SPUEAR_ need a stub because they may be
1107 invoked by the PPU. */
1108 struct bfd_link_info
*info
= inf
;
1109 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1112 if ((h
->root
.type
== bfd_link_hash_defined
1113 || h
->root
.type
== bfd_link_hash_defweak
)
1115 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0
1116 && (sym_sec
= h
->root
.u
.def
.section
) != NULL
1117 && sym_sec
->output_section
!= bfd_abs_section_ptr
1118 && spu_elf_section_data (sym_sec
->output_section
) != NULL
1119 && (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
!= 0
1120 || htab
->params
->non_overlay_stubs
))
1122 return count_stub (htab
, NULL
, NULL
, nonovl_stub
, h
, NULL
);
1129 build_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
1131 /* Symbols starting with _SPUEAR_ need a stub because they may be
1132 invoked by the PPU. */
1133 struct bfd_link_info
*info
= inf
;
1134 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1137 if ((h
->root
.type
== bfd_link_hash_defined
1138 || h
->root
.type
== bfd_link_hash_defweak
)
1140 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0
1141 && (sym_sec
= h
->root
.u
.def
.section
) != NULL
1142 && sym_sec
->output_section
!= bfd_abs_section_ptr
1143 && spu_elf_section_data (sym_sec
->output_section
) != NULL
1144 && (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
!= 0
1145 || htab
->params
->non_overlay_stubs
))
1147 return build_stub (htab
, NULL
, NULL
, nonovl_stub
, h
, NULL
,
1148 h
->root
.u
.def
.value
, sym_sec
);
1154 /* Size or build stubs. */
1157 process_stubs (struct bfd_link_info
*info
, bfd_boolean build
)
1159 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1162 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
1164 extern const bfd_target bfd_elf32_spu_vec
;
1165 Elf_Internal_Shdr
*symtab_hdr
;
1167 Elf_Internal_Sym
*local_syms
= NULL
;
1169 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
1172 /* We'll need the symbol table in a second. */
1173 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
1174 if (symtab_hdr
->sh_info
== 0)
1177 /* Walk over each section attached to the input bfd. */
1178 for (isec
= ibfd
->sections
; isec
!= NULL
; isec
= isec
->next
)
1180 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
1182 /* If there aren't any relocs, then there's nothing more to do. */
1183 if ((isec
->flags
& SEC_RELOC
) == 0
1184 || isec
->reloc_count
== 0)
1187 if (!maybe_needs_stubs (isec
))
1190 /* Get the relocs. */
1191 internal_relocs
= _bfd_elf_link_read_relocs (ibfd
, isec
, NULL
, NULL
,
1193 if (internal_relocs
== NULL
)
1194 goto error_ret_free_local
;
1196 /* Now examine each relocation. */
1197 irela
= internal_relocs
;
1198 irelaend
= irela
+ isec
->reloc_count
;
1199 for (; irela
< irelaend
; irela
++)
1201 enum elf_spu_reloc_type r_type
;
1202 unsigned int r_indx
;
1204 Elf_Internal_Sym
*sym
;
1205 struct elf_link_hash_entry
*h
;
1206 enum _stub_type stub_type
;
1208 r_type
= ELF32_R_TYPE (irela
->r_info
);
1209 r_indx
= ELF32_R_SYM (irela
->r_info
);
1211 if (r_type
>= R_SPU_max
)
1213 bfd_set_error (bfd_error_bad_value
);
1214 error_ret_free_internal
:
1215 if (elf_section_data (isec
)->relocs
!= internal_relocs
)
1216 free (internal_relocs
);
1217 error_ret_free_local
:
1218 if (local_syms
!= NULL
1219 && (symtab_hdr
->contents
1220 != (unsigned char *) local_syms
))
1225 /* Determine the reloc target section. */
1226 if (!get_sym_h (&h
, &sym
, &sym_sec
, &local_syms
, r_indx
, ibfd
))
1227 goto error_ret_free_internal
;
1229 stub_type
= needs_ovl_stub (h
, sym
, sym_sec
, isec
, irela
,
1231 if (stub_type
== no_stub
)
1233 else if (stub_type
== stub_error
)
1234 goto error_ret_free_internal
;
1236 if (htab
->stub_count
== NULL
)
1239 amt
= (htab
->num_overlays
+ 1) * sizeof (*htab
->stub_count
);
1240 htab
->stub_count
= bfd_zmalloc (amt
);
1241 if (htab
->stub_count
== NULL
)
1242 goto error_ret_free_internal
;
1247 if (!count_stub (htab
, ibfd
, isec
, stub_type
, h
, irela
))
1248 goto error_ret_free_internal
;
1255 dest
= h
->root
.u
.def
.value
;
1257 dest
= sym
->st_value
;
1258 dest
+= irela
->r_addend
;
1259 if (!build_stub (htab
, ibfd
, isec
, stub_type
, h
, irela
,
1261 goto error_ret_free_internal
;
1265 /* We're done with the internal relocs, free them. */
1266 if (elf_section_data (isec
)->relocs
!= internal_relocs
)
1267 free (internal_relocs
);
1270 if (local_syms
!= NULL
1271 && symtab_hdr
->contents
!= (unsigned char *) local_syms
)
1273 if (!info
->keep_memory
)
1276 symtab_hdr
->contents
= (unsigned char *) local_syms
;
1283 /* Allocate space for overlay call and return stubs. */
1286 spu_elf_size_stubs (struct bfd_link_info
*info
)
1288 struct spu_link_hash_table
*htab
;
1295 if (!process_stubs (info
, FALSE
))
1298 htab
= spu_hash_table (info
);
1299 elf_link_hash_traverse (&htab
->elf
, allocate_spuear_stubs
, info
);
1303 if (htab
->stub_count
== NULL
)
1306 ibfd
= info
->input_bfds
;
1307 amt
= (htab
->num_overlays
+ 1) * sizeof (*htab
->stub_sec
);
1308 htab
->stub_sec
= bfd_zmalloc (amt
);
1309 if (htab
->stub_sec
== NULL
)
1312 flags
= (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_READONLY
1313 | SEC_HAS_CONTENTS
| SEC_IN_MEMORY
);
1314 stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1315 htab
->stub_sec
[0] = stub
;
1317 || !bfd_set_section_alignment (ibfd
, stub
,
1318 htab
->params
->ovly_flavour
+ 3))
1320 stub
->size
= htab
->stub_count
[0] * ovl_stub_size (htab
->params
->ovly_flavour
);
1321 (*htab
->params
->place_spu_section
) (stub
, NULL
, ".text");
1323 for (i
= 0; i
< htab
->num_overlays
; ++i
)
1325 asection
*osec
= htab
->ovl_sec
[i
];
1326 unsigned int ovl
= spu_elf_section_data (osec
)->u
.o
.ovl_index
;
1327 stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1328 htab
->stub_sec
[ovl
] = stub
;
1330 || !bfd_set_section_alignment (ibfd
, stub
,
1331 htab
->params
->ovly_flavour
+ 3))
1333 stub
->size
= htab
->stub_count
[ovl
] * ovl_stub_size (htab
->params
->ovly_flavour
);
1334 (*htab
->params
->place_spu_section
) (stub
, osec
, NULL
);
1337 /* htab->ovtab consists of two arrays.
1347 . } _ovly_buf_table[];
1350 flags
= (SEC_ALLOC
| SEC_LOAD
1351 | SEC_HAS_CONTENTS
| SEC_IN_MEMORY
);
1352 htab
->ovtab
= bfd_make_section_anyway_with_flags (ibfd
, ".ovtab", flags
);
1353 if (htab
->ovtab
== NULL
1354 || !bfd_set_section_alignment (ibfd
, htab
->ovtab
, 4))
1357 htab
->ovtab
->size
= htab
->num_overlays
* 16 + 16 + htab
->num_buf
* 4;
1358 (*htab
->params
->place_spu_section
) (htab
->ovtab
, NULL
, ".data");
1360 htab
->toe
= bfd_make_section_anyway_with_flags (ibfd
, ".toe", SEC_ALLOC
);
1361 if (htab
->toe
== NULL
1362 || !bfd_set_section_alignment (ibfd
, htab
->toe
, 4))
1364 htab
->toe
->size
= 16;
1365 (*htab
->params
->place_spu_section
) (htab
->toe
, NULL
, ".toe");
1370 /* Functions to handle embedded spu_ovl.o object. */
1373 ovl_mgr_open (struct bfd
*nbfd ATTRIBUTE_UNUSED
, void *stream
)
1379 ovl_mgr_pread (struct bfd
*abfd ATTRIBUTE_UNUSED
,
1385 struct _ovl_stream
*os
;
1389 os
= (struct _ovl_stream
*) stream
;
1390 max
= (const char *) os
->end
- (const char *) os
->start
;
1392 if ((ufile_ptr
) offset
>= max
)
1396 if (count
> max
- offset
)
1397 count
= max
- offset
;
1399 memcpy (buf
, (const char *) os
->start
+ offset
, count
);
1404 spu_elf_open_builtin_lib (bfd
**ovl_bfd
, const struct _ovl_stream
*stream
)
1406 *ovl_bfd
= bfd_openr_iovec ("builtin ovl_mgr",
1413 return *ovl_bfd
!= NULL
;
1416 /* Define an STT_OBJECT symbol. */
1418 static struct elf_link_hash_entry
*
1419 define_ovtab_symbol (struct spu_link_hash_table
*htab
, const char *name
)
1421 struct elf_link_hash_entry
*h
;
1423 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, FALSE
, FALSE
);
1427 if (h
->root
.type
!= bfd_link_hash_defined
1430 h
->root
.type
= bfd_link_hash_defined
;
1431 h
->root
.u
.def
.section
= htab
->ovtab
;
1432 h
->type
= STT_OBJECT
;
1435 h
->ref_regular_nonweak
= 1;
1438 else if (h
->root
.u
.def
.section
->owner
!= NULL
)
1440 (*_bfd_error_handler
) (_("%B is not allowed to define %s"),
1441 h
->root
.u
.def
.section
->owner
,
1442 h
->root
.root
.string
);
1443 bfd_set_error (bfd_error_bad_value
);
1448 (*_bfd_error_handler
) (_("you are not allowed to define %s in a script"),
1449 h
->root
.root
.string
);
1450 bfd_set_error (bfd_error_bad_value
);
1457 /* Fill in all stubs and the overlay tables. */
1460 spu_elf_build_stubs (struct bfd_link_info
*info
)
1462 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1463 struct elf_link_hash_entry
*h
;
1469 if (htab
->stub_count
== NULL
)
1472 for (i
= 0; i
<= htab
->num_overlays
; i
++)
1473 if (htab
->stub_sec
[i
]->size
!= 0)
1475 htab
->stub_sec
[i
]->contents
= bfd_zalloc (htab
->stub_sec
[i
]->owner
,
1476 htab
->stub_sec
[i
]->size
);
1477 if (htab
->stub_sec
[i
]->contents
== NULL
)
1479 htab
->stub_sec
[i
]->rawsize
= htab
->stub_sec
[i
]->size
;
1480 htab
->stub_sec
[i
]->size
= 0;
1483 h
= elf_link_hash_lookup (&htab
->elf
, "__ovly_load", FALSE
, FALSE
, FALSE
);
1484 htab
->ovly_load
= h
;
1485 BFD_ASSERT (h
!= NULL
1486 && (h
->root
.type
== bfd_link_hash_defined
1487 || h
->root
.type
== bfd_link_hash_defweak
)
1490 s
= h
->root
.u
.def
.section
->output_section
;
1491 if (spu_elf_section_data (s
)->u
.o
.ovl_index
)
1493 (*_bfd_error_handler
) (_("%s in overlay section"),
1494 h
->root
.root
.string
);
1495 bfd_set_error (bfd_error_bad_value
);
1499 h
= elf_link_hash_lookup (&htab
->elf
, "__ovly_return", FALSE
, FALSE
, FALSE
);
1500 htab
->ovly_return
= h
;
1502 /* Fill in all the stubs. */
1503 process_stubs (info
, TRUE
);
1504 if (!htab
->stub_err
)
1505 elf_link_hash_traverse (&htab
->elf
, build_spuear_stubs
, info
);
1509 (*_bfd_error_handler
) (_("overlay stub relocation overflow"));
1510 bfd_set_error (bfd_error_bad_value
);
1514 for (i
= 0; i
<= htab
->num_overlays
; i
++)
1516 if (htab
->stub_sec
[i
]->size
!= htab
->stub_sec
[i
]->rawsize
)
1518 (*_bfd_error_handler
) (_("stubs don't match calculated size"));
1519 bfd_set_error (bfd_error_bad_value
);
1522 htab
->stub_sec
[i
]->rawsize
= 0;
1525 htab
->ovtab
->contents
= bfd_zalloc (htab
->ovtab
->owner
, htab
->ovtab
->size
);
1526 if (htab
->ovtab
->contents
== NULL
)
1529 /* Write out _ovly_table. */
1530 p
= htab
->ovtab
->contents
;
1531 /* set low bit of .size to mark non-overlay area as present. */
1533 obfd
= htab
->ovtab
->output_section
->owner
;
1534 for (s
= obfd
->sections
; s
!= NULL
; s
= s
->next
)
1536 unsigned int ovl_index
= spu_elf_section_data (s
)->u
.o
.ovl_index
;
1540 unsigned long off
= ovl_index
* 16;
1541 unsigned int ovl_buf
= spu_elf_section_data (s
)->u
.o
.ovl_buf
;
1543 bfd_put_32 (htab
->ovtab
->owner
, s
->vma
, p
+ off
);
1544 bfd_put_32 (htab
->ovtab
->owner
, (s
->size
+ 15) & -16, p
+ off
+ 4);
1545 /* file_off written later in spu_elf_modify_program_headers. */
1546 bfd_put_32 (htab
->ovtab
->owner
, ovl_buf
, p
+ off
+ 12);
1550 h
= define_ovtab_symbol (htab
, "_ovly_table");
1553 h
->root
.u
.def
.value
= 16;
1554 h
->size
= htab
->num_overlays
* 16;
1556 h
= define_ovtab_symbol (htab
, "_ovly_table_end");
1559 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16;
1562 h
= define_ovtab_symbol (htab
, "_ovly_buf_table");
1565 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16;
1566 h
->size
= htab
->num_buf
* 4;
1568 h
= define_ovtab_symbol (htab
, "_ovly_buf_table_end");
1571 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16 + htab
->num_buf
* 4;
1574 h
= define_ovtab_symbol (htab
, "_EAR_");
1577 h
->root
.u
.def
.section
= htab
->toe
;
1578 h
->root
.u
.def
.value
= 0;
1584 /* Check that all loadable section VMAs lie in the range
1585 LO .. HI inclusive, and stash some parameters for --auto-overlay. */
1588 spu_elf_check_vma (struct bfd_link_info
*info
)
1590 struct elf_segment_map
*m
;
1592 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1593 bfd
*abfd
= info
->output_bfd
;
1594 bfd_vma hi
= htab
->params
->local_store_hi
;
1595 bfd_vma lo
= htab
->params
->local_store_lo
;
1597 htab
->local_store
= hi
+ 1 - lo
;
1599 for (m
= elf_tdata (abfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
1600 if (m
->p_type
== PT_LOAD
)
1601 for (i
= 0; i
< m
->count
; i
++)
1602 if (m
->sections
[i
]->size
!= 0
1603 && (m
->sections
[i
]->vma
< lo
1604 || m
->sections
[i
]->vma
> hi
1605 || m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1 > hi
))
1606 return m
->sections
[i
];
1608 /* No need for overlays if it all fits. */
1609 htab
->params
->auto_overlay
= 0;
1613 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
1614 Search for stack adjusting insns, and return the sp delta. */
1617 find_function_stack_adjust (asection
*sec
, bfd_vma offset
)
1621 memset (reg
, 0, sizeof (reg
));
1622 for ( ; offset
+ 4 <= sec
->size
; offset
+= 4)
1624 unsigned char buf
[4];
1628 /* Assume no relocs on stack adjusing insns. */
1629 if (!bfd_get_section_contents (sec
->owner
, sec
, buf
, offset
, 4))
1632 if (buf
[0] == 0x24 /* stqd */)
1636 ra
= ((buf
[2] & 0x3f) << 1) | (buf
[3] >> 7);
1637 /* Partly decoded immediate field. */
1638 imm
= (buf
[1] << 9) | (buf
[2] << 1) | (buf
[3] >> 7);
1640 if (buf
[0] == 0x1c /* ai */)
1643 imm
= (imm
^ 0x200) - 0x200;
1644 reg
[rt
] = reg
[ra
] + imm
;
1646 if (rt
== 1 /* sp */)
1653 else if (buf
[0] == 0x18 && (buf
[1] & 0xe0) == 0 /* a */)
1655 int rb
= ((buf
[1] & 0x1f) << 2) | ((buf
[2] & 0xc0) >> 6);
1657 reg
[rt
] = reg
[ra
] + reg
[rb
];
1665 else if ((buf
[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
1667 if (buf
[0] >= 0x42 /* ila */)
1668 imm
|= (buf
[0] & 1) << 17;
1673 if (buf
[0] == 0x40 /* il */)
1675 if ((buf
[1] & 0x80) == 0)
1677 imm
= (imm
^ 0x8000) - 0x8000;
1679 else if ((buf
[1] & 0x80) == 0 /* ilhu */)
1685 else if (buf
[0] == 0x60 && (buf
[1] & 0x80) != 0 /* iohl */)
1687 reg
[rt
] |= imm
& 0xffff;
1690 else if (buf
[0] == 0x04 /* ori */)
1693 imm
= (imm
^ 0x200) - 0x200;
1694 reg
[rt
] = reg
[ra
] | imm
;
1697 else if (buf
[0] == 0x32 && (buf
[1] & 0x80) != 0 /* fsmbi */)
1699 reg
[rt
] = ( ((imm
& 0x8000) ? 0xff000000 : 0)
1700 | ((imm
& 0x4000) ? 0x00ff0000 : 0)
1701 | ((imm
& 0x2000) ? 0x0000ff00 : 0)
1702 | ((imm
& 0x1000) ? 0x000000ff : 0));
1705 else if (buf
[0] == 0x16 /* andbi */)
1711 reg
[rt
] = reg
[ra
] & imm
;
1714 else if (buf
[0] == 0x33 && imm
== 1 /* brsl .+4 */)
1716 /* Used in pic reg load. Say rt is trashed. Won't be used
1717 in stack adjust, but we need to continue past this branch. */
1721 else if (is_branch (buf
) || is_indirect_branch (buf
))
1722 /* If we hit a branch then we must be out of the prologue. */
1729 /* qsort predicate to sort symbols by section and value. */
1731 static Elf_Internal_Sym
*sort_syms_syms
;
1732 static asection
**sort_syms_psecs
;
1735 sort_syms (const void *a
, const void *b
)
1737 Elf_Internal_Sym
*const *s1
= a
;
1738 Elf_Internal_Sym
*const *s2
= b
;
1739 asection
*sec1
,*sec2
;
1740 bfd_signed_vma delta
;
1742 sec1
= sort_syms_psecs
[*s1
- sort_syms_syms
];
1743 sec2
= sort_syms_psecs
[*s2
- sort_syms_syms
];
1746 return sec1
->index
- sec2
->index
;
1748 delta
= (*s1
)->st_value
- (*s2
)->st_value
;
1750 return delta
< 0 ? -1 : 1;
1752 delta
= (*s2
)->st_size
- (*s1
)->st_size
;
1754 return delta
< 0 ? -1 : 1;
1756 return *s1
< *s2
? -1 : 1;
1759 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
1760 entries for section SEC. */
1762 static struct spu_elf_stack_info
*
1763 alloc_stack_info (asection
*sec
, int max_fun
)
1765 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1768 amt
= sizeof (struct spu_elf_stack_info
);
1769 amt
+= (max_fun
- 1) * sizeof (struct function_info
);
1770 sec_data
->u
.i
.stack_info
= bfd_zmalloc (amt
);
1771 if (sec_data
->u
.i
.stack_info
!= NULL
)
1772 sec_data
->u
.i
.stack_info
->max_fun
= max_fun
;
1773 return sec_data
->u
.i
.stack_info
;
1776 /* Add a new struct function_info describing a (part of a) function
1777 starting at SYM_H. Keep the array sorted by address. */
1779 static struct function_info
*
1780 maybe_insert_function (asection
*sec
,
1783 bfd_boolean is_func
)
1785 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1786 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
1792 sinfo
= alloc_stack_info (sec
, 20);
1799 Elf_Internal_Sym
*sym
= sym_h
;
1800 off
= sym
->st_value
;
1801 size
= sym
->st_size
;
1805 struct elf_link_hash_entry
*h
= sym_h
;
1806 off
= h
->root
.u
.def
.value
;
1810 for (i
= sinfo
->num_fun
; --i
>= 0; )
1811 if (sinfo
->fun
[i
].lo
<= off
)
1816 /* Don't add another entry for an alias, but do update some
1818 if (sinfo
->fun
[i
].lo
== off
)
1820 /* Prefer globals over local syms. */
1821 if (global
&& !sinfo
->fun
[i
].global
)
1823 sinfo
->fun
[i
].global
= TRUE
;
1824 sinfo
->fun
[i
].u
.h
= sym_h
;
1827 sinfo
->fun
[i
].is_func
= TRUE
;
1828 return &sinfo
->fun
[i
];
1830 /* Ignore a zero-size symbol inside an existing function. */
1831 else if (sinfo
->fun
[i
].hi
> off
&& size
== 0)
1832 return &sinfo
->fun
[i
];
1835 if (sinfo
->num_fun
>= sinfo
->max_fun
)
1837 bfd_size_type amt
= sizeof (struct spu_elf_stack_info
);
1838 bfd_size_type old
= amt
;
1840 old
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
1841 sinfo
->max_fun
+= 20 + (sinfo
->max_fun
>> 1);
1842 amt
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
1843 sinfo
= bfd_realloc (sinfo
, amt
);
1846 memset ((char *) sinfo
+ old
, 0, amt
- old
);
1847 sec_data
->u
.i
.stack_info
= sinfo
;
1850 if (++i
< sinfo
->num_fun
)
1851 memmove (&sinfo
->fun
[i
+ 1], &sinfo
->fun
[i
],
1852 (sinfo
->num_fun
- i
) * sizeof (sinfo
->fun
[i
]));
1853 sinfo
->fun
[i
].is_func
= is_func
;
1854 sinfo
->fun
[i
].global
= global
;
1855 sinfo
->fun
[i
].sec
= sec
;
1857 sinfo
->fun
[i
].u
.h
= sym_h
;
1859 sinfo
->fun
[i
].u
.sym
= sym_h
;
1860 sinfo
->fun
[i
].lo
= off
;
1861 sinfo
->fun
[i
].hi
= off
+ size
;
1862 sinfo
->fun
[i
].stack
= -find_function_stack_adjust (sec
, off
);
1863 sinfo
->num_fun
+= 1;
1864 return &sinfo
->fun
[i
];
1867 /* Return the name of FUN. */
1870 func_name (struct function_info
*fun
)
1874 Elf_Internal_Shdr
*symtab_hdr
;
1876 while (fun
->start
!= NULL
)
1880 return fun
->u
.h
->root
.root
.string
;
1883 if (fun
->u
.sym
->st_name
== 0)
1885 size_t len
= strlen (sec
->name
);
1886 char *name
= bfd_malloc (len
+ 10);
1889 sprintf (name
, "%s+%lx", sec
->name
,
1890 (unsigned long) fun
->u
.sym
->st_value
& 0xffffffff);
1894 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
1895 return bfd_elf_sym_name (ibfd
, symtab_hdr
, fun
->u
.sym
, sec
);
1898 /* Read the instruction at OFF in SEC. Return true iff the instruction
1899 is a nop, lnop, or stop 0 (all zero insn). */
1902 is_nop (asection
*sec
, bfd_vma off
)
1904 unsigned char insn
[4];
1906 if (off
+ 4 > sec
->size
1907 || !bfd_get_section_contents (sec
->owner
, sec
, insn
, off
, 4))
1909 if ((insn
[0] & 0xbf) == 0 && (insn
[1] & 0xe0) == 0x20)
1911 if (insn
[0] == 0 && insn
[1] == 0 && insn
[2] == 0 && insn
[3] == 0)
1916 /* Extend the range of FUN to cover nop padding up to LIMIT.
1917 Return TRUE iff some instruction other than a NOP was found. */
1920 insns_at_end (struct function_info
*fun
, bfd_vma limit
)
1922 bfd_vma off
= (fun
->hi
+ 3) & -4;
1924 while (off
< limit
&& is_nop (fun
->sec
, off
))
1935 /* Check and fix overlapping function ranges. Return TRUE iff there
1936 are gaps in the current info we have about functions in SEC. */
1939 check_function_ranges (asection
*sec
, struct bfd_link_info
*info
)
1941 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1942 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
1944 bfd_boolean gaps
= FALSE
;
1949 for (i
= 1; i
< sinfo
->num_fun
; i
++)
1950 if (sinfo
->fun
[i
- 1].hi
> sinfo
->fun
[i
].lo
)
1952 /* Fix overlapping symbols. */
1953 const char *f1
= func_name (&sinfo
->fun
[i
- 1]);
1954 const char *f2
= func_name (&sinfo
->fun
[i
]);
1956 info
->callbacks
->einfo (_("warning: %s overlaps %s\n"), f1
, f2
);
1957 sinfo
->fun
[i
- 1].hi
= sinfo
->fun
[i
].lo
;
1959 else if (insns_at_end (&sinfo
->fun
[i
- 1], sinfo
->fun
[i
].lo
))
1962 if (sinfo
->num_fun
== 0)
1966 if (sinfo
->fun
[0].lo
!= 0)
1968 if (sinfo
->fun
[sinfo
->num_fun
- 1].hi
> sec
->size
)
1970 const char *f1
= func_name (&sinfo
->fun
[sinfo
->num_fun
- 1]);
1972 info
->callbacks
->einfo (_("warning: %s exceeds section size\n"), f1
);
1973 sinfo
->fun
[sinfo
->num_fun
- 1].hi
= sec
->size
;
1975 else if (insns_at_end (&sinfo
->fun
[sinfo
->num_fun
- 1], sec
->size
))
1981 /* Search current function info for a function that contains address
1982 OFFSET in section SEC. */
1984 static struct function_info
*
1985 find_function (asection
*sec
, bfd_vma offset
, struct bfd_link_info
*info
)
1987 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1988 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
1992 hi
= sinfo
->num_fun
;
1995 mid
= (lo
+ hi
) / 2;
1996 if (offset
< sinfo
->fun
[mid
].lo
)
1998 else if (offset
>= sinfo
->fun
[mid
].hi
)
2001 return &sinfo
->fun
[mid
];
2003 info
->callbacks
->einfo (_("%A:0x%v not found in function table\n"),
2008 /* Add CALLEE to CALLER call list if not already present. Return TRUE
2009 if CALLEE was new. If this function return FALSE, CALLEE should
2013 insert_callee (struct function_info
*caller
, struct call_info
*callee
)
2015 struct call_info
**pp
, *p
;
2017 for (pp
= &caller
->call_list
; (p
= *pp
) != NULL
; pp
= &p
->next
)
2018 if (p
->fun
== callee
->fun
)
2020 /* Tail calls use less stack than normal calls. Retain entry
2021 for normal call over one for tail call. */
2022 p
->is_tail
&= callee
->is_tail
;
2025 p
->fun
->start
= NULL
;
2026 p
->fun
->is_func
= TRUE
;
2029 /* Reorder list so most recent call is first. */
2031 p
->next
= caller
->call_list
;
2032 caller
->call_list
= p
;
2035 callee
->next
= caller
->call_list
;
2037 caller
->call_list
= callee
;
2041 /* Copy CALL and insert the copy into CALLER. */
2044 copy_callee (struct function_info
*caller
, const struct call_info
*call
)
2046 struct call_info
*callee
;
2047 callee
= bfd_malloc (sizeof (*callee
));
2051 if (!insert_callee (caller
, callee
))
2056 /* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2057 overlay stub sections. */
2060 interesting_section (asection
*s
)
2062 return (s
->output_section
!= bfd_abs_section_ptr
2063 && ((s
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_IN_MEMORY
))
2064 == (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2068 /* Rummage through the relocs for SEC, looking for function calls.
2069 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
2070 mark destination symbols on calls as being functions. Also
2071 look at branches, which may be tail calls or go to hot/cold
2072 section part of same function. */
2075 mark_functions_via_relocs (asection
*sec
,
2076 struct bfd_link_info
*info
,
2079 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
2080 Elf_Internal_Shdr
*symtab_hdr
;
2082 static bfd_boolean warned
;
2084 if (!interesting_section (sec
)
2085 || sec
->reloc_count
== 0)
2088 internal_relocs
= _bfd_elf_link_read_relocs (sec
->owner
, sec
, NULL
, NULL
,
2090 if (internal_relocs
== NULL
)
2093 symtab_hdr
= &elf_tdata (sec
->owner
)->symtab_hdr
;
2094 psyms
= &symtab_hdr
->contents
;
2095 irela
= internal_relocs
;
2096 irelaend
= irela
+ sec
->reloc_count
;
2097 for (; irela
< irelaend
; irela
++)
2099 enum elf_spu_reloc_type r_type
;
2100 unsigned int r_indx
;
2102 Elf_Internal_Sym
*sym
;
2103 struct elf_link_hash_entry
*h
;
2105 bfd_boolean reject
, is_call
;
2106 struct function_info
*caller
;
2107 struct call_info
*callee
;
2110 r_type
= ELF32_R_TYPE (irela
->r_info
);
2111 if (r_type
!= R_SPU_REL16
2112 && r_type
!= R_SPU_ADDR16
)
2115 if (!(call_tree
&& spu_hash_table (info
)->params
->auto_overlay
))
2119 r_indx
= ELF32_R_SYM (irela
->r_info
);
2120 if (!get_sym_h (&h
, &sym
, &sym_sec
, psyms
, r_indx
, sec
->owner
))
2124 || sym_sec
->output_section
== bfd_abs_section_ptr
)
2130 unsigned char insn
[4];
2132 if (!bfd_get_section_contents (sec
->owner
, sec
, insn
,
2133 irela
->r_offset
, 4))
2135 if (is_branch (insn
))
2137 is_call
= (insn
[0] & 0xfd) == 0x31;
2138 if ((sym_sec
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2139 != (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2142 info
->callbacks
->einfo
2143 (_("%B(%A+0x%v): call to non-code section"
2144 " %B(%A), analysis incomplete\n"),
2145 sec
->owner
, sec
, irela
->r_offset
,
2146 sym_sec
->owner
, sym_sec
);
2154 if (!(call_tree
&& spu_hash_table (info
)->params
->auto_overlay
)
2162 /* For --auto-overlay, count possible stubs we need for
2163 function pointer references. */
2164 unsigned int sym_type
;
2168 sym_type
= ELF_ST_TYPE (sym
->st_info
);
2169 if (sym_type
== STT_FUNC
)
2170 spu_hash_table (info
)->non_ovly_stub
+= 1;
2175 val
= h
->root
.u
.def
.value
;
2177 val
= sym
->st_value
;
2178 val
+= irela
->r_addend
;
2182 struct function_info
*fun
;
2184 if (irela
->r_addend
!= 0)
2186 Elf_Internal_Sym
*fake
= bfd_zmalloc (sizeof (*fake
));
2189 fake
->st_value
= val
;
2191 = _bfd_elf_section_from_bfd_section (sym_sec
->owner
, sym_sec
);
2195 fun
= maybe_insert_function (sym_sec
, sym
, FALSE
, is_call
);
2197 fun
= maybe_insert_function (sym_sec
, h
, TRUE
, is_call
);
2200 if (irela
->r_addend
!= 0
2201 && fun
->u
.sym
!= sym
)
2206 caller
= find_function (sec
, irela
->r_offset
, info
);
2209 callee
= bfd_malloc (sizeof *callee
);
2213 callee
->fun
= find_function (sym_sec
, val
, info
);
2214 if (callee
->fun
== NULL
)
2216 callee
->is_tail
= !is_call
;
2217 callee
->is_pasted
= FALSE
;
2219 if (callee
->fun
->last_caller
!= sec
)
2221 callee
->fun
->last_caller
= sec
;
2222 callee
->fun
->call_count
+= 1;
2224 if (!insert_callee (caller
, callee
))
2227 && !callee
->fun
->is_func
2228 && callee
->fun
->stack
== 0)
2230 /* This is either a tail call or a branch from one part of
2231 the function to another, ie. hot/cold section. If the
2232 destination has been called by some other function then
2233 it is a separate function. We also assume that functions
2234 are not split across input files. */
2235 if (sec
->owner
!= sym_sec
->owner
)
2237 callee
->fun
->start
= NULL
;
2238 callee
->fun
->is_func
= TRUE
;
2240 else if (callee
->fun
->start
== NULL
)
2241 callee
->fun
->start
= caller
;
2244 struct function_info
*callee_start
;
2245 struct function_info
*caller_start
;
2246 callee_start
= callee
->fun
;
2247 while (callee_start
->start
)
2248 callee_start
= callee_start
->start
;
2249 caller_start
= caller
;
2250 while (caller_start
->start
)
2251 caller_start
= caller_start
->start
;
2252 if (caller_start
!= callee_start
)
2254 callee
->fun
->start
= NULL
;
2255 callee
->fun
->is_func
= TRUE
;
2264 /* Handle something like .init or .fini, which has a piece of a function.
2265 These sections are pasted together to form a single function. */
2268 pasted_function (asection
*sec
, struct bfd_link_info
*info
)
2270 struct bfd_link_order
*l
;
2271 struct _spu_elf_section_data
*sec_data
;
2272 struct spu_elf_stack_info
*sinfo
;
2273 Elf_Internal_Sym
*fake
;
2274 struct function_info
*fun
, *fun_start
;
2276 fake
= bfd_zmalloc (sizeof (*fake
));
2280 fake
->st_size
= sec
->size
;
2282 = _bfd_elf_section_from_bfd_section (sec
->owner
, sec
);
2283 fun
= maybe_insert_function (sec
, fake
, FALSE
, FALSE
);
2287 /* Find a function immediately preceding this section. */
2289 for (l
= sec
->output_section
->map_head
.link_order
; l
!= NULL
; l
= l
->next
)
2291 if (l
->u
.indirect
.section
== sec
)
2293 if (fun_start
!= NULL
)
2295 struct call_info
*callee
= bfd_malloc (sizeof *callee
);
2299 fun
->start
= fun_start
;
2301 callee
->is_tail
= TRUE
;
2302 callee
->is_pasted
= TRUE
;
2304 if (!insert_callee (fun_start
, callee
))
2310 if (l
->type
== bfd_indirect_link_order
2311 && (sec_data
= spu_elf_section_data (l
->u
.indirect
.section
)) != NULL
2312 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
2313 && sinfo
->num_fun
!= 0)
2314 fun_start
= &sinfo
->fun
[sinfo
->num_fun
- 1];
2317 info
->callbacks
->einfo (_("%A link_order not found\n"), sec
);
2321 /* Map address ranges in code sections to functions. */
2324 discover_functions (struct bfd_link_info
*info
)
2328 Elf_Internal_Sym
***psym_arr
;
2329 asection
***sec_arr
;
2330 bfd_boolean gaps
= FALSE
;
2333 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2336 psym_arr
= bfd_zmalloc (bfd_idx
* sizeof (*psym_arr
));
2337 if (psym_arr
== NULL
)
2339 sec_arr
= bfd_zmalloc (bfd_idx
* sizeof (*sec_arr
));
2340 if (sec_arr
== NULL
)
2344 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2346 ibfd
= ibfd
->link_next
, bfd_idx
++)
2348 extern const bfd_target bfd_elf32_spu_vec
;
2349 Elf_Internal_Shdr
*symtab_hdr
;
2352 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
2353 asection
**psecs
, **p
;
2355 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2358 /* Read all the symbols. */
2359 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2360 symcount
= symtab_hdr
->sh_size
/ symtab_hdr
->sh_entsize
;
2364 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2365 if (interesting_section (sec
))
2373 if (symtab_hdr
->contents
!= NULL
)
2375 /* Don't use cached symbols since the generic ELF linker
2376 code only reads local symbols, and we need globals too. */
2377 free (symtab_hdr
->contents
);
2378 symtab_hdr
->contents
= NULL
;
2380 syms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
, symcount
, 0,
2382 symtab_hdr
->contents
= (void *) syms
;
2386 /* Select defined function symbols that are going to be output. */
2387 psyms
= bfd_malloc ((symcount
+ 1) * sizeof (*psyms
));
2390 psym_arr
[bfd_idx
] = psyms
;
2391 psecs
= bfd_malloc (symcount
* sizeof (*psecs
));
2394 sec_arr
[bfd_idx
] = psecs
;
2395 for (psy
= psyms
, p
= psecs
, sy
= syms
; sy
< syms
+ symcount
; ++p
, ++sy
)
2396 if (ELF_ST_TYPE (sy
->st_info
) == STT_NOTYPE
2397 || ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
2398 || ELF_ST_TYPE (sy
->st_info
) == STT_SECTION
)
2402 *p
= s
= bfd_section_from_elf_index (ibfd
, sy
->st_shndx
);
2403 if (s
!= NULL
&& interesting_section (s
))
2406 symcount
= psy
- psyms
;
2409 /* Sort them by section and offset within section. */
2410 sort_syms_syms
= syms
;
2411 sort_syms_psecs
= psecs
;
2412 qsort (psyms
, symcount
, sizeof (*psyms
), sort_syms
);
2414 /* Now inspect the function symbols. */
2415 for (psy
= psyms
; psy
< psyms
+ symcount
; )
2417 asection
*s
= psecs
[*psy
- syms
];
2418 Elf_Internal_Sym
**psy2
;
2420 for (psy2
= psy
; ++psy2
< psyms
+ symcount
; )
2421 if (psecs
[*psy2
- syms
] != s
)
2424 if (!alloc_stack_info (s
, psy2
- psy
))
2429 /* First install info about properly typed and sized functions.
2430 In an ideal world this will cover all code sections, except
2431 when partitioning functions into hot and cold sections,
2432 and the horrible pasted together .init and .fini functions. */
2433 for (psy
= psyms
; psy
< psyms
+ symcount
; ++psy
)
2436 if (ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
2438 asection
*s
= psecs
[sy
- syms
];
2439 if (!maybe_insert_function (s
, sy
, FALSE
, TRUE
))
2444 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2445 if (interesting_section (sec
))
2446 gaps
|= check_function_ranges (sec
, info
);
2451 /* See if we can discover more function symbols by looking at
2453 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2455 ibfd
= ibfd
->link_next
, bfd_idx
++)
2459 if (psym_arr
[bfd_idx
] == NULL
)
2462 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2463 if (!mark_functions_via_relocs (sec
, info
, FALSE
))
2467 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2469 ibfd
= ibfd
->link_next
, bfd_idx
++)
2471 Elf_Internal_Shdr
*symtab_hdr
;
2473 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
2476 if ((psyms
= psym_arr
[bfd_idx
]) == NULL
)
2479 psecs
= sec_arr
[bfd_idx
];
2481 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2482 syms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
2485 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2486 if (interesting_section (sec
))
2487 gaps
|= check_function_ranges (sec
, info
);
2491 /* Finally, install all globals. */
2492 for (psy
= psyms
; (sy
= *psy
) != NULL
; ++psy
)
2496 s
= psecs
[sy
- syms
];
2498 /* Global syms might be improperly typed functions. */
2499 if (ELF_ST_TYPE (sy
->st_info
) != STT_FUNC
2500 && ELF_ST_BIND (sy
->st_info
) == STB_GLOBAL
)
2502 if (!maybe_insert_function (s
, sy
, FALSE
, FALSE
))
2508 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2510 extern const bfd_target bfd_elf32_spu_vec
;
2513 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2516 /* Some of the symbols we've installed as marking the
2517 beginning of functions may have a size of zero. Extend
2518 the range of such functions to the beginning of the
2519 next symbol of interest. */
2520 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2521 if (interesting_section (sec
))
2523 struct _spu_elf_section_data
*sec_data
;
2524 struct spu_elf_stack_info
*sinfo
;
2526 sec_data
= spu_elf_section_data (sec
);
2527 sinfo
= sec_data
->u
.i
.stack_info
;
2531 bfd_vma hi
= sec
->size
;
2533 for (fun_idx
= sinfo
->num_fun
; --fun_idx
>= 0; )
2535 sinfo
->fun
[fun_idx
].hi
= hi
;
2536 hi
= sinfo
->fun
[fun_idx
].lo
;
2539 /* No symbols in this section. Must be .init or .fini
2540 or something similar. */
2541 else if (!pasted_function (sec
, info
))
2547 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2549 ibfd
= ibfd
->link_next
, bfd_idx
++)
2551 if (psym_arr
[bfd_idx
] == NULL
)
2554 free (psym_arr
[bfd_idx
]);
2555 free (sec_arr
[bfd_idx
]);
2564 /* Iterate over all function_info we have collected, calling DOIT on
2565 each node if ROOT_ONLY is false. Only call DOIT on root nodes
2569 for_each_node (bfd_boolean (*doit
) (struct function_info
*,
2570 struct bfd_link_info
*,
2572 struct bfd_link_info
*info
,
2578 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2580 extern const bfd_target bfd_elf32_spu_vec
;
2583 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2586 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2588 struct _spu_elf_section_data
*sec_data
;
2589 struct spu_elf_stack_info
*sinfo
;
2591 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
2592 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
2595 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
2596 if (!root_only
|| !sinfo
->fun
[i
].non_root
)
2597 if (!doit (&sinfo
->fun
[i
], info
, param
))
2605 /* Transfer call info attached to struct function_info entries for
2606 all of a given function's sections to the first entry. */
2609 transfer_calls (struct function_info
*fun
,
2610 struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
2611 void *param ATTRIBUTE_UNUSED
)
2613 struct function_info
*start
= fun
->start
;
2617 struct call_info
*call
, *call_next
;
2619 while (start
->start
!= NULL
)
2620 start
= start
->start
;
2621 for (call
= fun
->call_list
; call
!= NULL
; call
= call_next
)
2623 call_next
= call
->next
;
2624 if (!insert_callee (start
, call
))
2627 fun
->call_list
= NULL
;
2632 /* Mark nodes in the call graph that are called by some other node. */
2635 mark_non_root (struct function_info
*fun
,
2636 struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
2637 void *param ATTRIBUTE_UNUSED
)
2639 struct call_info
*call
;
2644 for (call
= fun
->call_list
; call
; call
= call
->next
)
2646 call
->fun
->non_root
= TRUE
;
2647 mark_non_root (call
->fun
, 0, 0);
2652 /* Remove cycles from the call graph. Set depth of nodes. */
2655 remove_cycles (struct function_info
*fun
,
2656 struct bfd_link_info
*info
,
2659 struct call_info
**callp
, *call
;
2660 unsigned int depth
= *(unsigned int *) param
;
2661 unsigned int max_depth
= depth
;
2665 fun
->marking
= TRUE
;
2667 callp
= &fun
->call_list
;
2668 while ((call
= *callp
) != NULL
)
2670 if (!call
->fun
->visit2
)
2672 call
->max_depth
= depth
+ !call
->is_pasted
;
2673 if (!remove_cycles (call
->fun
, info
, &call
->max_depth
))
2675 if (max_depth
< call
->max_depth
)
2676 max_depth
= call
->max_depth
;
2678 else if (call
->fun
->marking
)
2680 if (!spu_hash_table (info
)->params
->auto_overlay
)
2682 const char *f1
= func_name (fun
);
2683 const char *f2
= func_name (call
->fun
);
2685 info
->callbacks
->info (_("Stack analysis will ignore the call "
2689 *callp
= call
->next
;
2693 callp
= &call
->next
;
2695 fun
->marking
= FALSE
;
2696 *(unsigned int *) param
= max_depth
;
2700 /* Check that we actually visited all nodes in remove_cycles. If we
2701 didn't, then there is some cycle in the call graph not attached to
2702 any root node. Arbitrarily choose a node in the cycle as a new
2703 root and break the cycle. */
2706 mark_detached_root (struct function_info
*fun
,
2707 struct bfd_link_info
*info
,
2712 fun
->non_root
= FALSE
;
2713 *(unsigned int *) param
= 0;
2714 return remove_cycles (fun
, info
, param
);
2717 /* Populate call_list for each function. */
2720 build_call_tree (struct bfd_link_info
*info
)
2725 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2727 extern const bfd_target bfd_elf32_spu_vec
;
2730 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2733 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2734 if (!mark_functions_via_relocs (sec
, info
, TRUE
))
2738 /* Transfer call info from hot/cold section part of function
2740 if (!spu_hash_table (info
)->params
->auto_overlay
2741 && !for_each_node (transfer_calls
, info
, 0, FALSE
))
2744 /* Find the call graph root(s). */
2745 if (!for_each_node (mark_non_root
, info
, 0, FALSE
))
2748 /* Remove cycles from the call graph. We start from the root node(s)
2749 so that we break cycles in a reasonable place. */
2751 if (!for_each_node (remove_cycles
, info
, &depth
, TRUE
))
2754 return for_each_node (mark_detached_root
, info
, &depth
, FALSE
);
2757 /* qsort predicate to sort calls by max_depth then count. */
2760 sort_calls (const void *a
, const void *b
)
2762 struct call_info
*const *c1
= a
;
2763 struct call_info
*const *c2
= b
;
2766 delta
= (*c2
)->max_depth
- (*c1
)->max_depth
;
2770 delta
= (*c2
)->count
- (*c1
)->count
;
2774 return (char *) c1
- (char *) c2
;
/* Parameter block for mark_overlay_section: tracks the largest
   single overlay (text + rodata) seen so far.  */
struct _mos_param {
  unsigned int max_overlay_size;
};
2781 /* Set linker_mark and gc_mark on any sections that we will put in
2782 overlays. These flags are used by the generic ELF linker, but we
2783 won't be continuing on to bfd_elf_final_link so it is OK to use
2784 them. linker_mark is clear before we get here. Set segment_mark
2785 on sections that are part of a pasted function (excluding the last
2788 Set up function rodata section if --overlay-rodata. We don't
2789 currently include merged string constant rodata sections since
2791 Sort the call graph so that the deepest nodes will be visited
2795 mark_overlay_section (struct function_info
*fun
,
2796 struct bfd_link_info
*info
,
2799 struct call_info
*call
;
2801 struct _mos_param
*mos_param
= param
;
2807 if (!fun
->sec
->linker_mark
)
2811 fun
->sec
->linker_mark
= 1;
2812 fun
->sec
->gc_mark
= 1;
2813 fun
->sec
->segment_mark
= 0;
2814 /* Ensure SEC_CODE is set on this text section (it ought to
2815 be!), and SEC_CODE is clear on rodata sections. We use
2816 this flag to differentiate the two overlay section types. */
2817 fun
->sec
->flags
|= SEC_CODE
;
2819 if (spu_hash_table (info
)->params
->auto_overlay
& OVERLAY_RODATA
)
2823 /* Find the rodata section corresponding to this function's
2825 if (strcmp (fun
->sec
->name
, ".text") == 0)
2827 name
= bfd_malloc (sizeof (".rodata"));
2830 memcpy (name
, ".rodata", sizeof (".rodata"));
2832 else if (strncmp (fun
->sec
->name
, ".text.", 6) == 0)
2834 size_t len
= strlen (fun
->sec
->name
);
2835 name
= bfd_malloc (len
+ 3);
2838 memcpy (name
, ".rodata", sizeof (".rodata"));
2839 memcpy (name
+ 7, fun
->sec
->name
+ 5, len
- 4);
2841 else if (strncmp (fun
->sec
->name
, ".gnu.linkonce.t.", 16) == 0)
2843 size_t len
= strlen (fun
->sec
->name
) + 1;
2844 name
= bfd_malloc (len
);
2847 memcpy (name
, fun
->sec
->name
, len
);
2853 asection
*rodata
= NULL
;
2854 asection
*group_sec
= elf_section_data (fun
->sec
)->next_in_group
;
2855 if (group_sec
== NULL
)
2856 rodata
= bfd_get_section_by_name (fun
->sec
->owner
, name
);
2858 while (group_sec
!= NULL
&& group_sec
!= fun
->sec
)
2860 if (strcmp (group_sec
->name
, name
) == 0)
2865 group_sec
= elf_section_data (group_sec
)->next_in_group
;
2867 fun
->rodata
= rodata
;
2870 fun
->rodata
->linker_mark
= 1;
2871 fun
->rodata
->gc_mark
= 1;
2872 fun
->rodata
->flags
&= ~SEC_CODE
;
2877 size
= fun
->sec
->size
;
2879 size
+= fun
->rodata
->size
;
2880 if (mos_param
->max_overlay_size
< size
)
2881 mos_param
->max_overlay_size
= size
;
2884 for (count
= 0, call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
2889 struct call_info
**calls
= bfd_malloc (count
* sizeof (*calls
));
2893 for (count
= 0, call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
2894 calls
[count
++] = call
;
2896 qsort (calls
, count
, sizeof (*calls
), sort_calls
);
2898 fun
->call_list
= NULL
;
2902 calls
[count
]->next
= fun
->call_list
;
2903 fun
->call_list
= calls
[count
];
2908 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
2910 if (call
->is_pasted
)
2912 /* There can only be one is_pasted call per function_info. */
2913 BFD_ASSERT (!fun
->sec
->segment_mark
);
2914 fun
->sec
->segment_mark
= 1;
2916 if (!mark_overlay_section (call
->fun
, info
, param
))
2920 /* Don't put entry code into an overlay. The overlay manager needs
2922 if (fun
->lo
+ fun
->sec
->output_offset
+ fun
->sec
->output_section
->vma
2923 == info
->output_bfd
->start_address
)
2925 fun
->sec
->linker_mark
= 0;
2926 if (fun
->rodata
!= NULL
)
2927 fun
->rodata
->linker_mark
= 0;
2932 /* If non-zero then unmark functions called from those within sections
2933 that we need to unmark. Unfortunately this isn't reliable since the
2934 call graph cannot know the destination of function pointer calls. */
2935 #define RECURSE_UNMARK 0
2938 asection
*exclude_input_section
;
2939 asection
*exclude_output_section
;
2940 unsigned long clearing
;
2943 /* Undo some of mark_overlay_section's work. */
2946 unmark_overlay_section (struct function_info
*fun
,
2947 struct bfd_link_info
*info
,
2950 struct call_info
*call
;
2951 struct _uos_param
*uos_param
= param
;
2952 unsigned int excluded
= 0;
2960 if (fun
->sec
== uos_param
->exclude_input_section
2961 || fun
->sec
->output_section
== uos_param
->exclude_output_section
)
2965 uos_param
->clearing
+= excluded
;
2967 if (RECURSE_UNMARK
? uos_param
->clearing
: excluded
)
2969 fun
->sec
->linker_mark
= 0;
2971 fun
->rodata
->linker_mark
= 0;
2974 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
2975 if (!unmark_overlay_section (call
->fun
, info
, param
))
2979 uos_param
->clearing
-= excluded
;
2984 unsigned int lib_size
;
2985 asection
**lib_sections
;
2988 /* Add sections we have marked as belonging to overlays to an array
2989 for consideration as non-overlay sections. The array consist of
2990 pairs of sections, (text,rodata), for functions in the call graph. */
2993 collect_lib_sections (struct function_info
*fun
,
2994 struct bfd_link_info
*info
,
2997 struct _cl_param
*lib_param
= param
;
2998 struct call_info
*call
;
3005 if (!fun
->sec
->linker_mark
|| !fun
->sec
->gc_mark
|| fun
->sec
->segment_mark
)
3008 size
= fun
->sec
->size
;
3010 size
+= fun
->rodata
->size
;
3011 if (size
<= lib_param
->lib_size
)
3013 *lib_param
->lib_sections
++ = fun
->sec
;
3014 fun
->sec
->gc_mark
= 0;
3015 if (fun
->rodata
&& fun
->rodata
->linker_mark
&& fun
->rodata
->gc_mark
)
3017 *lib_param
->lib_sections
++ = fun
->rodata
;
3018 fun
->rodata
->gc_mark
= 0;
3021 *lib_param
->lib_sections
++ = NULL
;
3024 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3025 collect_lib_sections (call
->fun
, info
, param
);
3030 /* qsort predicate to sort sections by call count. */
3033 sort_lib (const void *a
, const void *b
)
3035 asection
*const *s1
= a
;
3036 asection
*const *s2
= b
;
3037 struct _spu_elf_section_data
*sec_data
;
3038 struct spu_elf_stack_info
*sinfo
;
3042 if ((sec_data
= spu_elf_section_data (*s1
)) != NULL
3043 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3046 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3047 delta
-= sinfo
->fun
[i
].call_count
;
3050 if ((sec_data
= spu_elf_section_data (*s2
)) != NULL
3051 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3054 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3055 delta
+= sinfo
->fun
[i
].call_count
;
3064 /* Remove some sections from those marked to be in overlays. Choose
3065 those that are called from many places, likely library functions. */
3068 auto_ovl_lib_functions (struct bfd_link_info
*info
, unsigned int lib_size
)
3071 asection
**lib_sections
;
3072 unsigned int i
, lib_count
;
3073 struct _cl_param collect_lib_param
;
3074 struct function_info dummy_caller
;
3075 struct spu_link_hash_table
*htab
;
3077 memset (&dummy_caller
, 0, sizeof (dummy_caller
));
3079 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3081 extern const bfd_target bfd_elf32_spu_vec
;
3084 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3087 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3088 if (sec
->linker_mark
3089 && sec
->size
< lib_size
3090 && (sec
->flags
& SEC_CODE
) != 0)
3093 lib_sections
= bfd_malloc (lib_count
* 2 * sizeof (*lib_sections
));
3094 if (lib_sections
== NULL
)
3095 return (unsigned int) -1;
3096 collect_lib_param
.lib_size
= lib_size
;
3097 collect_lib_param
.lib_sections
= lib_sections
;
3098 if (!for_each_node (collect_lib_sections
, info
, &collect_lib_param
,
3100 return (unsigned int) -1;
3101 lib_count
= (collect_lib_param
.lib_sections
- lib_sections
) / 2;
3103 /* Sort sections so that those with the most calls are first. */
3105 qsort (lib_sections
, lib_count
, 2 * sizeof (*lib_sections
), sort_lib
);
3107 htab
= spu_hash_table (info
);
3108 for (i
= 0; i
< lib_count
; i
++)
3110 unsigned int tmp
, stub_size
;
3112 struct _spu_elf_section_data
*sec_data
;
3113 struct spu_elf_stack_info
*sinfo
;
3115 sec
= lib_sections
[2 * i
];
3116 /* If this section is OK, its size must be less than lib_size. */
3118 /* If it has a rodata section, then add that too. */
3119 if (lib_sections
[2 * i
+ 1])
3120 tmp
+= lib_sections
[2 * i
+ 1]->size
;
3121 /* Add any new overlay call stubs needed by the section. */
3124 && (sec_data
= spu_elf_section_data (sec
)) != NULL
3125 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3128 struct call_info
*call
;
3130 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3131 for (call
= sinfo
->fun
[k
].call_list
; call
; call
= call
->next
)
3132 if (call
->fun
->sec
->linker_mark
)
3134 struct call_info
*p
;
3135 for (p
= dummy_caller
.call_list
; p
; p
= p
->next
)
3136 if (p
->fun
== call
->fun
)
3139 stub_size
+= ovl_stub_size (htab
->params
->ovly_flavour
);
3142 if (tmp
+ stub_size
< lib_size
)
3144 struct call_info
**pp
, *p
;
3146 /* This section fits. Mark it as non-overlay. */
3147 lib_sections
[2 * i
]->linker_mark
= 0;
3148 if (lib_sections
[2 * i
+ 1])
3149 lib_sections
[2 * i
+ 1]->linker_mark
= 0;
3150 lib_size
-= tmp
+ stub_size
;
3151 /* Call stubs to the section we just added are no longer
3153 pp
= &dummy_caller
.call_list
;
3154 while ((p
= *pp
) != NULL
)
3155 if (!p
->fun
->sec
->linker_mark
)
3157 lib_size
+= ovl_stub_size (htab
->params
->ovly_flavour
);
3163 /* Add new call stubs to dummy_caller. */
3164 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
3165 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3168 struct call_info
*call
;
3170 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3171 for (call
= sinfo
->fun
[k
].call_list
;
3174 if (call
->fun
->sec
->linker_mark
)
3176 struct call_info
*callee
;
3177 callee
= bfd_malloc (sizeof (*callee
));
3179 return (unsigned int) -1;
3181 if (!insert_callee (&dummy_caller
, callee
))
3187 while (dummy_caller
.call_list
!= NULL
)
3189 struct call_info
*call
= dummy_caller
.call_list
;
3190 dummy_caller
.call_list
= call
->next
;
3193 for (i
= 0; i
< 2 * lib_count
; i
++)
3194 if (lib_sections
[i
])
3195 lib_sections
[i
]->gc_mark
= 1;
3196 free (lib_sections
);
3200 /* Build an array of overlay sections. The deepest node's section is
3201 added first, then its parent node's section, then everything called
3202 from the parent section. The idea being to group sections to
3203 minimise calls between different overlays. */
3206 collect_overlays (struct function_info
*fun
,
3207 struct bfd_link_info
*info
,
3210 struct call_info
*call
;
3211 bfd_boolean added_fun
;
3212 asection
***ovly_sections
= param
;
3218 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3219 if (!call
->is_pasted
)
3221 if (!collect_overlays (call
->fun
, info
, ovly_sections
))
3227 if (fun
->sec
->linker_mark
&& fun
->sec
->gc_mark
)
3229 fun
->sec
->gc_mark
= 0;
3230 *(*ovly_sections
)++ = fun
->sec
;
3231 if (fun
->rodata
&& fun
->rodata
->linker_mark
&& fun
->rodata
->gc_mark
)
3233 fun
->rodata
->gc_mark
= 0;
3234 *(*ovly_sections
)++ = fun
->rodata
;
3237 *(*ovly_sections
)++ = NULL
;
3240 /* Pasted sections must stay with the first section. We don't
3241 put pasted sections in the array, just the first section.
3242 Mark subsequent sections as already considered. */
3243 if (fun
->sec
->segment_mark
)
3245 struct function_info
*call_fun
= fun
;
3248 for (call
= call_fun
->call_list
; call
!= NULL
; call
= call
->next
)
3249 if (call
->is_pasted
)
3251 call_fun
= call
->fun
;
3252 call_fun
->sec
->gc_mark
= 0;
3253 if (call_fun
->rodata
)
3254 call_fun
->rodata
->gc_mark
= 0;
3260 while (call_fun
->sec
->segment_mark
);
3264 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3265 if (!collect_overlays (call
->fun
, info
, ovly_sections
))
3270 struct _spu_elf_section_data
*sec_data
;
3271 struct spu_elf_stack_info
*sinfo
;
3273 if ((sec_data
= spu_elf_section_data (fun
->sec
)) != NULL
3274 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3277 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3278 if (!collect_overlays (&sinfo
->fun
[i
], info
, ovly_sections
))
3286 struct _sum_stack_param
{
3288 size_t overall_stack
;
3289 bfd_boolean emit_stack_syms
;
3292 /* Descend the call graph for FUN, accumulating total stack required. */
3295 sum_stack (struct function_info
*fun
,
3296 struct bfd_link_info
*info
,
3299 struct call_info
*call
;
3300 struct function_info
*max
;
3301 size_t stack
, cum_stack
;
3303 bfd_boolean has_call
;
3304 struct _sum_stack_param
*sum_stack_param
= param
;
3305 struct spu_link_hash_table
*htab
;
3307 cum_stack
= fun
->stack
;
3308 sum_stack_param
->cum_stack
= cum_stack
;
3314 for (call
= fun
->call_list
; call
; call
= call
->next
)
3316 if (!call
->is_pasted
)
3318 if (!sum_stack (call
->fun
, info
, sum_stack_param
))
3320 stack
= sum_stack_param
->cum_stack
;
3321 /* Include caller stack for normal calls, don't do so for
3322 tail calls. fun->stack here is local stack usage for
3324 if (!call
->is_tail
|| call
->is_pasted
|| call
->fun
->start
!= NULL
)
3325 stack
+= fun
->stack
;
3326 if (cum_stack
< stack
)
3333 sum_stack_param
->cum_stack
= cum_stack
;
3335 /* Now fun->stack holds cumulative stack. */
3336 fun
->stack
= cum_stack
;
3340 && sum_stack_param
->overall_stack
< cum_stack
)
3341 sum_stack_param
->overall_stack
= cum_stack
;
3343 htab
= spu_hash_table (info
);
3344 if (htab
->params
->auto_overlay
)
3347 f1
= func_name (fun
);
3349 info
->callbacks
->info (_(" %s: 0x%v\n"), f1
, (bfd_vma
) cum_stack
);
3350 info
->callbacks
->minfo (_("%s: 0x%v 0x%v\n"),
3351 f1
, (bfd_vma
) stack
, (bfd_vma
) cum_stack
);
3355 info
->callbacks
->minfo (_(" calls:\n"));
3356 for (call
= fun
->call_list
; call
; call
= call
->next
)
3357 if (!call
->is_pasted
)
3359 const char *f2
= func_name (call
->fun
);
3360 const char *ann1
= call
->fun
== max
? "*" : " ";
3361 const char *ann2
= call
->is_tail
? "t" : " ";
3363 info
->callbacks
->minfo (_(" %s%s %s\n"), ann1
, ann2
, f2
);
3367 if (sum_stack_param
->emit_stack_syms
)
3369 char *name
= bfd_malloc (18 + strlen (f1
));
3370 struct elf_link_hash_entry
*h
;
3375 if (fun
->global
|| ELF_ST_BIND (fun
->u
.sym
->st_info
) == STB_GLOBAL
)
3376 sprintf (name
, "__stack_%s", f1
);
3378 sprintf (name
, "__stack_%x_%s", fun
->sec
->id
& 0xffffffff, f1
);
3380 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
3383 && (h
->root
.type
== bfd_link_hash_new
3384 || h
->root
.type
== bfd_link_hash_undefined
3385 || h
->root
.type
== bfd_link_hash_undefweak
))
3387 h
->root
.type
= bfd_link_hash_defined
;
3388 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
3389 h
->root
.u
.def
.value
= cum_stack
;
3394 h
->ref_regular_nonweak
= 1;
3395 h
->forced_local
= 1;
3403 /* SEC is part of a pasted function. Return the call_info for the
3404 next section of this function. */
3406 static struct call_info
*
3407 find_pasted_call (asection
*sec
)
3409 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
3410 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
3411 struct call_info
*call
;
3414 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3415 for (call
= sinfo
->fun
[k
].call_list
; call
!= NULL
; call
= call
->next
)
3416 if (call
->is_pasted
)
3422 /* qsort predicate to sort bfds by file name. */
3425 sort_bfds (const void *a
, const void *b
)
3427 bfd
*const *abfd1
= a
;
3428 bfd
*const *abfd2
= b
;
3430 return strcmp ((*abfd1
)->filename
, (*abfd2
)->filename
);
3433 /* Handle --auto-overlay. */
3435 static void spu_elf_auto_overlay (struct bfd_link_info
*)
3439 spu_elf_auto_overlay (struct bfd_link_info
*info
)
3443 struct elf_segment_map
*m
;
3444 unsigned int fixed_size
, lo
, hi
;
3445 struct spu_link_hash_table
*htab
;
3446 unsigned int base
, i
, count
, bfd_count
;
3448 asection
**ovly_sections
, **ovly_p
;
3450 unsigned int total_overlay_size
, overlay_size
;
3451 struct elf_link_hash_entry
*h
;
3452 struct _mos_param mos_param
;
3453 struct _uos_param uos_param
;
3454 struct function_info dummy_caller
;
3456 /* Find the extents of our loadable image. */
3457 lo
= (unsigned int) -1;
3459 for (m
= elf_tdata (info
->output_bfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
3460 if (m
->p_type
== PT_LOAD
)
3461 for (i
= 0; i
< m
->count
; i
++)
3462 if (m
->sections
[i
]->size
!= 0)
3464 if (m
->sections
[i
]->vma
< lo
)
3465 lo
= m
->sections
[i
]->vma
;
3466 if (m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1 > hi
)
3467 hi
= m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1;
3469 fixed_size
= hi
+ 1 - lo
;
3471 if (!discover_functions (info
))
3474 if (!build_call_tree (info
))
3477 uos_param
.exclude_input_section
= 0;
3478 uos_param
.exclude_output_section
3479 = bfd_get_section_by_name (info
->output_bfd
, ".interrupt");
3481 htab
= spu_hash_table (info
);
3482 h
= elf_link_hash_lookup (&htab
->elf
, "__ovly_load",
3483 FALSE
, FALSE
, FALSE
);
3485 && (h
->root
.type
== bfd_link_hash_defined
3486 || h
->root
.type
== bfd_link_hash_defweak
)
3489 /* We have a user supplied overlay manager. */
3490 uos_param
.exclude_input_section
= h
->root
.u
.def
.section
;
3494 /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
3495 builtin version to .text, and will adjust .text size. */
3496 fixed_size
+= (*htab
->params
->spu_elf_load_ovl_mgr
) ();
3499 /* Mark overlay sections, and find max overlay section size. */
3500 mos_param
.max_overlay_size
= 0;
3501 if (!for_each_node (mark_overlay_section
, info
, &mos_param
, TRUE
))
3504 /* We can't put the overlay manager or interrupt routines in
3506 uos_param
.clearing
= 0;
3507 if ((uos_param
.exclude_input_section
3508 || uos_param
.exclude_output_section
)
3509 && !for_each_node (unmark_overlay_section
, info
, &uos_param
, TRUE
))
3513 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3515 bfd_arr
= bfd_malloc (bfd_count
* sizeof (*bfd_arr
));
3516 if (bfd_arr
== NULL
)
3519 /* Count overlay sections, and subtract their sizes from "fixed_size". */
3522 total_overlay_size
= 0;
3523 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3525 extern const bfd_target bfd_elf32_spu_vec
;
3527 unsigned int old_count
;
3529 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3533 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3534 if (sec
->linker_mark
)
3536 if ((sec
->flags
& SEC_CODE
) != 0)
3538 fixed_size
-= sec
->size
;
3539 total_overlay_size
+= sec
->size
;
3541 if (count
!= old_count
)
3542 bfd_arr
[bfd_count
++] = ibfd
;
3545 /* Since the overlay link script selects sections by file name and
3546 section name, ensure that file names are unique. */
3549 bfd_boolean ok
= TRUE
;
3551 qsort (bfd_arr
, bfd_count
, sizeof (*bfd_arr
), sort_bfds
);
3552 for (i
= 1; i
< bfd_count
; ++i
)
3553 if (strcmp (bfd_arr
[i
- 1]->filename
, bfd_arr
[i
]->filename
) == 0)
3555 if (bfd_arr
[i
- 1]->my_archive
== bfd_arr
[i
]->my_archive
)
3557 if (bfd_arr
[i
- 1]->my_archive
&& bfd_arr
[i
]->my_archive
)
3558 info
->callbacks
->einfo (_("%s duplicated in %s\n"),
3559 bfd_arr
[i
]->filename
,
3560 bfd_arr
[i
]->my_archive
->filename
);
3562 info
->callbacks
->einfo (_("%s duplicated\n"),
3563 bfd_arr
[i
]->filename
);
3569 info
->callbacks
->einfo (_("sorry, no support for duplicate "
3570 "object files in auto-overlay script\n"));
3571 bfd_set_error (bfd_error_bad_value
);
3577 if (htab
->reserved
== 0)
3579 struct _sum_stack_param sum_stack_param
;
3581 sum_stack_param
.emit_stack_syms
= 0;
3582 sum_stack_param
.overall_stack
= 0;
3583 if (!for_each_node (sum_stack
, info
, &sum_stack_param
, TRUE
))
3585 htab
->reserved
= sum_stack_param
.overall_stack
+ htab
->extra_stack_space
;
3587 fixed_size
+= htab
->reserved
;
3588 fixed_size
+= htab
->non_ovly_stub
* ovl_stub_size (htab
->params
->ovly_flavour
);
3589 if (fixed_size
+ mos_param
.max_overlay_size
<= htab
->local_store
)
3591 /* Guess number of overlays. Assuming overlay buffer is on
3592 average only half full should be conservative. */
3593 ovlynum
= total_overlay_size
* 2 / (htab
->local_store
- fixed_size
);
3594 /* Space for _ovly_table[], _ovly_buf_table[] and toe. */
3595 fixed_size
+= ovlynum
* 16 + 16 + 4 + 16;
3598 if (fixed_size
+ mos_param
.max_overlay_size
> htab
->local_store
)
3599 info
->callbacks
->einfo (_("non-overlay size of 0x%v plus maximum overlay "
3600 "size of 0x%v exceeds local store\n"),
3601 (bfd_vma
) fixed_size
,
3602 (bfd_vma
) mos_param
.max_overlay_size
);
3604 /* Now see if we should put some functions in the non-overlay area. */
3605 else if (fixed_size
< htab
->overlay_fixed
)
3607 unsigned int max_fixed
, lib_size
;
3609 max_fixed
= htab
->local_store
- mos_param
.max_overlay_size
;
3610 if (max_fixed
> htab
->overlay_fixed
)
3611 max_fixed
= htab
->overlay_fixed
;
3612 lib_size
= max_fixed
- fixed_size
;
3613 lib_size
= auto_ovl_lib_functions (info
, lib_size
);
3614 if (lib_size
== (unsigned int) -1)
3616 fixed_size
= max_fixed
- lib_size
;
3619 /* Build an array of sections, suitably sorted to place into
3621 ovly_sections
= bfd_malloc (2 * count
* sizeof (*ovly_sections
));
3622 if (ovly_sections
== NULL
)
3624 ovly_p
= ovly_sections
;
3625 if (!for_each_node (collect_overlays
, info
, &ovly_p
, TRUE
))
3627 count
= (size_t) (ovly_p
- ovly_sections
) / 2;
3629 script
= (*htab
->params
->spu_elf_open_overlay_script
) ();
3631 if (fprintf (script
, "SECTIONS\n{\n OVERLAY :\n {\n") <= 0)
3634 memset (&dummy_caller
, 0, sizeof (dummy_caller
));
3635 overlay_size
= htab
->local_store
- fixed_size
;
3638 while (base
< count
)
3640 unsigned int size
= 0;
3643 for (i
= base
; i
< count
; i
++)
3647 unsigned int num_stubs
;
3648 struct call_info
*call
, *pasty
;
3649 struct _spu_elf_section_data
*sec_data
;
3650 struct spu_elf_stack_info
*sinfo
;
3653 /* See whether we can add this section to the current
3654 overlay without overflowing our overlay buffer. */
3655 sec
= ovly_sections
[2 * i
];
3656 tmp
= size
+ sec
->size
;
3657 if (ovly_sections
[2 * i
+ 1])
3658 tmp
+= ovly_sections
[2 * i
+ 1]->size
;
3659 if (tmp
> overlay_size
)
3661 if (sec
->segment_mark
)
3663 /* Pasted sections must stay together, so add their
3665 struct call_info
*pasty
= find_pasted_call (sec
);
3666 while (pasty
!= NULL
)
3668 struct function_info
*call_fun
= pasty
->fun
;
3669 tmp
+= call_fun
->sec
->size
;
3670 if (call_fun
->rodata
)
3671 tmp
+= call_fun
->rodata
->size
;
3672 for (pasty
= call_fun
->call_list
; pasty
; pasty
= pasty
->next
)
3673 if (pasty
->is_pasted
)
3677 if (tmp
> overlay_size
)
3680 /* If we add this section, we might need new overlay call
3681 stubs. Add any overlay section calls to dummy_call. */
3683 sec_data
= spu_elf_section_data (sec
);
3684 sinfo
= sec_data
->u
.i
.stack_info
;
3685 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3686 for (call
= sinfo
->fun
[k
].call_list
; call
; call
= call
->next
)
3687 if (call
->is_pasted
)
3689 BFD_ASSERT (pasty
== NULL
);
3692 else if (call
->fun
->sec
->linker_mark
)
3694 if (!copy_callee (&dummy_caller
, call
))
3697 while (pasty
!= NULL
)
3699 struct function_info
*call_fun
= pasty
->fun
;
3701 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
3702 if (call
->is_pasted
)
3704 BFD_ASSERT (pasty
== NULL
);
3707 else if (!copy_callee (&dummy_caller
, call
))
3711 /* Calculate call stub size. */
3713 for (call
= dummy_caller
.call_list
; call
; call
= call
->next
)
3718 /* If the call is within this overlay, we won't need a
3720 for (k
= base
; k
< i
+ 1; k
++)
3721 if (call
->fun
->sec
== ovly_sections
[2 * k
])
3727 if (tmp
+ num_stubs
* ovl_stub_size (htab
->params
->ovly_flavour
)
3736 info
->callbacks
->einfo (_("%B:%A%s exceeds overlay size\n"),
3737 ovly_sections
[2 * i
]->owner
,
3738 ovly_sections
[2 * i
],
3739 ovly_sections
[2 * i
+ 1] ? " + rodata" : "");
3740 bfd_set_error (bfd_error_bad_value
);
3744 if (fprintf (script
, " .ovly%d {\n", ++ovlynum
) <= 0)
3746 for (j
= base
; j
< i
; j
++)
3748 asection
*sec
= ovly_sections
[2 * j
];
3750 if (fprintf (script
, " %s%c%s (%s)\n",
3751 (sec
->owner
->my_archive
!= NULL
3752 ? sec
->owner
->my_archive
->filename
: ""),
3753 info
->path_separator
,
3754 sec
->owner
->filename
,
3757 if (sec
->segment_mark
)
3759 struct call_info
*call
= find_pasted_call (sec
);
3760 while (call
!= NULL
)
3762 struct function_info
*call_fun
= call
->fun
;
3763 sec
= call_fun
->sec
;
3764 if (fprintf (script
, " %s%c%s (%s)\n",
3765 (sec
->owner
->my_archive
!= NULL
3766 ? sec
->owner
->my_archive
->filename
: ""),
3767 info
->path_separator
,
3768 sec
->owner
->filename
,
3771 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
3772 if (call
->is_pasted
)
3778 for (j
= base
; j
< i
; j
++)
3780 asection
*sec
= ovly_sections
[2 * j
+ 1];
3782 && fprintf (script
, " %s%c%s (%s)\n",
3783 (sec
->owner
->my_archive
!= NULL
3784 ? sec
->owner
->my_archive
->filename
: ""),
3785 info
->path_separator
,
3786 sec
->owner
->filename
,
3790 sec
= ovly_sections
[2 * j
];
3791 if (sec
->segment_mark
)
3793 struct call_info
*call
= find_pasted_call (sec
);
3794 while (call
!= NULL
)
3796 struct function_info
*call_fun
= call
->fun
;
3797 sec
= call_fun
->rodata
;
3799 && fprintf (script
, " %s%c%s (%s)\n",
3800 (sec
->owner
->my_archive
!= NULL
3801 ? sec
->owner
->my_archive
->filename
: ""),
3802 info
->path_separator
,
3803 sec
->owner
->filename
,
3806 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
3807 if (call
->is_pasted
)
3813 if (fprintf (script
, " }\n") <= 0)
3816 while (dummy_caller
.call_list
!= NULL
)
3818 struct call_info
*call
= dummy_caller
.call_list
;
3819 dummy_caller
.call_list
= call
->next
;
3825 free (ovly_sections
);
3827 if (fprintf (script
, " }\n}\nINSERT AFTER .text;\n") <= 0)
3829 if (fclose (script
) != 0)
3832 if (htab
->params
->auto_overlay
& AUTO_RELINK
)
3833 (*htab
->params
->spu_elf_relink
) ();
3838 bfd_set_error (bfd_error_system_call
);
3840 info
->callbacks
->einfo ("%F%P: auto overlay error: %E\n");
3844 /* Provide an estimate of total stack required. */
3847 spu_elf_stack_analysis (struct bfd_link_info
*info
)
3849 struct spu_link_hash_table
*htab
;
3850 struct _sum_stack_param sum_stack_param
;
3852 if (!discover_functions (info
))
3855 if (!build_call_tree (info
))
3858 htab
= spu_hash_table (info
);
3859 info
->callbacks
->info (_("Stack size for call graph root nodes.\n"));
3860 info
->callbacks
->minfo (_("\nStack size for functions. "
3861 "Annotations: '*' max stack, 't' tail call\n"));
3863 sum_stack_param
.emit_stack_syms
= htab
->params
->emit_stack_syms
;
3864 sum_stack_param
.overall_stack
= 0;
3865 if (!for_each_node (sum_stack
, info
, &sum_stack_param
, TRUE
))
3868 info
->callbacks
->info (_("Maximum stack required is 0x%v\n"),
3869 (bfd_vma
) sum_stack_param
.overall_stack
);
3873 /* Perform a final link. */
3876 spu_elf_final_link (bfd
*output_bfd
, struct bfd_link_info
*info
)
3878 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
3880 if (htab
->params
->auto_overlay
)
3881 spu_elf_auto_overlay (info
);
3883 if (htab
->params
->stack_analysis
3884 && !spu_elf_stack_analysis (info
))
3885 info
->callbacks
->einfo ("%X%P: stack analysis error: %E\n");
3887 return bfd_elf_final_link (output_bfd
, info
);
3890 /* Called when not normally emitting relocs, ie. !info->relocatable
3891 and !info->emitrelocations. Returns a count of special relocs
3892 that need to be emitted. */
3895 spu_elf_count_relocs (struct bfd_link_info
*info
, asection
*sec
)
3897 Elf_Internal_Rela
*relocs
;
3898 unsigned int count
= 0;
3900 relocs
= _bfd_elf_link_read_relocs (sec
->owner
, sec
, NULL
, NULL
,
3904 Elf_Internal_Rela
*rel
;
3905 Elf_Internal_Rela
*relend
= relocs
+ sec
->reloc_count
;
3907 for (rel
= relocs
; rel
< relend
; rel
++)
3909 int r_type
= ELF32_R_TYPE (rel
->r_info
);
3910 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
3914 if (elf_section_data (sec
)->relocs
!= relocs
)
3921 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
3924 spu_elf_relocate_section (bfd
*output_bfd
,
3925 struct bfd_link_info
*info
,
3927 asection
*input_section
,
3929 Elf_Internal_Rela
*relocs
,
3930 Elf_Internal_Sym
*local_syms
,
3931 asection
**local_sections
)
3933 Elf_Internal_Shdr
*symtab_hdr
;
3934 struct elf_link_hash_entry
**sym_hashes
;
3935 Elf_Internal_Rela
*rel
, *relend
;
3936 struct spu_link_hash_table
*htab
;
3939 bfd_boolean emit_these_relocs
= FALSE
;
3940 bfd_boolean is_ea_sym
;
3943 htab
= spu_hash_table (info
);
3944 stubs
= (htab
->stub_sec
!= NULL
3945 && maybe_needs_stubs (input_section
));
3946 ea
= bfd_get_section_by_name (output_bfd
, "._ea");
3947 symtab_hdr
= &elf_tdata (input_bfd
)->symtab_hdr
;
3948 sym_hashes
= (struct elf_link_hash_entry
**) (elf_sym_hashes (input_bfd
));
3951 relend
= relocs
+ input_section
->reloc_count
;
3952 for (; rel
< relend
; rel
++)
3955 reloc_howto_type
*howto
;
3956 unsigned int r_symndx
;
3957 Elf_Internal_Sym
*sym
;
3959 struct elf_link_hash_entry
*h
;
3960 const char *sym_name
;
3963 bfd_reloc_status_type r
;
3964 bfd_boolean unresolved_reloc
;
3966 enum _stub_type stub_type
;
3968 r_symndx
= ELF32_R_SYM (rel
->r_info
);
3969 r_type
= ELF32_R_TYPE (rel
->r_info
);
3970 howto
= elf_howto_table
+ r_type
;
3971 unresolved_reloc
= FALSE
;
3976 if (r_symndx
< symtab_hdr
->sh_info
)
3978 sym
= local_syms
+ r_symndx
;
3979 sec
= local_sections
[r_symndx
];
3980 sym_name
= bfd_elf_sym_name (input_bfd
, symtab_hdr
, sym
, sec
);
3981 relocation
= _bfd_elf_rela_local_sym (output_bfd
, sym
, &sec
, rel
);
3985 if (sym_hashes
== NULL
)
3988 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
3990 while (h
->root
.type
== bfd_link_hash_indirect
3991 || h
->root
.type
== bfd_link_hash_warning
)
3992 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
3995 if (h
->root
.type
== bfd_link_hash_defined
3996 || h
->root
.type
== bfd_link_hash_defweak
)
3998 sec
= h
->root
.u
.def
.section
;
4000 || sec
->output_section
== NULL
)
4001 /* Set a flag that will be cleared later if we find a
4002 relocation value for this symbol. output_section
4003 is typically NULL for symbols satisfied by a shared
4005 unresolved_reloc
= TRUE
;
4007 relocation
= (h
->root
.u
.def
.value
4008 + sec
->output_section
->vma
4009 + sec
->output_offset
);
4011 else if (h
->root
.type
== bfd_link_hash_undefweak
)
4013 else if (info
->unresolved_syms_in_objects
== RM_IGNORE
4014 && ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
)
4016 else if (!info
->relocatable
4017 && !(r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
))
4020 err
= (info
->unresolved_syms_in_objects
== RM_GENERATE_ERROR
4021 || ELF_ST_VISIBILITY (h
->other
) != STV_DEFAULT
);
4022 if (!info
->callbacks
->undefined_symbol (info
,
4023 h
->root
.root
.string
,
4026 rel
->r_offset
, err
))
4030 sym_name
= h
->root
.root
.string
;
4033 if (sec
!= NULL
&& elf_discarded_section (sec
))
4035 /* For relocs against symbols from removed linkonce sections,
4036 or sections discarded by a linker script, we just want the
4037 section contents zeroed. Avoid any special processing. */
4038 _bfd_clear_contents (howto
, input_bfd
, contents
+ rel
->r_offset
);
4044 if (info
->relocatable
)
4047 is_ea_sym
= (ea
!= NULL
4049 && sec
->output_section
== ea
);
4051 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
4055 /* ._ea is a special section that isn't allocated in SPU
4056 memory, but rather occupies space in PPU memory as
4057 part of an embedded ELF image. If this reloc is
4058 against a symbol defined in ._ea, then transform the
4059 reloc into an equivalent one without a symbol
4060 relative to the start of the ELF image. */
4061 rel
->r_addend
+= (relocation
4063 + elf_section_data (ea
)->this_hdr
.sh_offset
);
4064 rel
->r_info
= ELF32_R_INFO (0, r_type
);
4066 emit_these_relocs
= TRUE
;
4071 unresolved_reloc
= TRUE
;
4073 if (unresolved_reloc
)
4075 (*_bfd_error_handler
)
4076 (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
4078 bfd_get_section_name (input_bfd
, input_section
),
4079 (long) rel
->r_offset
,
4085 /* If this symbol is in an overlay area, we may need to relocate
4086 to the overlay stub. */
4087 addend
= rel
->r_addend
;
4089 && (stub_type
= needs_ovl_stub (h
, sym
, sec
, input_section
, rel
,
4090 contents
, info
)) != no_stub
)
4092 unsigned int ovl
= 0;
4093 struct got_entry
*g
, **head
;
4095 if (stub_type
!= nonovl_stub
)
4096 ovl
= (spu_elf_section_data (input_section
->output_section
)
4100 head
= &h
->got
.glist
;
4102 head
= elf_local_got_ents (input_bfd
) + r_symndx
;
4104 for (g
= *head
; g
!= NULL
; g
= g
->next
)
4105 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
4110 relocation
= g
->stub_addr
;
4114 r
= _bfd_final_link_relocate (howto
,
4118 rel
->r_offset
, relocation
, addend
);
4120 if (r
!= bfd_reloc_ok
)
4122 const char *msg
= (const char *) 0;
4126 case bfd_reloc_overflow
:
4127 if (!((*info
->callbacks
->reloc_overflow
)
4128 (info
, (h
? &h
->root
: NULL
), sym_name
, howto
->name
,
4129 (bfd_vma
) 0, input_bfd
, input_section
, rel
->r_offset
)))
4133 case bfd_reloc_undefined
:
4134 if (!((*info
->callbacks
->undefined_symbol
)
4135 (info
, sym_name
, input_bfd
, input_section
,
4136 rel
->r_offset
, TRUE
)))
4140 case bfd_reloc_outofrange
:
4141 msg
= _("internal error: out of range error");
4144 case bfd_reloc_notsupported
:
4145 msg
= _("internal error: unsupported relocation error");
4148 case bfd_reloc_dangerous
:
4149 msg
= _("internal error: dangerous error");
4153 msg
= _("internal error: unknown error");
4158 if (!((*info
->callbacks
->warning
)
4159 (info
, msg
, sym_name
, input_bfd
, input_section
,
4168 && emit_these_relocs
4169 && !info
->emitrelocations
)
4171 Elf_Internal_Rela
*wrel
;
4172 Elf_Internal_Shdr
*rel_hdr
;
4174 wrel
= rel
= relocs
;
4175 relend
= relocs
+ input_section
->reloc_count
;
4176 for (; rel
< relend
; rel
++)
4180 r_type
= ELF32_R_TYPE (rel
->r_info
);
4181 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
4184 input_section
->reloc_count
= wrel
- relocs
;
4185 /* Backflips for _bfd_elf_link_output_relocs. */
4186 rel_hdr
= &elf_section_data (input_section
)->rel_hdr
;
4187 rel_hdr
->sh_size
= input_section
->reloc_count
* rel_hdr
->sh_entsize
;
4194 /* Adjust _SPUEAR_ syms to point at their overlay stubs. */
4197 spu_elf_output_symbol_hook (struct bfd_link_info
*info
,
4198 const char *sym_name ATTRIBUTE_UNUSED
,
4199 Elf_Internal_Sym
*sym
,
4200 asection
*sym_sec ATTRIBUTE_UNUSED
,
4201 struct elf_link_hash_entry
*h
)
4203 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
4205 if (!info
->relocatable
4206 && htab
->stub_sec
!= NULL
4208 && (h
->root
.type
== bfd_link_hash_defined
4209 || h
->root
.type
== bfd_link_hash_defweak
)
4211 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0)
4213 struct got_entry
*g
;
4215 for (g
= h
->got
.glist
; g
!= NULL
; g
= g
->next
)
4216 if (g
->addend
== 0 && g
->ovl
== 0)
4218 sym
->st_shndx
= (_bfd_elf_section_from_bfd_section
4219 (htab
->stub_sec
[0]->output_section
->owner
,
4220 htab
->stub_sec
[0]->output_section
));
4221 sym
->st_value
= g
->stub_addr
;
/* Nonzero when the current link is producing an SPU "plugin"; set from
   the linker via spu_elf_plugin and consulted when post-processing the
   ELF header (the output's e_type is then set to ET_DYN).  */
static int spu_plugin = 0;

/* Record whether this link produces a plugin.  VAL is the new flag
   value.  Exported (declared in elf32-spu.h) for use by the linker.

   NOTE(review): function body reconstructed from garbled source — only
   the signature was visible; verify against the canonical file.  */

void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}
4237 /* Set ELF header e_type for plugins. */
4240 spu_elf_post_process_headers (bfd
*abfd
,
4241 struct bfd_link_info
*info ATTRIBUTE_UNUSED
)
4245 Elf_Internal_Ehdr
*i_ehdrp
= elf_elfheader (abfd
);
4247 i_ehdrp
->e_type
= ET_DYN
;
4251 /* We may add an extra PT_LOAD segment for .toe. We also need extra
4252 segments for overlays. */
4255 spu_elf_additional_program_headers (bfd
*abfd
, struct bfd_link_info
*info
)
4262 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
4263 extra
= htab
->num_overlays
;
4269 sec
= bfd_get_section_by_name (abfd
, ".toe");
4270 if (sec
!= NULL
&& (sec
->flags
& SEC_LOAD
) != 0)
/* NOTE(review): the fragments below are an extraction-garbled rendering
   of spu_elf_modify_segment_map; each numbered fragment carries its
   original source line number, and several original lines (braces,
   declarations, segment-list relinking) were dropped by the extraction.
   Only comments are added here; the code tokens are untouched.  */
4276 /* Remove .toe section from other PT_LOAD segments and put it in
4277 a segment of its own. Put overlays in separate segments too. */
4280 spu_elf_modify_segment_map (bfd
*abfd
, struct bfd_link_info
*info
)
4283 struct elf_segment_map
*m
;
/* Scan every multi-section PT_LOAD segment for .toe or an overlay
   section, so it can be split out into its own segment.  */
4289 toe
= bfd_get_section_by_name (abfd
, ".toe");
4290 for (m
= elf_tdata (abfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
4291 if (m
->p_type
== PT_LOAD
&& m
->count
> 1)
4292 for (i
= 0; i
< m
->count
; i
++)
4293 if ((s
= m
->sections
[i
]) == toe
4294 || spu_elf_section_data (s
)->u
.o
.ovl_index
!= 0)
4296 struct elf_segment_map
*m2
;
/* Sections following the match move into a fresh PT_LOAD map
   entry sized for the remaining (count - i - 1) sections.  */
4299 if (i
+ 1 < m
->count
)
4301 amt
= sizeof (struct elf_segment_map
);
4302 amt
+= (m
->count
- (i
+ 2)) * sizeof (m
->sections
[0]);
4303 m2
= bfd_zalloc (abfd
, amt
);
4306 m2
->count
= m
->count
- (i
+ 1);
4307 memcpy (m2
->sections
, m
->sections
+ i
+ 1,
4308 m2
->count
* sizeof (m
->sections
[0]));
4309 m2
->p_type
= PT_LOAD
;
/* The matched section itself gets a one-section PT_LOAD segment
   (presumably linked into the map after M — the relinking lines
   were lost in extraction; confirm against the canonical file).  */
4317 amt
= sizeof (struct elf_segment_map
);
4318 m2
= bfd_zalloc (abfd
, amt
);
4321 m2
->p_type
= PT_LOAD
;
4323 m2
->sections
[0] = s
;
4333 /* Tweak the section type of .note.spu_name. */
4336 spu_elf_fake_sections (bfd
*obfd ATTRIBUTE_UNUSED
,
4337 Elf_Internal_Shdr
*hdr
,
4340 if (strcmp (sec
->name
, SPU_PTNOTE_SPUNAME
) == 0)
4341 hdr
->sh_type
= SHT_NOTE
;
/* NOTE(review): the fragments below are an extraction-garbled rendering
   of spu_elf_modify_program_headers; each numbered fragment carries its
   original source line number, and many original lines (braces, loop
   bodies, some conditions) were dropped by the extraction.  Only
   comments are added here; the code tokens are untouched.  */
4345 /* Tweak phdrs before writing them out. */
4348 spu_elf_modify_program_headers (bfd
*abfd
, struct bfd_link_info
*info
)
4350 const struct elf_backend_data
*bed
;
4351 struct elf_obj_tdata
*tdata
;
4352 Elf_Internal_Phdr
*phdr
, *last
;
4353 struct spu_link_hash_table
*htab
;
4360 bed
= get_elf_backend_data (abfd
);
4361 tdata
= elf_tdata (abfd
);
/* Number of program headers = total phdr bytes / per-entry size.  */
4363 count
= tdata
->program_header_size
/ bed
->s
->sizeof_phdr
;
4364 htab
= spu_hash_table (info
);
4365 if (htab
->num_overlays
!= 0)
4367 struct elf_segment_map
*m
;
/* Walk segment map and phdr array in lockstep; segments whose first
   section is an overlay get flagged, and their file offset is written
   into the in-memory _ovly_table image.  */
4370 for (i
= 0, m
= elf_tdata (abfd
)->segment_map
; m
; ++i
, m
= m
->next
)
4372 && (o
= spu_elf_section_data (m
->sections
[0])->u
.o
.ovl_index
) != 0)
4374 /* Mark this as an overlay header. */
4375 phdr
[i
].p_flags
|= PF_OVERLAY
;
4377 if (htab
->ovtab
!= NULL
&& htab
->ovtab
->size
!= 0)
4379 bfd_byte
*p
= htab
->ovtab
->contents
;
/* Each _ovly_table entry is 16 bytes; file_off lives at offset 8.  */
4380 unsigned int off
= o
* 16 + 8;
4382 /* Write file_off into _ovly_table. */
4383 bfd_put_32 (htab
->ovtab
->owner
, phdr
[i
].p_offset
, p
+ off
);
4388 /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
4389 of 16. This should always be possible when using the standard
4390 linker scripts, but don't create overlapping segments if
4391 someone is playing games with linker scripts. */
/* First pass, highest address downward: check each PT_LOAD's rounded
   size against the next-higher segment (`last') for overlap.
   `-x & 15' computes the amount needed to round x up to 16.  */
4393 for (i
= count
; i
-- != 0; )
4394 if (phdr
[i
].p_type
== PT_LOAD
)
4398 adjust
= -phdr
[i
].p_filesz
& 15;
4401 && phdr
[i
].p_offset
+ phdr
[i
].p_filesz
> last
->p_offset
- adjust
)
4404 adjust
= -phdr
[i
].p_memsz
& 15;
4407 && phdr
[i
].p_filesz
!= 0
4408 && phdr
[i
].p_vaddr
+ phdr
[i
].p_memsz
> last
->p_vaddr
- adjust
4409 && phdr
[i
].p_vaddr
+ phdr
[i
].p_memsz
<= last
->p_vaddr
)
4412 if (phdr
[i
].p_filesz
!= 0)
/* No overlap found over the whole scan (i wrapped to -1): apply the
   16-byte rounding to every PT_LOAD segment in a second pass.  */
4416 if (i
== (unsigned int) -1)
4417 for (i
= count
; i
-- != 0; )
4418 if (phdr
[i
].p_type
== PT_LOAD
)
4422 adjust
= -phdr
[i
].p_filesz
& 15;
4423 phdr
[i
].p_filesz
+= adjust
;
4425 adjust
= -phdr
[i
].p_memsz
& 15;
4426 phdr
[i
].p_memsz
+= adjust
;
/* Target vector identification and ELF parameters for the SPU
   (big-endian, 128-byte DMA-aligned pages).  */
4432 #define TARGET_BIG_SYM bfd_elf32_spu_vec
4433 #define TARGET_BIG_NAME "elf32-spu"
4434 #define ELF_ARCH bfd_arch_spu
4435 #define ELF_MACHINE_CODE EM_SPU
4436 /* This matches the alignment need for DMA. */
4437 #define ELF_MAXPAGESIZE 0x80
4438 #define elf_backend_rela_normal 1
4439 #define elf_backend_can_gc_sections 1
/* Wire the SPU-specific routines defined in this file into the generic
   ELF backend hooks consumed by elf32-target.h.  */
4441 #define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
4442 #define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
4443 #define elf_info_to_howto spu_elf_info_to_howto
4444 #define elf_backend_count_relocs spu_elf_count_relocs
4445 #define elf_backend_relocate_section spu_elf_relocate_section
4446 #define elf_backend_symbol_processing spu_elf_backend_symbol_processing
4447 #define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
4448 #define elf_backend_object_p spu_elf_object_p
4449 #define bfd_elf32_new_section_hook spu_elf_new_section_hook
4450 #define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create
4452 #define elf_backend_additional_program_headers spu_elf_additional_program_headers
4453 #define elf_backend_modify_segment_map spu_elf_modify_segment_map
4454 #define elf_backend_modify_program_headers spu_elf_modify_program_headers
4455 #define elf_backend_post_process_headers spu_elf_post_process_headers
4456 #define elf_backend_fake_sections spu_elf_fake_sections
4457 #define elf_backend_special_sections spu_elf_special_sections
4458 #define bfd_elf32_bfd_final_link spu_elf_final_link
/* Instantiate the target vector from the macros above.  */
4460 #include "elf32-target.h"