1 /* SPU specific support for 32-bit ELF
3 Copyright 2006, 2007, 2008 Free Software Foundation, Inc.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
22 #include "libiberty.h"
28 #include "elf32-spu.h"
30 /* We use RELA style relocs. Don't define USE_REL. */
32 static bfd_reloc_status_type
spu_elf_rel9 (bfd
*, arelent
*, asymbol
*,
36 /* Values of type 'enum elf_spu_reloc_type' are used to index this
37 array, so it must be declared in the order of that type. */
39 static reloc_howto_type elf_howto_table
[] = {
40 HOWTO (R_SPU_NONE
, 0, 0, 0, FALSE
, 0, complain_overflow_dont
,
41 bfd_elf_generic_reloc
, "SPU_NONE",
42 FALSE
, 0, 0x00000000, FALSE
),
43 HOWTO (R_SPU_ADDR10
, 4, 2, 10, FALSE
, 14, complain_overflow_bitfield
,
44 bfd_elf_generic_reloc
, "SPU_ADDR10",
45 FALSE
, 0, 0x00ffc000, FALSE
),
46 HOWTO (R_SPU_ADDR16
, 2, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
47 bfd_elf_generic_reloc
, "SPU_ADDR16",
48 FALSE
, 0, 0x007fff80, FALSE
),
49 HOWTO (R_SPU_ADDR16_HI
, 16, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
50 bfd_elf_generic_reloc
, "SPU_ADDR16_HI",
51 FALSE
, 0, 0x007fff80, FALSE
),
52 HOWTO (R_SPU_ADDR16_LO
, 0, 2, 16, FALSE
, 7, complain_overflow_dont
,
53 bfd_elf_generic_reloc
, "SPU_ADDR16_LO",
54 FALSE
, 0, 0x007fff80, FALSE
),
55 HOWTO (R_SPU_ADDR18
, 0, 2, 18, FALSE
, 7, complain_overflow_bitfield
,
56 bfd_elf_generic_reloc
, "SPU_ADDR18",
57 FALSE
, 0, 0x01ffff80, FALSE
),
58 HOWTO (R_SPU_ADDR32
, 0, 2, 32, FALSE
, 0, complain_overflow_dont
,
59 bfd_elf_generic_reloc
, "SPU_ADDR32",
60 FALSE
, 0, 0xffffffff, FALSE
),
61 HOWTO (R_SPU_REL16
, 2, 2, 16, TRUE
, 7, complain_overflow_bitfield
,
62 bfd_elf_generic_reloc
, "SPU_REL16",
63 FALSE
, 0, 0x007fff80, TRUE
),
64 HOWTO (R_SPU_ADDR7
, 0, 2, 7, FALSE
, 14, complain_overflow_dont
,
65 bfd_elf_generic_reloc
, "SPU_ADDR7",
66 FALSE
, 0, 0x001fc000, FALSE
),
67 HOWTO (R_SPU_REL9
, 2, 2, 9, TRUE
, 0, complain_overflow_signed
,
68 spu_elf_rel9
, "SPU_REL9",
69 FALSE
, 0, 0x0180007f, TRUE
),
70 HOWTO (R_SPU_REL9I
, 2, 2, 9, TRUE
, 0, complain_overflow_signed
,
71 spu_elf_rel9
, "SPU_REL9I",
72 FALSE
, 0, 0x0000c07f, TRUE
),
73 HOWTO (R_SPU_ADDR10I
, 0, 2, 10, FALSE
, 14, complain_overflow_signed
,
74 bfd_elf_generic_reloc
, "SPU_ADDR10I",
75 FALSE
, 0, 0x00ffc000, FALSE
),
76 HOWTO (R_SPU_ADDR16I
, 0, 2, 16, FALSE
, 7, complain_overflow_signed
,
77 bfd_elf_generic_reloc
, "SPU_ADDR16I",
78 FALSE
, 0, 0x007fff80, FALSE
),
79 HOWTO (R_SPU_REL32
, 0, 2, 32, TRUE
, 0, complain_overflow_dont
,
80 bfd_elf_generic_reloc
, "SPU_REL32",
81 FALSE
, 0, 0xffffffff, TRUE
),
82 HOWTO (R_SPU_ADDR16X
, 0, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
83 bfd_elf_generic_reloc
, "SPU_ADDR16X",
84 FALSE
, 0, 0x007fff80, FALSE
),
85 HOWTO (R_SPU_PPU32
, 0, 2, 32, FALSE
, 0, complain_overflow_dont
,
86 bfd_elf_generic_reloc
, "SPU_PPU32",
87 FALSE
, 0, 0xffffffff, FALSE
),
88 HOWTO (R_SPU_PPU64
, 0, 4, 64, FALSE
, 0, complain_overflow_dont
,
89 bfd_elf_generic_reloc
, "SPU_PPU64",
93 static struct bfd_elf_special_section
const spu_elf_special_sections
[] = {
94 { "._ea", 4, 0, SHT_PROGBITS
, SHF_WRITE
},
95 { ".toe", 4, 0, SHT_NOBITS
, SHF_ALLOC
},
99 static enum elf_spu_reloc_type
100 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code
)
106 case BFD_RELOC_SPU_IMM10W
:
108 case BFD_RELOC_SPU_IMM16W
:
110 case BFD_RELOC_SPU_LO16
:
111 return R_SPU_ADDR16_LO
;
112 case BFD_RELOC_SPU_HI16
:
113 return R_SPU_ADDR16_HI
;
114 case BFD_RELOC_SPU_IMM18
:
116 case BFD_RELOC_SPU_PCREL16
:
118 case BFD_RELOC_SPU_IMM7
:
120 case BFD_RELOC_SPU_IMM8
:
122 case BFD_RELOC_SPU_PCREL9a
:
124 case BFD_RELOC_SPU_PCREL9b
:
126 case BFD_RELOC_SPU_IMM10
:
127 return R_SPU_ADDR10I
;
128 case BFD_RELOC_SPU_IMM16
:
129 return R_SPU_ADDR16I
;
132 case BFD_RELOC_32_PCREL
:
134 case BFD_RELOC_SPU_PPU32
:
136 case BFD_RELOC_SPU_PPU64
:
142 spu_elf_info_to_howto (bfd
*abfd ATTRIBUTE_UNUSED
,
144 Elf_Internal_Rela
*dst
)
146 enum elf_spu_reloc_type r_type
;
148 r_type
= (enum elf_spu_reloc_type
) ELF32_R_TYPE (dst
->r_info
);
149 BFD_ASSERT (r_type
< R_SPU_max
);
150 cache_ptr
->howto
= &elf_howto_table
[(int) r_type
];
153 static reloc_howto_type
*
154 spu_elf_reloc_type_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
155 bfd_reloc_code_real_type code
)
157 enum elf_spu_reloc_type r_type
= spu_elf_bfd_to_reloc_type (code
);
159 if (r_type
== R_SPU_NONE
)
162 return elf_howto_table
+ r_type
;
165 static reloc_howto_type
*
166 spu_elf_reloc_name_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
171 for (i
= 0; i
< sizeof (elf_howto_table
) / sizeof (elf_howto_table
[0]); i
++)
172 if (elf_howto_table
[i
].name
!= NULL
173 && strcasecmp (elf_howto_table
[i
].name
, r_name
) == 0)
174 return &elf_howto_table
[i
];
179 /* Apply R_SPU_REL9 and R_SPU_REL9I relocs. */
181 static bfd_reloc_status_type
182 spu_elf_rel9 (bfd
*abfd
, arelent
*reloc_entry
, asymbol
*symbol
,
183 void *data
, asection
*input_section
,
184 bfd
*output_bfd
, char **error_message
)
186 bfd_size_type octets
;
190 /* If this is a relocatable link (output_bfd test tells us), just
191 call the generic function. Any adjustment will be done at final
193 if (output_bfd
!= NULL
)
194 return bfd_elf_generic_reloc (abfd
, reloc_entry
, symbol
, data
,
195 input_section
, output_bfd
, error_message
);
197 if (reloc_entry
->address
> bfd_get_section_limit (abfd
, input_section
))
198 return bfd_reloc_outofrange
;
199 octets
= reloc_entry
->address
* bfd_octets_per_byte (abfd
);
201 /* Get symbol value. */
203 if (!bfd_is_com_section (symbol
->section
))
205 if (symbol
->section
->output_section
)
206 val
+= symbol
->section
->output_section
->vma
;
208 val
+= reloc_entry
->addend
;
210 /* Make it pc-relative. */
211 val
-= input_section
->output_section
->vma
+ input_section
->output_offset
;
214 if (val
+ 256 >= 512)
215 return bfd_reloc_overflow
;
217 insn
= bfd_get_32 (abfd
, (bfd_byte
*) data
+ octets
);
219 /* Move two high bits of value to REL9I and REL9 position.
220 The mask will take care of selecting the right field. */
221 val
= (val
& 0x7f) | ((val
& 0x180) << 7) | ((val
& 0x180) << 16);
222 insn
&= ~reloc_entry
->howto
->dst_mask
;
223 insn
|= val
& reloc_entry
->howto
->dst_mask
;
224 bfd_put_32 (abfd
, insn
, (bfd_byte
*) data
+ octets
);
229 spu_elf_new_section_hook (bfd
*abfd
, asection
*sec
)
231 if (!sec
->used_by_bfd
)
233 struct _spu_elf_section_data
*sdata
;
235 sdata
= bfd_zalloc (abfd
, sizeof (*sdata
));
238 sec
->used_by_bfd
= sdata
;
241 return _bfd_elf_new_section_hook (abfd
, sec
);
244 /* Set up overlay info for executables. */
247 spu_elf_object_p (bfd
*abfd
)
249 if ((abfd
->flags
& (EXEC_P
| DYNAMIC
)) != 0)
251 unsigned int i
, num_ovl
, num_buf
;
252 Elf_Internal_Phdr
*phdr
= elf_tdata (abfd
)->phdr
;
253 Elf_Internal_Ehdr
*ehdr
= elf_elfheader (abfd
);
254 Elf_Internal_Phdr
*last_phdr
= NULL
;
256 for (num_buf
= 0, num_ovl
= 0, i
= 0; i
< ehdr
->e_phnum
; i
++, phdr
++)
257 if (phdr
->p_type
== PT_LOAD
&& (phdr
->p_flags
& PF_OVERLAY
) != 0)
262 if (last_phdr
== NULL
263 || ((last_phdr
->p_vaddr
^ phdr
->p_vaddr
) & 0x3ffff) != 0)
266 for (j
= 1; j
< elf_numsections (abfd
); j
++)
268 Elf_Internal_Shdr
*shdr
= elf_elfsections (abfd
)[j
];
270 if (ELF_IS_SECTION_IN_SEGMENT_MEMORY (shdr
, phdr
))
272 asection
*sec
= shdr
->bfd_section
;
273 spu_elf_section_data (sec
)->u
.o
.ovl_index
= num_ovl
;
274 spu_elf_section_data (sec
)->u
.o
.ovl_buf
= num_buf
;
282 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
283 strip --strip-unneeded will not remove them. */
286 spu_elf_backend_symbol_processing (bfd
*abfd ATTRIBUTE_UNUSED
, asymbol
*sym
)
288 if (sym
->name
!= NULL
289 && sym
->section
!= bfd_abs_section_ptr
290 && strncmp (sym
->name
, "_EAR_", 5) == 0)
291 sym
->flags
|= BSF_KEEP
;
294 /* SPU ELF linker hash table. */
296 struct spu_link_hash_table
298 struct elf_link_hash_table elf
;
300 /* Shortcuts to overlay sections. */
305 /* Count of stubs in each overlay section. */
306 unsigned int *stub_count
;
308 /* The stub section for each overlay section. */
311 struct elf_link_hash_entry
*ovly_load
;
312 struct elf_link_hash_entry
*ovly_return
;
313 unsigned long ovly_load_r_symndx
;
315 /* Number of overlay buffers. */
316 unsigned int num_buf
;
318 /* Total number of overlays. */
319 unsigned int num_overlays
;
321 /* How much memory we have. */
322 unsigned int local_store
;
323 /* Local store --auto-overlay should reserve for non-overlay
324 functions and data. */
325 unsigned int overlay_fixed
;
326 /* Local store --auto-overlay should reserve for stack and heap. */
327 unsigned int reserved
;
328 /* If reserved is not specified, stack analysis will calculate a value
329 for the stack. This parameter adjusts that value to allow for
330 negative sp access (the ABI says 2000 bytes below sp are valid,
331 and the overlay manager uses some of this area). */
332 int extra_stack_space
;
333 /* Count of overlay stubs needed in non-overlay area. */
334 unsigned int non_ovly_stub
;
336 /* Stash various callbacks for --auto-overlay. */
337 void (*spu_elf_load_ovl_mgr
) (void);
338 FILE *(*spu_elf_open_overlay_script
) (void);
339 void (*spu_elf_relink
) (void);
341 /* Bit 0 set if --auto-overlay.
342 Bit 1 set if --auto-relink.
343 Bit 2 set if --overlay-rodata. */
344 unsigned int auto_overlay
: 3;
345 #define AUTO_OVERLAY 1
346 #define AUTO_RELINK 2
347 #define OVERLAY_RODATA 4
349 /* Set if we should emit symbols for stubs. */
350 unsigned int emit_stub_syms
:1;
352 /* Set if we want stubs on calls out of overlay regions to
353 non-overlay regions. */
354 unsigned int non_overlay_stubs
: 1;
357 unsigned int stub_err
: 1;
359 /* Set if stack size analysis should be done. */
360 unsigned int stack_analysis
: 1;
362 /* Set if __stack_* syms will be emitted. */
363 unsigned int emit_stack_syms
: 1;
366 /* Hijack the generic got fields for overlay stub accounting. */
370 struct got_entry
*next
;
376 #define spu_hash_table(p) \
377 ((struct spu_link_hash_table *) ((p)->hash))
379 /* Create a spu ELF linker hash table. */
381 static struct bfd_link_hash_table
*
382 spu_elf_link_hash_table_create (bfd
*abfd
)
384 struct spu_link_hash_table
*htab
;
386 htab
= bfd_malloc (sizeof (*htab
));
390 if (!_bfd_elf_link_hash_table_init (&htab
->elf
, abfd
,
391 _bfd_elf_link_hash_newfunc
,
392 sizeof (struct elf_link_hash_entry
)))
398 memset (&htab
->ovtab
, 0,
399 sizeof (*htab
) - offsetof (struct spu_link_hash_table
, ovtab
));
401 htab
->elf
.init_got_refcount
.refcount
= 0;
402 htab
->elf
.init_got_refcount
.glist
= NULL
;
403 htab
->elf
.init_got_offset
.offset
= 0;
404 htab
->elf
.init_got_offset
.glist
= NULL
;
405 return &htab
->elf
.root
;
408 /* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
409 to (hash, NULL) for global symbols, and (NULL, sym) for locals. Set
410 *SYMSECP to the symbol's section. *LOCSYMSP caches local syms. */
413 get_sym_h (struct elf_link_hash_entry
**hp
,
414 Elf_Internal_Sym
**symp
,
416 Elf_Internal_Sym
**locsymsp
,
417 unsigned long r_symndx
,
420 Elf_Internal_Shdr
*symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
422 if (r_symndx
>= symtab_hdr
->sh_info
)
424 struct elf_link_hash_entry
**sym_hashes
= elf_sym_hashes (ibfd
);
425 struct elf_link_hash_entry
*h
;
427 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
428 while (h
->root
.type
== bfd_link_hash_indirect
429 || h
->root
.type
== bfd_link_hash_warning
)
430 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
440 asection
*symsec
= NULL
;
441 if (h
->root
.type
== bfd_link_hash_defined
442 || h
->root
.type
== bfd_link_hash_defweak
)
443 symsec
= h
->root
.u
.def
.section
;
449 Elf_Internal_Sym
*sym
;
450 Elf_Internal_Sym
*locsyms
= *locsymsp
;
454 locsyms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
456 locsyms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
,
458 0, NULL
, NULL
, NULL
);
463 sym
= locsyms
+ r_symndx
;
472 *symsecp
= bfd_section_from_elf_index (ibfd
, sym
->st_shndx
);
478 /* Create the note section if not already present. This is done early so
479 that the linker maps the sections to the right place in the output. */
482 spu_elf_create_sections (struct bfd_link_info
*info
,
487 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
489 /* Stash some options away where we can get at them later. */
490 htab
->stack_analysis
= stack_analysis
;
491 htab
->emit_stack_syms
= emit_stack_syms
;
493 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
494 if (bfd_get_section_by_name (ibfd
, SPU_PTNOTE_SPUNAME
) != NULL
)
499 /* Make SPU_PTNOTE_SPUNAME section. */
506 ibfd
= info
->input_bfds
;
507 flags
= SEC_LOAD
| SEC_READONLY
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
508 s
= bfd_make_section_anyway_with_flags (ibfd
, SPU_PTNOTE_SPUNAME
, flags
);
510 || !bfd_set_section_alignment (ibfd
, s
, 4))
513 name_len
= strlen (bfd_get_filename (info
->output_bfd
)) + 1;
514 size
= 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4);
515 size
+= (name_len
+ 3) & -4;
517 if (!bfd_set_section_size (ibfd
, s
, size
))
520 data
= bfd_zalloc (ibfd
, size
);
524 bfd_put_32 (ibfd
, sizeof (SPU_PLUGIN_NAME
), data
+ 0);
525 bfd_put_32 (ibfd
, name_len
, data
+ 4);
526 bfd_put_32 (ibfd
, 1, data
+ 8);
527 memcpy (data
+ 12, SPU_PLUGIN_NAME
, sizeof (SPU_PLUGIN_NAME
));
528 memcpy (data
+ 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4),
529 bfd_get_filename (info
->output_bfd
), name_len
);
536 /* qsort predicate to sort sections by vma. */
539 sort_sections (const void *a
, const void *b
)
541 const asection
*const *s1
= a
;
542 const asection
*const *s2
= b
;
543 bfd_signed_vma delta
= (*s1
)->vma
- (*s2
)->vma
;
546 return delta
< 0 ? -1 : 1;
548 return (*s1
)->index
- (*s2
)->index
;
551 /* Identify overlays in the output bfd, and number them. */
554 spu_elf_find_overlays (struct bfd_link_info
*info
)
556 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
557 asection
**alloc_sec
;
558 unsigned int i
, n
, ovl_index
, num_buf
;
562 if (info
->output_bfd
->section_count
< 2)
566 = bfd_malloc (info
->output_bfd
->section_count
* sizeof (*alloc_sec
));
567 if (alloc_sec
== NULL
)
570 /* Pick out all the alloced sections. */
571 for (n
= 0, s
= info
->output_bfd
->sections
; s
!= NULL
; s
= s
->next
)
572 if ((s
->flags
& SEC_ALLOC
) != 0
573 && (s
->flags
& (SEC_LOAD
| SEC_THREAD_LOCAL
)) != SEC_THREAD_LOCAL
583 /* Sort them by vma. */
584 qsort (alloc_sec
, n
, sizeof (*alloc_sec
), sort_sections
);
586 /* Look for overlapping vmas. Any with overlap must be overlays.
587 Count them. Also count the number of overlay regions. */
588 ovl_end
= alloc_sec
[0]->vma
+ alloc_sec
[0]->size
;
589 for (ovl_index
= 0, num_buf
= 0, i
= 1; i
< n
; i
++)
592 if (s
->vma
< ovl_end
)
594 asection
*s0
= alloc_sec
[i
- 1];
596 if (spu_elf_section_data (s0
)->u
.o
.ovl_index
== 0)
598 alloc_sec
[ovl_index
] = s0
;
599 spu_elf_section_data (s0
)->u
.o
.ovl_index
= ++ovl_index
;
600 spu_elf_section_data (s0
)->u
.o
.ovl_buf
= ++num_buf
;
602 alloc_sec
[ovl_index
] = s
;
603 spu_elf_section_data (s
)->u
.o
.ovl_index
= ++ovl_index
;
604 spu_elf_section_data (s
)->u
.o
.ovl_buf
= num_buf
;
605 if (s0
->vma
!= s
->vma
)
607 info
->callbacks
->einfo (_("%X%P: overlay sections %A and %A "
608 "do not start at the same address.\n"),
612 if (ovl_end
< s
->vma
+ s
->size
)
613 ovl_end
= s
->vma
+ s
->size
;
616 ovl_end
= s
->vma
+ s
->size
;
619 htab
->num_overlays
= ovl_index
;
620 htab
->num_buf
= num_buf
;
621 htab
->ovl_sec
= alloc_sec
;
622 htab
->ovly_load
= elf_link_hash_lookup (&htab
->elf
, "__ovly_load",
623 FALSE
, FALSE
, FALSE
);
624 htab
->ovly_return
= elf_link_hash_lookup (&htab
->elf
, "__ovly_return",
625 FALSE
, FALSE
, FALSE
);
626 return ovl_index
!= 0;
629 /* Support two sizes of overlay stubs, a slower more compact stub of two
630 intructions, and a faster stub of four instructions. */
631 #ifndef OVL_STUB_SIZE
632 /* Default to faster. */
633 #define OVL_STUB_SIZE 16
634 /* #define OVL_STUB_SIZE 8 */
636 #define BRSL 0x33000000
637 #define BR 0x32000000
638 #define NOP 0x40200000
639 #define LNOP 0x00200000
640 #define ILA 0x42000000
642 /* Return true for all relative and absolute branch instructions.
650 brhnz 00100011 0.. */
653 is_branch (const unsigned char *insn
)
655 return (insn
[0] & 0xec) == 0x20 && (insn
[1] & 0x80) == 0;
658 /* Return true for all indirect branch instructions.
666 bihnz 00100101 011 */
669 is_indirect_branch (const unsigned char *insn
)
671 return (insn
[0] & 0xef) == 0x25 && (insn
[1] & 0x80) == 0;
674 /* Return true for branch hint instructions.
679 is_hint (const unsigned char *insn
)
681 return (insn
[0] & 0xfc) == 0x10;
684 /* True if INPUT_SECTION might need overlay stubs. */
687 maybe_needs_stubs (asection
*input_section
, bfd
*output_bfd
)
689 /* No stubs for debug sections and suchlike. */
690 if ((input_section
->flags
& SEC_ALLOC
) == 0)
693 /* No stubs for link-once sections that will be discarded. */
694 if (input_section
->output_section
== NULL
695 || input_section
->output_section
->owner
!= output_bfd
)
698 /* Don't create stubs for .eh_frame references. */
699 if (strcmp (input_section
->name
, ".eh_frame") == 0)
713 /* Return non-zero if this reloc symbol should go via an overlay stub.
714 Return 2 if the stub must be in non-overlay area. */
716 static enum _stub_type
717 needs_ovl_stub (struct elf_link_hash_entry
*h
,
718 Elf_Internal_Sym
*sym
,
720 asection
*input_section
,
721 Elf_Internal_Rela
*irela
,
723 struct bfd_link_info
*info
)
725 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
726 enum elf_spu_reloc_type r_type
;
727 unsigned int sym_type
;
729 enum _stub_type ret
= no_stub
;
732 || sym_sec
->output_section
== NULL
733 || sym_sec
->output_section
->owner
!= info
->output_bfd
734 || spu_elf_section_data (sym_sec
->output_section
) == NULL
)
739 /* Ensure no stubs for user supplied overlay manager syms. */
740 if (h
== htab
->ovly_load
|| h
== htab
->ovly_return
)
743 /* setjmp always goes via an overlay stub, because then the return
744 and hence the longjmp goes via __ovly_return. That magically
745 makes setjmp/longjmp between overlays work. */
746 if (strncmp (h
->root
.root
.string
, "setjmp", 6) == 0
747 && (h
->root
.root
.string
[6] == '\0' || h
->root
.root
.string
[6] == '@'))
751 /* Usually, symbols in non-overlay sections don't need stubs. */
752 if (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
== 0
753 && !htab
->non_overlay_stubs
)
759 sym_type
= ELF_ST_TYPE (sym
->st_info
);
761 r_type
= ELF32_R_TYPE (irela
->r_info
);
763 if (r_type
== R_SPU_REL16
|| r_type
== R_SPU_ADDR16
)
767 if (contents
== NULL
)
770 if (!bfd_get_section_contents (input_section
->owner
,
777 contents
+= irela
->r_offset
;
779 if (is_branch (contents
) || is_hint (contents
))
782 if ((contents
[0] & 0xfd) == 0x31
783 && sym_type
!= STT_FUNC
786 /* It's common for people to write assembly and forget
787 to give function symbols the right type. Handle
788 calls to such symbols, but warn so that (hopefully)
789 people will fix their code. We need the symbol
790 type to be correct to distinguish function pointer
791 initialisation from other pointer initialisations. */
792 const char *sym_name
;
795 sym_name
= h
->root
.root
.string
;
798 Elf_Internal_Shdr
*symtab_hdr
;
799 symtab_hdr
= &elf_tdata (input_section
->owner
)->symtab_hdr
;
800 sym_name
= bfd_elf_sym_name (input_section
->owner
,
805 (*_bfd_error_handler
) (_("warning: call to non-function"
806 " symbol %s defined in %B"),
807 sym_sec
->owner
, sym_name
);
813 if (sym_type
!= STT_FUNC
815 && (sym_sec
->flags
& SEC_CODE
) == 0)
818 /* A reference from some other section to a symbol in an overlay
819 section needs a stub. */
820 if (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
821 != spu_elf_section_data (input_section
->output_section
)->u
.o
.ovl_index
)
824 /* If this insn isn't a branch then we are possibly taking the
825 address of a function and passing it out somehow. */
826 return !branch
&& sym_type
== STT_FUNC
? nonovl_stub
: ret
;
830 count_stub (struct spu_link_hash_table
*htab
,
833 enum _stub_type stub_type
,
834 struct elf_link_hash_entry
*h
,
835 const Elf_Internal_Rela
*irela
)
837 unsigned int ovl
= 0;
838 struct got_entry
*g
, **head
;
841 /* If this instruction is a branch or call, we need a stub
842 for it. One stub per function per overlay.
843 If it isn't a branch, then we are taking the address of
844 this function so need a stub in the non-overlay area
845 for it. One stub per function. */
846 if (stub_type
!= nonovl_stub
)
847 ovl
= spu_elf_section_data (isec
->output_section
)->u
.o
.ovl_index
;
850 head
= &h
->got
.glist
;
853 if (elf_local_got_ents (ibfd
) == NULL
)
855 bfd_size_type amt
= (elf_tdata (ibfd
)->symtab_hdr
.sh_info
856 * sizeof (*elf_local_got_ents (ibfd
)));
857 elf_local_got_ents (ibfd
) = bfd_zmalloc (amt
);
858 if (elf_local_got_ents (ibfd
) == NULL
)
861 head
= elf_local_got_ents (ibfd
) + ELF32_R_SYM (irela
->r_info
);
866 addend
= irela
->r_addend
;
870 struct got_entry
*gnext
;
872 for (g
= *head
; g
!= NULL
; g
= g
->next
)
873 if (g
->addend
== addend
&& g
->ovl
== 0)
878 /* Need a new non-overlay area stub. Zap other stubs. */
879 for (g
= *head
; g
!= NULL
; g
= gnext
)
882 if (g
->addend
== addend
)
884 htab
->stub_count
[g
->ovl
] -= 1;
892 for (g
= *head
; g
!= NULL
; g
= g
->next
)
893 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
899 g
= bfd_malloc (sizeof *g
);
904 g
->stub_addr
= (bfd_vma
) -1;
908 htab
->stub_count
[ovl
] += 1;
914 /* Two instruction overlay stubs look like:
917 .word target_ovl_and_address
919 ovl_and_address is a word with the overlay number in the top 14 bits
920 and local store address in the bottom 18 bits.
922 Four instruction overlay stubs look like:
926 ila $79,target_address
930 build_stub (struct spu_link_hash_table
*htab
,
933 enum _stub_type stub_type
,
934 struct elf_link_hash_entry
*h
,
935 const Elf_Internal_Rela
*irela
,
940 struct got_entry
*g
, **head
;
942 bfd_vma addend
, val
, from
, to
;
945 if (stub_type
!= nonovl_stub
)
946 ovl
= spu_elf_section_data (isec
->output_section
)->u
.o
.ovl_index
;
949 head
= &h
->got
.glist
;
951 head
= elf_local_got_ents (ibfd
) + ELF32_R_SYM (irela
->r_info
);
955 addend
= irela
->r_addend
;
957 for (g
= *head
; g
!= NULL
; g
= g
->next
)
958 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
963 if (g
->ovl
== 0 && ovl
!= 0)
966 if (g
->stub_addr
!= (bfd_vma
) -1)
969 sec
= htab
->stub_sec
[ovl
];
970 dest
+= dest_sec
->output_offset
+ dest_sec
->output_section
->vma
;
971 from
= sec
->size
+ sec
->output_offset
+ sec
->output_section
->vma
;
973 to
= (htab
->ovly_load
->root
.u
.def
.value
974 + htab
->ovly_load
->root
.u
.def
.section
->output_offset
975 + htab
->ovly_load
->root
.u
.def
.section
->output_section
->vma
);
977 if (OVL_STUB_SIZE
== 16)
979 if (((dest
| to
| from
) & 3) != 0
980 || val
+ 0x40000 >= 0x80000)
985 ovl
= spu_elf_section_data (dest_sec
->output_section
)->u
.o
.ovl_index
;
987 if (OVL_STUB_SIZE
== 16)
989 bfd_put_32 (sec
->owner
, ILA
+ ((ovl
<< 7) & 0x01ffff80) + 78,
990 sec
->contents
+ sec
->size
);
991 bfd_put_32 (sec
->owner
, LNOP
,
992 sec
->contents
+ sec
->size
+ 4);
993 bfd_put_32 (sec
->owner
, ILA
+ ((dest
<< 7) & 0x01ffff80) + 79,
994 sec
->contents
+ sec
->size
+ 8);
995 bfd_put_32 (sec
->owner
, BR
+ ((val
<< 5) & 0x007fff80),
996 sec
->contents
+ sec
->size
+ 12);
998 else if (OVL_STUB_SIZE
== 8)
1000 bfd_put_32 (sec
->owner
, BRSL
+ ((val
<< 5) & 0x007fff80) + 75,
1001 sec
->contents
+ sec
->size
);
1003 val
= (dest
& 0x3ffff) | (ovl
<< 18);
1004 bfd_put_32 (sec
->owner
, val
,
1005 sec
->contents
+ sec
->size
+ 4);
1009 sec
->size
+= OVL_STUB_SIZE
;
1011 if (htab
->emit_stub_syms
)
1017 len
= 8 + sizeof (".ovl_call.") - 1;
1019 len
+= strlen (h
->root
.root
.string
);
1024 add
= (int) irela
->r_addend
& 0xffffffff;
1027 name
= bfd_malloc (len
);
1031 sprintf (name
, "%08x.ovl_call.", g
->ovl
);
1033 strcpy (name
+ 8 + sizeof (".ovl_call.") - 1, h
->root
.root
.string
);
1035 sprintf (name
+ 8 + sizeof (".ovl_call.") - 1, "%x:%x",
1036 dest_sec
->id
& 0xffffffff,
1037 (int) ELF32_R_SYM (irela
->r_info
) & 0xffffffff);
1039 sprintf (name
+ len
- 9, "+%x", add
);
1041 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
1045 if (h
->root
.type
== bfd_link_hash_new
)
1047 h
->root
.type
= bfd_link_hash_defined
;
1048 h
->root
.u
.def
.section
= sec
;
1049 h
->root
.u
.def
.value
= sec
->size
- OVL_STUB_SIZE
;
1050 h
->size
= OVL_STUB_SIZE
;
1054 h
->ref_regular_nonweak
= 1;
1055 h
->forced_local
= 1;
1063 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
1067 allocate_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
1069 /* Symbols starting with _SPUEAR_ need a stub because they may be
1070 invoked by the PPU. */
1071 struct bfd_link_info
*info
= inf
;
1072 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1075 if ((h
->root
.type
== bfd_link_hash_defined
1076 || h
->root
.type
== bfd_link_hash_defweak
)
1078 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0
1079 && (sym_sec
= h
->root
.u
.def
.section
) != NULL
1080 && sym_sec
->output_section
!= NULL
1081 && sym_sec
->output_section
->owner
== info
->output_bfd
1082 && spu_elf_section_data (sym_sec
->output_section
) != NULL
1083 && (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
!= 0
1084 || htab
->non_overlay_stubs
))
1086 return count_stub (htab
, NULL
, NULL
, nonovl_stub
, h
, NULL
);
1093 build_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
1095 /* Symbols starting with _SPUEAR_ need a stub because they may be
1096 invoked by the PPU. */
1097 struct bfd_link_info
*info
= inf
;
1098 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1101 if ((h
->root
.type
== bfd_link_hash_defined
1102 || h
->root
.type
== bfd_link_hash_defweak
)
1104 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0
1105 && (sym_sec
= h
->root
.u
.def
.section
) != NULL
1106 && sym_sec
->output_section
!= NULL
1107 && sym_sec
->output_section
->owner
== info
->output_bfd
1108 && spu_elf_section_data (sym_sec
->output_section
) != NULL
1109 && (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
!= 0
1110 || htab
->non_overlay_stubs
))
1112 return build_stub (htab
, NULL
, NULL
, nonovl_stub
, h
, NULL
,
1113 h
->root
.u
.def
.value
, sym_sec
);
1119 /* Size or build stubs. */
1122 process_stubs (struct bfd_link_info
*info
, bfd_boolean build
)
1124 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1127 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
1129 extern const bfd_target bfd_elf32_spu_vec
;
1130 Elf_Internal_Shdr
*symtab_hdr
;
1132 Elf_Internal_Sym
*local_syms
= NULL
;
1134 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
1137 /* We'll need the symbol table in a second. */
1138 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
1139 if (symtab_hdr
->sh_info
== 0)
1142 /* Walk over each section attached to the input bfd. */
1143 for (isec
= ibfd
->sections
; isec
!= NULL
; isec
= isec
->next
)
1145 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
1147 /* If there aren't any relocs, then there's nothing more to do. */
1148 if ((isec
->flags
& SEC_RELOC
) == 0
1149 || isec
->reloc_count
== 0)
1152 if (!maybe_needs_stubs (isec
, info
->output_bfd
))
1155 /* Get the relocs. */
1156 internal_relocs
= _bfd_elf_link_read_relocs (ibfd
, isec
, NULL
, NULL
,
1158 if (internal_relocs
== NULL
)
1159 goto error_ret_free_local
;
1161 /* Now examine each relocation. */
1162 irela
= internal_relocs
;
1163 irelaend
= irela
+ isec
->reloc_count
;
1164 for (; irela
< irelaend
; irela
++)
1166 enum elf_spu_reloc_type r_type
;
1167 unsigned int r_indx
;
1169 Elf_Internal_Sym
*sym
;
1170 struct elf_link_hash_entry
*h
;
1171 enum _stub_type stub_type
;
1173 r_type
= ELF32_R_TYPE (irela
->r_info
);
1174 r_indx
= ELF32_R_SYM (irela
->r_info
);
1176 if (r_type
>= R_SPU_max
)
1178 bfd_set_error (bfd_error_bad_value
);
1179 error_ret_free_internal
:
1180 if (elf_section_data (isec
)->relocs
!= internal_relocs
)
1181 free (internal_relocs
);
1182 error_ret_free_local
:
1183 if (local_syms
!= NULL
1184 && (symtab_hdr
->contents
1185 != (unsigned char *) local_syms
))
1190 /* Determine the reloc target section. */
1191 if (!get_sym_h (&h
, &sym
, &sym_sec
, &local_syms
, r_indx
, ibfd
))
1192 goto error_ret_free_internal
;
1194 stub_type
= needs_ovl_stub (h
, sym
, sym_sec
, isec
, irela
,
1196 if (stub_type
== no_stub
)
1198 else if (stub_type
== stub_error
)
1199 goto error_ret_free_internal
;
1201 if (htab
->stub_count
== NULL
)
1204 amt
= (htab
->num_overlays
+ 1) * sizeof (*htab
->stub_count
);
1205 htab
->stub_count
= bfd_zmalloc (amt
);
1206 if (htab
->stub_count
== NULL
)
1207 goto error_ret_free_internal
;
1212 if (!count_stub (htab
, ibfd
, isec
, stub_type
, h
, irela
))
1213 goto error_ret_free_internal
;
1220 dest
= h
->root
.u
.def
.value
;
1222 dest
= sym
->st_value
;
1223 dest
+= irela
->r_addend
;
1224 if (!build_stub (htab
, ibfd
, isec
, stub_type
, h
, irela
,
1226 goto error_ret_free_internal
;
1230 /* We're done with the internal relocs, free them. */
1231 if (elf_section_data (isec
)->relocs
!= internal_relocs
)
1232 free (internal_relocs
);
1235 if (local_syms
!= NULL
1236 && symtab_hdr
->contents
!= (unsigned char *) local_syms
)
1238 if (!info
->keep_memory
)
1241 symtab_hdr
->contents
= (unsigned char *) local_syms
;
1248 /* Allocate space for overlay call and return stubs. */
1251 spu_elf_size_stubs (struct bfd_link_info
*info
,
1252 void (*place_spu_section
) (asection
*, asection
*,
1254 int non_overlay_stubs
)
1256 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1263 htab
->non_overlay_stubs
= non_overlay_stubs
;
1264 if (!process_stubs (info
, FALSE
))
1267 elf_link_hash_traverse (&htab
->elf
, allocate_spuear_stubs
, info
);
1271 if (htab
->stub_count
== NULL
)
1274 ibfd
= info
->input_bfds
;
1275 amt
= (htab
->num_overlays
+ 1) * sizeof (*htab
->stub_sec
);
1276 htab
->stub_sec
= bfd_zmalloc (amt
);
1277 if (htab
->stub_sec
== NULL
)
1280 flags
= (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_READONLY
1281 | SEC_HAS_CONTENTS
| SEC_IN_MEMORY
);
1282 stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1283 htab
->stub_sec
[0] = stub
;
1285 || !bfd_set_section_alignment (ibfd
, stub
, 3 + (OVL_STUB_SIZE
> 8)))
1287 stub
->size
= htab
->stub_count
[0] * OVL_STUB_SIZE
;
1288 (*place_spu_section
) (stub
, NULL
, ".text");
1290 for (i
= 0; i
< htab
->num_overlays
; ++i
)
1292 asection
*osec
= htab
->ovl_sec
[i
];
1293 unsigned int ovl
= spu_elf_section_data (osec
)->u
.o
.ovl_index
;
1294 stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1295 htab
->stub_sec
[ovl
] = stub
;
1297 || !bfd_set_section_alignment (ibfd
, stub
, 3 + (OVL_STUB_SIZE
> 8)))
1299 stub
->size
= htab
->stub_count
[ovl
] * OVL_STUB_SIZE
;
1300 (*place_spu_section
) (stub
, osec
, NULL
);
1303 /* htab->ovtab consists of two arrays.
1313 . } _ovly_buf_table[];
1316 flags
= (SEC_ALLOC
| SEC_LOAD
1317 | SEC_HAS_CONTENTS
| SEC_IN_MEMORY
);
1318 htab
->ovtab
= bfd_make_section_anyway_with_flags (ibfd
, ".ovtab", flags
);
1319 if (htab
->ovtab
== NULL
1320 || !bfd_set_section_alignment (ibfd
, htab
->ovtab
, 4))
1323 htab
->ovtab
->size
= htab
->num_overlays
* 16 + 16 + htab
->num_buf
* 4;
1324 (*place_spu_section
) (htab
->ovtab
, NULL
, ".data");
1326 htab
->toe
= bfd_make_section_anyway_with_flags (ibfd
, ".toe", SEC_ALLOC
);
1327 if (htab
->toe
== NULL
1328 || !bfd_set_section_alignment (ibfd
, htab
->toe
, 4))
1330 htab
->toe
->size
= 16;
1331 (*place_spu_section
) (htab
->toe
, NULL
, ".toe");
1336 /* Functions to handle embedded spu_ovl.o object. */
1339 ovl_mgr_open (struct bfd
*nbfd ATTRIBUTE_UNUSED
, void *stream
)
1345 ovl_mgr_pread (struct bfd
*abfd ATTRIBUTE_UNUSED
,
1351 struct _ovl_stream
*os
;
1355 os
= (struct _ovl_stream
*) stream
;
1356 max
= (const char *) os
->end
- (const char *) os
->start
;
1358 if ((ufile_ptr
) offset
>= max
)
1362 if (count
> max
- offset
)
1363 count
= max
- offset
;
1365 memcpy (buf
, (const char *) os
->start
+ offset
, count
);
1370 spu_elf_open_builtin_lib (bfd
**ovl_bfd
, const struct _ovl_stream
*stream
)
1372 *ovl_bfd
= bfd_openr_iovec ("builtin ovl_mgr",
1379 return *ovl_bfd
!= NULL
;
1382 /* Define an STT_OBJECT symbol. */
1384 static struct elf_link_hash_entry
*
1385 define_ovtab_symbol (struct spu_link_hash_table
*htab
, const char *name
)
1387 struct elf_link_hash_entry
*h
;
1389 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, FALSE
, FALSE
);
1393 if (h
->root
.type
!= bfd_link_hash_defined
1396 h
->root
.type
= bfd_link_hash_defined
;
1397 h
->root
.u
.def
.section
= htab
->ovtab
;
1398 h
->type
= STT_OBJECT
;
1401 h
->ref_regular_nonweak
= 1;
1404 else if (h
->root
.u
.def
.section
->owner
!= NULL
)
1406 (*_bfd_error_handler
) (_("%B is not allowed to define %s"),
1407 h
->root
.u
.def
.section
->owner
,
1408 h
->root
.root
.string
);
1409 bfd_set_error (bfd_error_bad_value
);
1414 (*_bfd_error_handler
) (_("you are not allowed to define %s in a script"),
1415 h
->root
.root
.string
);
1416 bfd_set_error (bfd_error_bad_value
);
1423 /* Fill in all stubs and the overlay tables. */
1426 spu_elf_build_stubs (struct bfd_link_info
*info
, int emit_syms
)
1428 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1429 struct elf_link_hash_entry
*h
;
1435 htab
->emit_stub_syms
= emit_syms
;
1436 if (htab
->stub_count
== NULL
)
1439 for (i
= 0; i
<= htab
->num_overlays
; i
++)
1440 if (htab
->stub_sec
[i
]->size
!= 0)
1442 htab
->stub_sec
[i
]->contents
= bfd_zalloc (htab
->stub_sec
[i
]->owner
,
1443 htab
->stub_sec
[i
]->size
);
1444 if (htab
->stub_sec
[i
]->contents
== NULL
)
1446 htab
->stub_sec
[i
]->rawsize
= htab
->stub_sec
[i
]->size
;
1447 htab
->stub_sec
[i
]->size
= 0;
1450 h
= elf_link_hash_lookup (&htab
->elf
, "__ovly_load", FALSE
, FALSE
, FALSE
);
1451 htab
->ovly_load
= h
;
1452 BFD_ASSERT (h
!= NULL
1453 && (h
->root
.type
== bfd_link_hash_defined
1454 || h
->root
.type
== bfd_link_hash_defweak
)
1457 s
= h
->root
.u
.def
.section
->output_section
;
1458 if (spu_elf_section_data (s
)->u
.o
.ovl_index
)
1460 (*_bfd_error_handler
) (_("%s in overlay section"),
1461 h
->root
.root
.string
);
1462 bfd_set_error (bfd_error_bad_value
);
1466 h
= elf_link_hash_lookup (&htab
->elf
, "__ovly_return", FALSE
, FALSE
, FALSE
);
1467 htab
->ovly_return
= h
;
1469 /* Fill in all the stubs. */
1470 process_stubs (info
, TRUE
);
1471 if (!htab
->stub_err
)
1472 elf_link_hash_traverse (&htab
->elf
, build_spuear_stubs
, info
);
1476 (*_bfd_error_handler
) (_("overlay stub relocation overflow"));
1477 bfd_set_error (bfd_error_bad_value
);
1481 for (i
= 0; i
<= htab
->num_overlays
; i
++)
1483 if (htab
->stub_sec
[i
]->size
!= htab
->stub_sec
[i
]->rawsize
)
1485 (*_bfd_error_handler
) (_("stubs don't match calculated size"));
1486 bfd_set_error (bfd_error_bad_value
);
1489 htab
->stub_sec
[i
]->rawsize
= 0;
1492 htab
->ovtab
->contents
= bfd_zalloc (htab
->ovtab
->owner
, htab
->ovtab
->size
);
1493 if (htab
->ovtab
->contents
== NULL
)
1496 /* Write out _ovly_table. */
1497 p
= htab
->ovtab
->contents
;
1498 /* set low bit of .size to mark non-overlay area as present. */
1500 obfd
= htab
->ovtab
->output_section
->owner
;
1501 for (s
= obfd
->sections
; s
!= NULL
; s
= s
->next
)
1503 unsigned int ovl_index
= spu_elf_section_data (s
)->u
.o
.ovl_index
;
1507 unsigned long off
= ovl_index
* 16;
1508 unsigned int ovl_buf
= spu_elf_section_data (s
)->u
.o
.ovl_buf
;
1510 bfd_put_32 (htab
->ovtab
->owner
, s
->vma
, p
+ off
);
1511 bfd_put_32 (htab
->ovtab
->owner
, (s
->size
+ 15) & -16, p
+ off
+ 4);
1512 /* file_off written later in spu_elf_modify_program_headers. */
1513 bfd_put_32 (htab
->ovtab
->owner
, ovl_buf
, p
+ off
+ 12);
1517 h
= define_ovtab_symbol (htab
, "_ovly_table");
1520 h
->root
.u
.def
.value
= 16;
1521 h
->size
= htab
->num_overlays
* 16;
1523 h
= define_ovtab_symbol (htab
, "_ovly_table_end");
1526 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16;
1529 h
= define_ovtab_symbol (htab
, "_ovly_buf_table");
1532 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16;
1533 h
->size
= htab
->num_buf
* 4;
1535 h
= define_ovtab_symbol (htab
, "_ovly_buf_table_end");
1538 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16 + htab
->num_buf
* 4;
1541 h
= define_ovtab_symbol (htab
, "_EAR_");
1544 h
->root
.u
.def
.section
= htab
->toe
;
1545 h
->root
.u
.def
.value
= 0;
1551 /* Check that all loadable section VMAs lie in the range
1552 LO .. HI inclusive, and stash some parameters for --auto-overlay. */
1555 spu_elf_check_vma (struct bfd_link_info
*info
,
1559 unsigned int overlay_fixed
,
1560 unsigned int reserved
,
1561 int extra_stack_space
,
1562 void (*spu_elf_load_ovl_mgr
) (void),
1563 FILE *(*spu_elf_open_overlay_script
) (void),
1564 void (*spu_elf_relink
) (void))
1566 struct elf_segment_map
*m
;
1568 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1569 bfd
*abfd
= info
->output_bfd
;
1571 if (auto_overlay
& AUTO_OVERLAY
)
1572 htab
->auto_overlay
= auto_overlay
;
1573 htab
->local_store
= hi
+ 1 - lo
;
1574 htab
->overlay_fixed
= overlay_fixed
;
1575 htab
->reserved
= reserved
;
1576 htab
->extra_stack_space
= extra_stack_space
;
1577 htab
->spu_elf_load_ovl_mgr
= spu_elf_load_ovl_mgr
;
1578 htab
->spu_elf_open_overlay_script
= spu_elf_open_overlay_script
;
1579 htab
->spu_elf_relink
= spu_elf_relink
;
1581 for (m
= elf_tdata (abfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
1582 if (m
->p_type
== PT_LOAD
)
1583 for (i
= 0; i
< m
->count
; i
++)
1584 if (m
->sections
[i
]->size
!= 0
1585 && (m
->sections
[i
]->vma
< lo
1586 || m
->sections
[i
]->vma
> hi
1587 || m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1 > hi
))
1588 return m
->sections
[i
];
1590 /* No need for overlays if it all fits. */
1591 htab
->auto_overlay
= 0;
1595 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
1596 Search for stack adjusting insns, and return the sp delta. */
1599 find_function_stack_adjust (asection
*sec
, bfd_vma offset
)
1603 memset (reg
, 0, sizeof (reg
));
1604 for ( ; offset
+ 4 <= sec
->size
; offset
+= 4)
1606 unsigned char buf
[4];
1610 /* Assume no relocs on stack adjusing insns. */
1611 if (!bfd_get_section_contents (sec
->owner
, sec
, buf
, offset
, 4))
1614 if (buf
[0] == 0x24 /* stqd */)
1618 ra
= ((buf
[2] & 0x3f) << 1) | (buf
[3] >> 7);
1619 /* Partly decoded immediate field. */
1620 imm
= (buf
[1] << 9) | (buf
[2] << 1) | (buf
[3] >> 7);
1622 if (buf
[0] == 0x1c /* ai */)
1625 imm
= (imm
^ 0x200) - 0x200;
1626 reg
[rt
] = reg
[ra
] + imm
;
1628 if (rt
== 1 /* sp */)
1635 else if (buf
[0] == 0x18 && (buf
[1] & 0xe0) == 0 /* a */)
1637 int rb
= ((buf
[1] & 0x1f) << 2) | ((buf
[2] & 0xc0) >> 6);
1639 reg
[rt
] = reg
[ra
] + reg
[rb
];
1647 else if ((buf
[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
1649 if (buf
[0] >= 0x42 /* ila */)
1650 imm
|= (buf
[0] & 1) << 17;
1655 if (buf
[0] == 0x40 /* il */)
1657 if ((buf
[1] & 0x80) == 0)
1659 imm
= (imm
^ 0x8000) - 0x8000;
1661 else if ((buf
[1] & 0x80) == 0 /* ilhu */)
1667 else if (buf
[0] == 0x60 && (buf
[1] & 0x80) != 0 /* iohl */)
1669 reg
[rt
] |= imm
& 0xffff;
1672 else if (buf
[0] == 0x04 /* ori */)
1675 imm
= (imm
^ 0x200) - 0x200;
1676 reg
[rt
] = reg
[ra
] | imm
;
1679 else if (buf
[0] == 0x32 && (buf
[1] & 0x80) != 0 /* fsmbi */)
1681 reg
[rt
] = ( ((imm
& 0x8000) ? 0xff000000 : 0)
1682 | ((imm
& 0x4000) ? 0x00ff0000 : 0)
1683 | ((imm
& 0x2000) ? 0x0000ff00 : 0)
1684 | ((imm
& 0x1000) ? 0x000000ff : 0));
1687 else if (buf
[0] == 0x16 /* andbi */)
1693 reg
[rt
] = reg
[ra
] & imm
;
1696 else if (buf
[0] == 0x33 && imm
== 1 /* brsl .+4 */)
1698 /* Used in pic reg load. Say rt is trashed. Won't be used
1699 in stack adjust, but we need to continue past this branch. */
1703 else if (is_branch (buf
) || is_indirect_branch (buf
))
1704 /* If we hit a branch then we must be out of the prologue. */
1711 /* qsort predicate to sort symbols by section and value. */
1713 static Elf_Internal_Sym
*sort_syms_syms
;
1714 static asection
**sort_syms_psecs
;
1717 sort_syms (const void *a
, const void *b
)
1719 Elf_Internal_Sym
*const *s1
= a
;
1720 Elf_Internal_Sym
*const *s2
= b
;
1721 asection
*sec1
,*sec2
;
1722 bfd_signed_vma delta
;
1724 sec1
= sort_syms_psecs
[*s1
- sort_syms_syms
];
1725 sec2
= sort_syms_psecs
[*s2
- sort_syms_syms
];
1728 return sec1
->index
- sec2
->index
;
1730 delta
= (*s1
)->st_value
- (*s2
)->st_value
;
1732 return delta
< 0 ? -1 : 1;
1734 delta
= (*s2
)->st_size
- (*s1
)->st_size
;
1736 return delta
< 0 ? -1 : 1;
1738 return *s1
< *s2
? -1 : 1;
1743 struct function_info
*fun
;
1744 struct call_info
*next
;
1746 unsigned int max_depth
;
1747 unsigned int is_tail
: 1;
1748 unsigned int is_pasted
: 1;
1751 struct function_info
1753 /* List of functions called. Also branches to hot/cold part of
1755 struct call_info
*call_list
;
1756 /* For hot/cold part of function, point to owner. */
1757 struct function_info
*start
;
1758 /* Symbol at start of function. */
1760 Elf_Internal_Sym
*sym
;
1761 struct elf_link_hash_entry
*h
;
1763 /* Function section. */
1766 /* Where last called from, and number of sections called from. */
1767 asection
*last_caller
;
1768 unsigned int call_count
;
1769 /* Address range of (this part of) function. */
1773 /* Distance from root of call tree. Tail and hot/cold branches
1774 count as one deeper. We aren't counting stack frames here. */
1776 /* Set if global symbol. */
1777 unsigned int global
: 1;
1778 /* Set if known to be start of function (as distinct from a hunk
1779 in hot/cold section. */
1780 unsigned int is_func
: 1;
1781 /* Set if not a root node. */
1782 unsigned int non_root
: 1;
1783 /* Flags used during call tree traversal. It's cheaper to replicate
1784 the visit flags than have one which needs clearing after a traversal. */
1785 unsigned int visit1
: 1;
1786 unsigned int visit2
: 1;
1787 unsigned int marking
: 1;
1788 unsigned int visit3
: 1;
1789 unsigned int visit4
: 1;
1790 unsigned int visit5
: 1;
1791 unsigned int visit6
: 1;
1792 unsigned int visit7
: 1;
1795 struct spu_elf_stack_info
1799 /* Variable size array describing functions, one per contiguous
1800 address range belonging to a function. */
1801 struct function_info fun
[1];
1804 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
1805 entries for section SEC. */
1807 static struct spu_elf_stack_info
*
1808 alloc_stack_info (asection
*sec
, int max_fun
)
1810 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1813 amt
= sizeof (struct spu_elf_stack_info
);
1814 amt
+= (max_fun
- 1) * sizeof (struct function_info
);
1815 sec_data
->u
.i
.stack_info
= bfd_zmalloc (amt
);
1816 if (sec_data
->u
.i
.stack_info
!= NULL
)
1817 sec_data
->u
.i
.stack_info
->max_fun
= max_fun
;
1818 return sec_data
->u
.i
.stack_info
;
1821 /* Add a new struct function_info describing a (part of a) function
1822 starting at SYM_H. Keep the array sorted by address. */
1824 static struct function_info
*
1825 maybe_insert_function (asection
*sec
,
1828 bfd_boolean is_func
)
1830 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1831 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
1837 sinfo
= alloc_stack_info (sec
, 20);
1844 Elf_Internal_Sym
*sym
= sym_h
;
1845 off
= sym
->st_value
;
1846 size
= sym
->st_size
;
1850 struct elf_link_hash_entry
*h
= sym_h
;
1851 off
= h
->root
.u
.def
.value
;
1855 for (i
= sinfo
->num_fun
; --i
>= 0; )
1856 if (sinfo
->fun
[i
].lo
<= off
)
1861 /* Don't add another entry for an alias, but do update some
1863 if (sinfo
->fun
[i
].lo
== off
)
1865 /* Prefer globals over local syms. */
1866 if (global
&& !sinfo
->fun
[i
].global
)
1868 sinfo
->fun
[i
].global
= TRUE
;
1869 sinfo
->fun
[i
].u
.h
= sym_h
;
1872 sinfo
->fun
[i
].is_func
= TRUE
;
1873 return &sinfo
->fun
[i
];
1875 /* Ignore a zero-size symbol inside an existing function. */
1876 else if (sinfo
->fun
[i
].hi
> off
&& size
== 0)
1877 return &sinfo
->fun
[i
];
1880 if (sinfo
->num_fun
>= sinfo
->max_fun
)
1882 bfd_size_type amt
= sizeof (struct spu_elf_stack_info
);
1883 bfd_size_type old
= amt
;
1885 old
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
1886 sinfo
->max_fun
+= 20 + (sinfo
->max_fun
>> 1);
1887 amt
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
1888 sinfo
= bfd_realloc (sinfo
, amt
);
1891 memset ((char *) sinfo
+ old
, 0, amt
- old
);
1892 sec_data
->u
.i
.stack_info
= sinfo
;
1895 if (++i
< sinfo
->num_fun
)
1896 memmove (&sinfo
->fun
[i
+ 1], &sinfo
->fun
[i
],
1897 (sinfo
->num_fun
- i
) * sizeof (sinfo
->fun
[i
]));
1898 sinfo
->fun
[i
].is_func
= is_func
;
1899 sinfo
->fun
[i
].global
= global
;
1900 sinfo
->fun
[i
].sec
= sec
;
1902 sinfo
->fun
[i
].u
.h
= sym_h
;
1904 sinfo
->fun
[i
].u
.sym
= sym_h
;
1905 sinfo
->fun
[i
].lo
= off
;
1906 sinfo
->fun
[i
].hi
= off
+ size
;
1907 sinfo
->fun
[i
].stack
= -find_function_stack_adjust (sec
, off
);
1908 sinfo
->num_fun
+= 1;
1909 return &sinfo
->fun
[i
];
1912 /* Return the name of FUN. */
1915 func_name (struct function_info
*fun
)
1919 Elf_Internal_Shdr
*symtab_hdr
;
1921 while (fun
->start
!= NULL
)
1925 return fun
->u
.h
->root
.root
.string
;
1928 if (fun
->u
.sym
->st_name
== 0)
1930 size_t len
= strlen (sec
->name
);
1931 char *name
= bfd_malloc (len
+ 10);
1934 sprintf (name
, "%s+%lx", sec
->name
,
1935 (unsigned long) fun
->u
.sym
->st_value
& 0xffffffff);
1939 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
1940 return bfd_elf_sym_name (ibfd
, symtab_hdr
, fun
->u
.sym
, sec
);
1943 /* Read the instruction at OFF in SEC. Return true iff the instruction
1944 is a nop, lnop, or stop 0 (all zero insn). */
1947 is_nop (asection
*sec
, bfd_vma off
)
1949 unsigned char insn
[4];
1951 if (off
+ 4 > sec
->size
1952 || !bfd_get_section_contents (sec
->owner
, sec
, insn
, off
, 4))
1954 if ((insn
[0] & 0xbf) == 0 && (insn
[1] & 0xe0) == 0x20)
1956 if (insn
[0] == 0 && insn
[1] == 0 && insn
[2] == 0 && insn
[3] == 0)
1961 /* Extend the range of FUN to cover nop padding up to LIMIT.
1962 Return TRUE iff some instruction other than a NOP was found. */
1965 insns_at_end (struct function_info
*fun
, bfd_vma limit
)
1967 bfd_vma off
= (fun
->hi
+ 3) & -4;
1969 while (off
< limit
&& is_nop (fun
->sec
, off
))
1980 /* Check and fix overlapping function ranges. Return TRUE iff there
1981 are gaps in the current info we have about functions in SEC. */
1984 check_function_ranges (asection
*sec
, struct bfd_link_info
*info
)
1986 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1987 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
1989 bfd_boolean gaps
= FALSE
;
1994 for (i
= 1; i
< sinfo
->num_fun
; i
++)
1995 if (sinfo
->fun
[i
- 1].hi
> sinfo
->fun
[i
].lo
)
1997 /* Fix overlapping symbols. */
1998 const char *f1
= func_name (&sinfo
->fun
[i
- 1]);
1999 const char *f2
= func_name (&sinfo
->fun
[i
]);
2001 info
->callbacks
->einfo (_("warning: %s overlaps %s\n"), f1
, f2
);
2002 sinfo
->fun
[i
- 1].hi
= sinfo
->fun
[i
].lo
;
2004 else if (insns_at_end (&sinfo
->fun
[i
- 1], sinfo
->fun
[i
].lo
))
2007 if (sinfo
->num_fun
== 0)
2011 if (sinfo
->fun
[0].lo
!= 0)
2013 if (sinfo
->fun
[sinfo
->num_fun
- 1].hi
> sec
->size
)
2015 const char *f1
= func_name (&sinfo
->fun
[sinfo
->num_fun
- 1]);
2017 info
->callbacks
->einfo (_("warning: %s exceeds section size\n"), f1
);
2018 sinfo
->fun
[sinfo
->num_fun
- 1].hi
= sec
->size
;
2020 else if (insns_at_end (&sinfo
->fun
[sinfo
->num_fun
- 1], sec
->size
))
2026 /* Search current function info for a function that contains address
2027 OFFSET in section SEC. */
2029 static struct function_info
*
2030 find_function (asection
*sec
, bfd_vma offset
, struct bfd_link_info
*info
)
2032 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2033 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
2037 hi
= sinfo
->num_fun
;
2040 mid
= (lo
+ hi
) / 2;
2041 if (offset
< sinfo
->fun
[mid
].lo
)
2043 else if (offset
>= sinfo
->fun
[mid
].hi
)
2046 return &sinfo
->fun
[mid
];
2048 info
->callbacks
->einfo (_("%A:0x%v not found in function table\n"),
2053 /* Add CALLEE to CALLER call list if not already present. Return TRUE
2054 if CALLEE was new. If this function return FALSE, CALLEE should
2058 insert_callee (struct function_info
*caller
, struct call_info
*callee
)
2060 struct call_info
**pp
, *p
;
2062 for (pp
= &caller
->call_list
; (p
= *pp
) != NULL
; pp
= &p
->next
)
2063 if (p
->fun
== callee
->fun
)
2065 /* Tail calls use less stack than normal calls. Retain entry
2066 for normal call over one for tail call. */
2067 p
->is_tail
&= callee
->is_tail
;
2070 p
->fun
->start
= NULL
;
2071 p
->fun
->is_func
= TRUE
;
2074 /* Reorder list so most recent call is first. */
2076 p
->next
= caller
->call_list
;
2077 caller
->call_list
= p
;
2080 callee
->next
= caller
->call_list
;
2082 caller
->call_list
= callee
;
2086 /* Copy CALL and insert the copy into CALLER. */
2089 copy_callee (struct function_info
*caller
, const struct call_info
*call
)
2091 struct call_info
*callee
;
2092 callee
= bfd_malloc (sizeof (*callee
));
2096 if (!insert_callee (caller
, callee
))
2101 /* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2102 overlay stub sections. */
2105 interesting_section (asection
*s
, bfd
*obfd
)
2107 return (s
->output_section
!= NULL
2108 && s
->output_section
->owner
== obfd
2109 && ((s
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_IN_MEMORY
))
2110 == (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2114 /* Rummage through the relocs for SEC, looking for function calls.
2115 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
2116 mark destination symbols on calls as being functions. Also
2117 look at branches, which may be tail calls or go to hot/cold
2118 section part of same function. */
2121 mark_functions_via_relocs (asection
*sec
,
2122 struct bfd_link_info
*info
,
2125 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
2126 Elf_Internal_Shdr
*symtab_hdr
;
2128 static bfd_boolean warned
;
2130 if (!interesting_section (sec
, info
->output_bfd
)
2131 || sec
->reloc_count
== 0)
2134 internal_relocs
= _bfd_elf_link_read_relocs (sec
->owner
, sec
, NULL
, NULL
,
2136 if (internal_relocs
== NULL
)
2139 symtab_hdr
= &elf_tdata (sec
->owner
)->symtab_hdr
;
2140 psyms
= &symtab_hdr
->contents
;
2141 irela
= internal_relocs
;
2142 irelaend
= irela
+ sec
->reloc_count
;
2143 for (; irela
< irelaend
; irela
++)
2145 enum elf_spu_reloc_type r_type
;
2146 unsigned int r_indx
;
2148 Elf_Internal_Sym
*sym
;
2149 struct elf_link_hash_entry
*h
;
2151 bfd_boolean reject
, is_call
;
2152 struct function_info
*caller
;
2153 struct call_info
*callee
;
2156 r_type
= ELF32_R_TYPE (irela
->r_info
);
2157 if (r_type
!= R_SPU_REL16
2158 && r_type
!= R_SPU_ADDR16
)
2161 if (!(call_tree
&& spu_hash_table (info
)->auto_overlay
))
2165 r_indx
= ELF32_R_SYM (irela
->r_info
);
2166 if (!get_sym_h (&h
, &sym
, &sym_sec
, psyms
, r_indx
, sec
->owner
))
2170 || sym_sec
->output_section
== NULL
2171 || sym_sec
->output_section
->owner
!= info
->output_bfd
)
2177 unsigned char insn
[4];
2179 if (!bfd_get_section_contents (sec
->owner
, sec
, insn
,
2180 irela
->r_offset
, 4))
2182 if (is_branch (insn
))
2184 is_call
= (insn
[0] & 0xfd) == 0x31;
2185 if ((sym_sec
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2186 != (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2189 info
->callbacks
->einfo
2190 (_("%B(%A+0x%v): call to non-code section"
2191 " %B(%A), analysis incomplete\n"),
2192 sec
->owner
, sec
, irela
->r_offset
,
2193 sym_sec
->owner
, sym_sec
);
2201 if (!(call_tree
&& spu_hash_table (info
)->auto_overlay
)
2209 /* For --auto-overlay, count possible stubs we need for
2210 function pointer references. */
2211 unsigned int sym_type
;
2215 sym_type
= ELF_ST_TYPE (sym
->st_info
);
2216 if (sym_type
== STT_FUNC
)
2217 spu_hash_table (info
)->non_ovly_stub
+= 1;
2222 val
= h
->root
.u
.def
.value
;
2224 val
= sym
->st_value
;
2225 val
+= irela
->r_addend
;
2229 struct function_info
*fun
;
2231 if (irela
->r_addend
!= 0)
2233 Elf_Internal_Sym
*fake
= bfd_zmalloc (sizeof (*fake
));
2236 fake
->st_value
= val
;
2238 = _bfd_elf_section_from_bfd_section (sym_sec
->owner
, sym_sec
);
2242 fun
= maybe_insert_function (sym_sec
, sym
, FALSE
, is_call
);
2244 fun
= maybe_insert_function (sym_sec
, h
, TRUE
, is_call
);
2247 if (irela
->r_addend
!= 0
2248 && fun
->u
.sym
!= sym
)
2253 caller
= find_function (sec
, irela
->r_offset
, info
);
2256 callee
= bfd_malloc (sizeof *callee
);
2260 callee
->fun
= find_function (sym_sec
, val
, info
);
2261 if (callee
->fun
== NULL
)
2263 callee
->is_tail
= !is_call
;
2264 callee
->is_pasted
= FALSE
;
2266 if (callee
->fun
->last_caller
!= sec
)
2268 callee
->fun
->last_caller
= sec
;
2269 callee
->fun
->call_count
+= 1;
2271 if (!insert_callee (caller
, callee
))
2274 && !callee
->fun
->is_func
2275 && callee
->fun
->stack
== 0)
2277 /* This is either a tail call or a branch from one part of
2278 the function to another, ie. hot/cold section. If the
2279 destination has been called by some other function then
2280 it is a separate function. We also assume that functions
2281 are not split across input files. */
2282 if (sec
->owner
!= sym_sec
->owner
)
2284 callee
->fun
->start
= NULL
;
2285 callee
->fun
->is_func
= TRUE
;
2287 else if (callee
->fun
->start
== NULL
)
2288 callee
->fun
->start
= caller
;
2291 struct function_info
*callee_start
;
2292 struct function_info
*caller_start
;
2293 callee_start
= callee
->fun
;
2294 while (callee_start
->start
)
2295 callee_start
= callee_start
->start
;
2296 caller_start
= caller
;
2297 while (caller_start
->start
)
2298 caller_start
= caller_start
->start
;
2299 if (caller_start
!= callee_start
)
2301 callee
->fun
->start
= NULL
;
2302 callee
->fun
->is_func
= TRUE
;
2311 /* Handle something like .init or .fini, which has a piece of a function.
2312 These sections are pasted together to form a single function. */
2315 pasted_function (asection
*sec
, struct bfd_link_info
*info
)
2317 struct bfd_link_order
*l
;
2318 struct _spu_elf_section_data
*sec_data
;
2319 struct spu_elf_stack_info
*sinfo
;
2320 Elf_Internal_Sym
*fake
;
2321 struct function_info
*fun
, *fun_start
;
2323 fake
= bfd_zmalloc (sizeof (*fake
));
2327 fake
->st_size
= sec
->size
;
2329 = _bfd_elf_section_from_bfd_section (sec
->owner
, sec
);
2330 fun
= maybe_insert_function (sec
, fake
, FALSE
, FALSE
);
2334 /* Find a function immediately preceding this section. */
2336 for (l
= sec
->output_section
->map_head
.link_order
; l
!= NULL
; l
= l
->next
)
2338 if (l
->u
.indirect
.section
== sec
)
2340 if (fun_start
!= NULL
)
2342 struct call_info
*callee
= bfd_malloc (sizeof *callee
);
2346 fun
->start
= fun_start
;
2348 callee
->is_tail
= TRUE
;
2349 callee
->is_pasted
= TRUE
;
2351 if (!insert_callee (fun_start
, callee
))
2357 if (l
->type
== bfd_indirect_link_order
2358 && (sec_data
= spu_elf_section_data (l
->u
.indirect
.section
)) != NULL
2359 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
2360 && sinfo
->num_fun
!= 0)
2361 fun_start
= &sinfo
->fun
[sinfo
->num_fun
- 1];
2364 info
->callbacks
->einfo (_("%A link_order not found\n"), sec
);
2368 /* Map address ranges in code sections to functions. */
2371 discover_functions (struct bfd_link_info
*info
)
2375 Elf_Internal_Sym
***psym_arr
;
2376 asection
***sec_arr
;
2377 bfd_boolean gaps
= FALSE
;
2380 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2383 psym_arr
= bfd_zmalloc (bfd_idx
* sizeof (*psym_arr
));
2384 if (psym_arr
== NULL
)
2386 sec_arr
= bfd_zmalloc (bfd_idx
* sizeof (*sec_arr
));
2387 if (sec_arr
== NULL
)
2391 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2393 ibfd
= ibfd
->link_next
, bfd_idx
++)
2395 extern const bfd_target bfd_elf32_spu_vec
;
2396 Elf_Internal_Shdr
*symtab_hdr
;
2399 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
2400 asection
**psecs
, **p
;
2402 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2405 /* Read all the symbols. */
2406 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2407 symcount
= symtab_hdr
->sh_size
/ symtab_hdr
->sh_entsize
;
2411 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2412 if (interesting_section (sec
, info
->output_bfd
))
2420 if (symtab_hdr
->contents
!= NULL
)
2422 /* Don't use cached symbols since the generic ELF linker
2423 code only reads local symbols, and we need globals too. */
2424 free (symtab_hdr
->contents
);
2425 symtab_hdr
->contents
= NULL
;
2427 syms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
, symcount
, 0,
2429 symtab_hdr
->contents
= (void *) syms
;
2433 /* Select defined function symbols that are going to be output. */
2434 psyms
= bfd_malloc ((symcount
+ 1) * sizeof (*psyms
));
2437 psym_arr
[bfd_idx
] = psyms
;
2438 psecs
= bfd_malloc (symcount
* sizeof (*psecs
));
2441 sec_arr
[bfd_idx
] = psecs
;
2442 for (psy
= psyms
, p
= psecs
, sy
= syms
; sy
< syms
+ symcount
; ++p
, ++sy
)
2443 if (ELF_ST_TYPE (sy
->st_info
) == STT_NOTYPE
2444 || ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
2445 || ELF_ST_TYPE (sy
->st_info
) == STT_SECTION
)
2449 *p
= s
= bfd_section_from_elf_index (ibfd
, sy
->st_shndx
);
2450 if (s
!= NULL
&& interesting_section (s
, info
->output_bfd
))
2453 symcount
= psy
- psyms
;
2456 /* Sort them by section and offset within section. */
2457 sort_syms_syms
= syms
;
2458 sort_syms_psecs
= psecs
;
2459 qsort (psyms
, symcount
, sizeof (*psyms
), sort_syms
);
2461 /* Now inspect the function symbols. */
2462 for (psy
= psyms
; psy
< psyms
+ symcount
; )
2464 asection
*s
= psecs
[*psy
- syms
];
2465 Elf_Internal_Sym
**psy2
;
2467 for (psy2
= psy
; ++psy2
< psyms
+ symcount
; )
2468 if (psecs
[*psy2
- syms
] != s
)
2471 if (!alloc_stack_info (s
, psy2
- psy
))
2476 /* First install info about properly typed and sized functions.
2477 In an ideal world this will cover all code sections, except
2478 when partitioning functions into hot and cold sections,
2479 and the horrible pasted together .init and .fini functions. */
2480 for (psy
= psyms
; psy
< psyms
+ symcount
; ++psy
)
2483 if (ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
2485 asection
*s
= psecs
[sy
- syms
];
2486 if (!maybe_insert_function (s
, sy
, FALSE
, TRUE
))
2491 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2492 if (interesting_section (sec
, info
->output_bfd
))
2493 gaps
|= check_function_ranges (sec
, info
);
2498 /* See if we can discover more function symbols by looking at
2500 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2502 ibfd
= ibfd
->link_next
, bfd_idx
++)
2506 if (psym_arr
[bfd_idx
] == NULL
)
2509 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2510 if (!mark_functions_via_relocs (sec
, info
, FALSE
))
2514 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2516 ibfd
= ibfd
->link_next
, bfd_idx
++)
2518 Elf_Internal_Shdr
*symtab_hdr
;
2520 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
2523 if ((psyms
= psym_arr
[bfd_idx
]) == NULL
)
2526 psecs
= sec_arr
[bfd_idx
];
2528 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2529 syms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
2532 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2533 if (interesting_section (sec
, info
->output_bfd
))
2534 gaps
|= check_function_ranges (sec
, info
);
2538 /* Finally, install all globals. */
2539 for (psy
= psyms
; (sy
= *psy
) != NULL
; ++psy
)
2543 s
= psecs
[sy
- syms
];
2545 /* Global syms might be improperly typed functions. */
2546 if (ELF_ST_TYPE (sy
->st_info
) != STT_FUNC
2547 && ELF_ST_BIND (sy
->st_info
) == STB_GLOBAL
)
2549 if (!maybe_insert_function (s
, sy
, FALSE
, FALSE
))
2555 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2557 extern const bfd_target bfd_elf32_spu_vec
;
2560 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2563 /* Some of the symbols we've installed as marking the
2564 beginning of functions may have a size of zero. Extend
2565 the range of such functions to the beginning of the
2566 next symbol of interest. */
2567 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2568 if (interesting_section (sec
, info
->output_bfd
))
2570 struct _spu_elf_section_data
*sec_data
;
2571 struct spu_elf_stack_info
*sinfo
;
2573 sec_data
= spu_elf_section_data (sec
);
2574 sinfo
= sec_data
->u
.i
.stack_info
;
2578 bfd_vma hi
= sec
->size
;
2580 for (fun_idx
= sinfo
->num_fun
; --fun_idx
>= 0; )
2582 sinfo
->fun
[fun_idx
].hi
= hi
;
2583 hi
= sinfo
->fun
[fun_idx
].lo
;
2586 /* No symbols in this section. Must be .init or .fini
2587 or something similar. */
2588 else if (!pasted_function (sec
, info
))
2594 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2596 ibfd
= ibfd
->link_next
, bfd_idx
++)
2598 if (psym_arr
[bfd_idx
] == NULL
)
2601 free (psym_arr
[bfd_idx
]);
2602 free (sec_arr
[bfd_idx
]);
2611 /* Iterate over all function_info we have collected, calling DOIT on
2612 each node if ROOT_ONLY is false. Only call DOIT on root nodes
2616 for_each_node (bfd_boolean (*doit
) (struct function_info
*,
2617 struct bfd_link_info
*,
2619 struct bfd_link_info
*info
,
2625 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2627 extern const bfd_target bfd_elf32_spu_vec
;
2630 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2633 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2635 struct _spu_elf_section_data
*sec_data
;
2636 struct spu_elf_stack_info
*sinfo
;
2638 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
2639 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
2642 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
2643 if (!root_only
|| !sinfo
->fun
[i
].non_root
)
2644 if (!doit (&sinfo
->fun
[i
], info
, param
))
2652 /* Transfer call info attached to struct function_info entries for
2653 all of a given function's sections to the first entry. */
2656 transfer_calls (struct function_info
*fun
,
2657 struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
2658 void *param ATTRIBUTE_UNUSED
)
2660 struct function_info
*start
= fun
->start
;
2664 struct call_info
*call
, *call_next
;
2666 while (start
->start
!= NULL
)
2667 start
= start
->start
;
2668 for (call
= fun
->call_list
; call
!= NULL
; call
= call_next
)
2670 call_next
= call
->next
;
2671 if (!insert_callee (start
, call
))
2674 fun
->call_list
= NULL
;
2679 /* Mark nodes in the call graph that are called by some other node. */
2682 mark_non_root (struct function_info
*fun
,
2683 struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
2684 void *param ATTRIBUTE_UNUSED
)
2686 struct call_info
*call
;
2691 for (call
= fun
->call_list
; call
; call
= call
->next
)
2693 call
->fun
->non_root
= TRUE
;
2694 mark_non_root (call
->fun
, 0, 0);
2699 /* Remove cycles from the call graph. Set depth of nodes. */
2702 remove_cycles (struct function_info
*fun
,
2703 struct bfd_link_info
*info
,
2706 struct call_info
**callp
, *call
;
2707 unsigned int depth
= *(unsigned int *) param
;
2708 unsigned int max_depth
= depth
;
2712 fun
->marking
= TRUE
;
2714 callp
= &fun
->call_list
;
2715 while ((call
= *callp
) != NULL
)
2717 if (!call
->fun
->visit2
)
2719 call
->max_depth
= depth
+ !call
->is_pasted
;
2720 if (!remove_cycles (call
->fun
, info
, &call
->max_depth
))
2722 if (max_depth
< call
->max_depth
)
2723 max_depth
= call
->max_depth
;
2725 else if (call
->fun
->marking
)
2727 if (!spu_hash_table (info
)->auto_overlay
)
2729 const char *f1
= func_name (fun
);
2730 const char *f2
= func_name (call
->fun
);
2732 info
->callbacks
->info (_("Stack analysis will ignore the call "
2736 *callp
= call
->next
;
2740 callp
= &call
->next
;
2742 fun
->marking
= FALSE
;
2743 *(unsigned int *) param
= max_depth
;
2747 /* Check that we actually visited all nodes in remove_cycles. If we
2748 didn't, then there is some cycle in the call graph not attached to
2749 any root node. Arbitrarily choose a node in the cycle as a new
2750 root and break the cycle. */
2753 mark_detached_root (struct function_info
*fun
,
2754 struct bfd_link_info
*info
,
2759 fun
->non_root
= FALSE
;
2760 *(unsigned int *) param
= 0;
2761 return remove_cycles (fun
, info
, param
);
2764 /* Populate call_list for each function. */
2767 build_call_tree (struct bfd_link_info
*info
)
2772 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2774 extern const bfd_target bfd_elf32_spu_vec
;
2777 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2780 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2781 if (!mark_functions_via_relocs (sec
, info
, TRUE
))
2785 /* Transfer call info from hot/cold section part of function
2787 if (!spu_hash_table (info
)->auto_overlay
2788 && !for_each_node (transfer_calls
, info
, 0, FALSE
))
2791 /* Find the call graph root(s). */
2792 if (!for_each_node (mark_non_root
, info
, 0, FALSE
))
2795 /* Remove cycles from the call graph. We start from the root node(s)
2796 so that we break cycles in a reasonable place. */
2798 if (!for_each_node (remove_cycles
, info
, &depth
, TRUE
))
2801 return for_each_node (mark_detached_root
, info
, &depth
, FALSE
);
2804 /* qsort predicate to sort calls by max_depth then count. */
2807 sort_calls (const void *a
, const void *b
)
2809 struct call_info
*const *c1
= a
;
2810 struct call_info
*const *c2
= b
;
2813 delta
= (*c2
)->max_depth
- (*c1
)->max_depth
;
2817 delta
= (*c2
)->count
- (*c1
)->count
;
2821 return (char *) c1
- (char *) c2
;
2825 unsigned int max_overlay_size
;
2828 /* Set linker_mark and gc_mark on any sections that we will put in
2829 overlays. These flags are used by the generic ELF linker, but we
2830 won't be continuing on to bfd_elf_final_link so it is OK to use
2831 them. linker_mark is clear before we get here. Set segment_mark
2832 on sections that are part of a pasted function (excluding the last
2835 Set up function rodata section if --overlay-rodata. We don't
2836 currently include merged string constant rodata sections since
2838 Sort the call graph so that the deepest nodes will be visited
2842 mark_overlay_section (struct function_info
*fun
,
2843 struct bfd_link_info
*info
,
2846 struct call_info
*call
;
2848 struct _mos_param
*mos_param
= param
;
2854 if (!fun
->sec
->linker_mark
)
2858 fun
->sec
->linker_mark
= 1;
2859 fun
->sec
->gc_mark
= 1;
2860 fun
->sec
->segment_mark
= 0;
2861 /* Ensure SEC_CODE is set on this text section (it ought to
2862 be!), and SEC_CODE is clear on rodata sections. We use
2863 this flag to differentiate the two overlay section types. */
2864 fun
->sec
->flags
|= SEC_CODE
;
2866 if (spu_hash_table (info
)->auto_overlay
& OVERLAY_RODATA
)
2870 /* Find the rodata section corresponding to this function's
2872 if (strcmp (fun
->sec
->name
, ".text") == 0)
2874 name
= bfd_malloc (sizeof (".rodata"));
2877 memcpy (name
, ".rodata", sizeof (".rodata"));
2879 else if (strncmp (fun
->sec
->name
, ".text.", 6) == 0)
2881 size_t len
= strlen (fun
->sec
->name
);
2882 name
= bfd_malloc (len
+ 3);
2885 memcpy (name
, ".rodata", sizeof (".rodata"));
2886 memcpy (name
+ 7, fun
->sec
->name
+ 5, len
- 4);
2888 else if (strncmp (fun
->sec
->name
, ".gnu.linkonce.t.", 16) == 0)
2890 size_t len
= strlen (fun
->sec
->name
) + 1;
2891 name
= bfd_malloc (len
);
2894 memcpy (name
, fun
->sec
->name
, len
);
2900 asection
*rodata
= NULL
;
2901 asection
*group_sec
= elf_section_data (fun
->sec
)->next_in_group
;
2902 if (group_sec
== NULL
)
2903 rodata
= bfd_get_section_by_name (fun
->sec
->owner
, name
);
2905 while (group_sec
!= NULL
&& group_sec
!= fun
->sec
)
2907 if (strcmp (group_sec
->name
, name
) == 0)
2912 group_sec
= elf_section_data (group_sec
)->next_in_group
;
2914 fun
->rodata
= rodata
;
2917 fun
->rodata
->linker_mark
= 1;
2918 fun
->rodata
->gc_mark
= 1;
2919 fun
->rodata
->flags
&= ~SEC_CODE
;
2924 size
= fun
->sec
->size
;
2926 size
+= fun
->rodata
->size
;
2927 if (mos_param
->max_overlay_size
< size
)
2928 mos_param
->max_overlay_size
= size
;
2931 for (count
= 0, call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
2936 struct call_info
**calls
= bfd_malloc (count
* sizeof (*calls
));
2940 for (count
= 0, call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
2941 calls
[count
++] = call
;
2943 qsort (calls
, count
, sizeof (*calls
), sort_calls
);
2945 fun
->call_list
= NULL
;
2949 calls
[count
]->next
= fun
->call_list
;
2950 fun
->call_list
= calls
[count
];
2955 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
2957 if (call
->is_pasted
)
2959 /* There can only be one is_pasted call per function_info. */
2960 BFD_ASSERT (!fun
->sec
->segment_mark
);
2961 fun
->sec
->segment_mark
= 1;
2963 if (!mark_overlay_section (call
->fun
, info
, param
))
2967 /* Don't put entry code into an overlay. The overlay manager needs
2969 if (fun
->lo
+ fun
->sec
->output_offset
+ fun
->sec
->output_section
->vma
2970 == info
->output_bfd
->start_address
)
2972 fun
->sec
->linker_mark
= 0;
2973 if (fun
->rodata
!= NULL
)
2974 fun
->rodata
->linker_mark
= 0;
2979 /* If non-zero then unmark functions called from those within sections
2980 that we need to unmark. Unfortunately this isn't reliable since the
2981 call graph cannot know the destination of function pointer calls. */
2982 #define RECURSE_UNMARK 0
2985 asection
*exclude_input_section
;
2986 asection
*exclude_output_section
;
2987 unsigned long clearing
;
2990 /* Undo some of mark_overlay_section's work. */
2993 unmark_overlay_section (struct function_info
*fun
,
2994 struct bfd_link_info
*info
,
2997 struct call_info
*call
;
2998 struct _uos_param
*uos_param
= param
;
2999 unsigned int excluded
= 0;
3007 if (fun
->sec
== uos_param
->exclude_input_section
3008 || fun
->sec
->output_section
== uos_param
->exclude_output_section
)
3012 uos_param
->clearing
+= excluded
;
3014 if (RECURSE_UNMARK
? uos_param
->clearing
: excluded
)
3016 fun
->sec
->linker_mark
= 0;
3018 fun
->rodata
->linker_mark
= 0;
3021 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3022 if (!unmark_overlay_section (call
->fun
, info
, param
))
3026 uos_param
->clearing
-= excluded
;
3031 unsigned int lib_size
;
3032 asection
**lib_sections
;
3035 /* Add sections we have marked as belonging to overlays to an array
3036 for consideration as non-overlay sections. The array consist of
3037 pairs of sections, (text,rodata), for functions in the call graph. */
3040 collect_lib_sections (struct function_info
*fun
,
3041 struct bfd_link_info
*info
,
3044 struct _cl_param
*lib_param
= param
;
3045 struct call_info
*call
;
3052 if (!fun
->sec
->linker_mark
|| !fun
->sec
->gc_mark
|| fun
->sec
->segment_mark
)
3055 size
= fun
->sec
->size
;
3057 size
+= fun
->rodata
->size
;
3058 if (size
<= lib_param
->lib_size
)
3060 *lib_param
->lib_sections
++ = fun
->sec
;
3061 fun
->sec
->gc_mark
= 0;
3062 if (fun
->rodata
&& fun
->rodata
->linker_mark
&& fun
->rodata
->gc_mark
)
3064 *lib_param
->lib_sections
++ = fun
->rodata
;
3065 fun
->rodata
->gc_mark
= 0;
3068 *lib_param
->lib_sections
++ = NULL
;
3071 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3072 collect_lib_sections (call
->fun
, info
, param
);
3077 /* qsort predicate to sort sections by call count. */
3080 sort_lib (const void *a
, const void *b
)
3082 asection
*const *s1
= a
;
3083 asection
*const *s2
= b
;
3084 struct _spu_elf_section_data
*sec_data
;
3085 struct spu_elf_stack_info
*sinfo
;
3089 if ((sec_data
= spu_elf_section_data (*s1
)) != NULL
3090 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3093 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3094 delta
-= sinfo
->fun
[i
].call_count
;
3097 if ((sec_data
= spu_elf_section_data (*s2
)) != NULL
3098 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3101 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3102 delta
+= sinfo
->fun
[i
].call_count
;
3111 /* Remove some sections from those marked to be in overlays. Choose
3112 those that are called from many places, likely library functions. */
3115 auto_ovl_lib_functions (struct bfd_link_info
*info
, unsigned int lib_size
)
3118 asection
**lib_sections
;
3119 unsigned int i
, lib_count
;
3120 struct _cl_param collect_lib_param
;
3121 struct function_info dummy_caller
;
3123 memset (&dummy_caller
, 0, sizeof (dummy_caller
));
3125 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3127 extern const bfd_target bfd_elf32_spu_vec
;
3130 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3133 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3134 if (sec
->linker_mark
3135 && sec
->size
< lib_size
3136 && (sec
->flags
& SEC_CODE
) != 0)
3139 lib_sections
= bfd_malloc (lib_count
* 2 * sizeof (*lib_sections
));
3140 if (lib_sections
== NULL
)
3141 return (unsigned int) -1;
3142 collect_lib_param
.lib_size
= lib_size
;
3143 collect_lib_param
.lib_sections
= lib_sections
;
3144 if (!for_each_node (collect_lib_sections
, info
, &collect_lib_param
,
3146 return (unsigned int) -1;
3147 lib_count
= (collect_lib_param
.lib_sections
- lib_sections
) / 2;
3149 /* Sort sections so that those with the most calls are first. */
3151 qsort (lib_sections
, lib_count
, 2 * sizeof (*lib_sections
), sort_lib
);
3153 for (i
= 0; i
< lib_count
; i
++)
3155 unsigned int tmp
, stub_size
;
3157 struct _spu_elf_section_data
*sec_data
;
3158 struct spu_elf_stack_info
*sinfo
;
3160 sec
= lib_sections
[2 * i
];
3161 /* If this section is OK, its size must be less than lib_size. */
3163 /* If it has a rodata section, then add that too. */
3164 if (lib_sections
[2 * i
+ 1])
3165 tmp
+= lib_sections
[2 * i
+ 1]->size
;
3166 /* Add any new overlay call stubs needed by the section. */
3169 && (sec_data
= spu_elf_section_data (sec
)) != NULL
3170 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3173 struct call_info
*call
;
3175 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3176 for (call
= sinfo
->fun
[k
].call_list
; call
; call
= call
->next
)
3177 if (call
->fun
->sec
->linker_mark
)
3179 struct call_info
*p
;
3180 for (p
= dummy_caller
.call_list
; p
; p
= p
->next
)
3181 if (p
->fun
== call
->fun
)
3184 stub_size
+= OVL_STUB_SIZE
;
3187 if (tmp
+ stub_size
< lib_size
)
3189 struct call_info
**pp
, *p
;
3191 /* This section fits. Mark it as non-overlay. */
3192 lib_sections
[2 * i
]->linker_mark
= 0;
3193 if (lib_sections
[2 * i
+ 1])
3194 lib_sections
[2 * i
+ 1]->linker_mark
= 0;
3195 lib_size
-= tmp
+ stub_size
;
3196 /* Call stubs to the section we just added are no longer
3198 pp
= &dummy_caller
.call_list
;
3199 while ((p
= *pp
) != NULL
)
3200 if (!p
->fun
->sec
->linker_mark
)
3202 lib_size
+= OVL_STUB_SIZE
;
3208 /* Add new call stubs to dummy_caller. */
3209 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
3210 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3213 struct call_info
*call
;
3215 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3216 for (call
= sinfo
->fun
[k
].call_list
;
3219 if (call
->fun
->sec
->linker_mark
)
3221 struct call_info
*callee
;
3222 callee
= bfd_malloc (sizeof (*callee
));
3224 return (unsigned int) -1;
3226 if (!insert_callee (&dummy_caller
, callee
))
3232 while (dummy_caller
.call_list
!= NULL
)
3234 struct call_info
*call
= dummy_caller
.call_list
;
3235 dummy_caller
.call_list
= call
->next
;
3238 for (i
= 0; i
< 2 * lib_count
; i
++)
3239 if (lib_sections
[i
])
3240 lib_sections
[i
]->gc_mark
= 1;
3241 free (lib_sections
);
3245 /* Build an array of overlay sections. The deepest node's section is
3246 added first, then its parent node's section, then everything called
3247 from the parent section. The idea being to group sections to
3248 minimise calls between different overlays. */
3251 collect_overlays (struct function_info
*fun
,
3252 struct bfd_link_info
*info
,
3255 struct call_info
*call
;
3256 bfd_boolean added_fun
;
3257 asection
***ovly_sections
= param
;
3263 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3264 if (!call
->is_pasted
)
3266 if (!collect_overlays (call
->fun
, info
, ovly_sections
))
3272 if (fun
->sec
->linker_mark
&& fun
->sec
->gc_mark
)
3274 fun
->sec
->gc_mark
= 0;
3275 *(*ovly_sections
)++ = fun
->sec
;
3276 if (fun
->rodata
&& fun
->rodata
->linker_mark
&& fun
->rodata
->gc_mark
)
3278 fun
->rodata
->gc_mark
= 0;
3279 *(*ovly_sections
)++ = fun
->rodata
;
3282 *(*ovly_sections
)++ = NULL
;
3285 /* Pasted sections must stay with the first section. We don't
3286 put pasted sections in the array, just the first section.
3287 Mark subsequent sections as already considered. */
3288 if (fun
->sec
->segment_mark
)
3290 struct function_info
*call_fun
= fun
;
3293 for (call
= call_fun
->call_list
; call
!= NULL
; call
= call
->next
)
3294 if (call
->is_pasted
)
3296 call_fun
= call
->fun
;
3297 call_fun
->sec
->gc_mark
= 0;
3298 if (call_fun
->rodata
)
3299 call_fun
->rodata
->gc_mark
= 0;
3305 while (call_fun
->sec
->segment_mark
);
3309 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3310 if (!collect_overlays (call
->fun
, info
, ovly_sections
))
3315 struct _spu_elf_section_data
*sec_data
;
3316 struct spu_elf_stack_info
*sinfo
;
3318 if ((sec_data
= spu_elf_section_data (fun
->sec
)) != NULL
3319 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3322 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3323 if (!collect_overlays (&sinfo
->fun
[i
], info
, ovly_sections
))
3331 struct _sum_stack_param
{
3333 size_t overall_stack
;
3334 bfd_boolean emit_stack_syms
;
3337 /* Descend the call graph for FUN, accumulating total stack required. */
3340 sum_stack (struct function_info
*fun
,
3341 struct bfd_link_info
*info
,
3344 struct call_info
*call
;
3345 struct function_info
*max
;
3346 size_t stack
, cum_stack
;
3348 bfd_boolean has_call
;
3349 struct _sum_stack_param
*sum_stack_param
= param
;
3350 struct spu_link_hash_table
*htab
;
3352 cum_stack
= fun
->stack
;
3353 sum_stack_param
->cum_stack
= cum_stack
;
3359 for (call
= fun
->call_list
; call
; call
= call
->next
)
3361 if (!call
->is_pasted
)
3363 if (!sum_stack (call
->fun
, info
, sum_stack_param
))
3365 stack
= sum_stack_param
->cum_stack
;
3366 /* Include caller stack for normal calls, don't do so for
3367 tail calls. fun->stack here is local stack usage for
3369 if (!call
->is_tail
|| call
->is_pasted
|| call
->fun
->start
!= NULL
)
3370 stack
+= fun
->stack
;
3371 if (cum_stack
< stack
)
3378 sum_stack_param
->cum_stack
= cum_stack
;
3380 /* Now fun->stack holds cumulative stack. */
3381 fun
->stack
= cum_stack
;
3385 && sum_stack_param
->overall_stack
< cum_stack
)
3386 sum_stack_param
->overall_stack
= cum_stack
;
3388 htab
= spu_hash_table (info
);
3389 if (htab
->auto_overlay
)
3392 f1
= func_name (fun
);
3394 info
->callbacks
->info (_(" %s: 0x%v\n"), f1
, (bfd_vma
) cum_stack
);
3395 info
->callbacks
->minfo (_("%s: 0x%v 0x%v\n"),
3396 f1
, (bfd_vma
) stack
, (bfd_vma
) cum_stack
);
3400 info
->callbacks
->minfo (_(" calls:\n"));
3401 for (call
= fun
->call_list
; call
; call
= call
->next
)
3402 if (!call
->is_pasted
)
3404 const char *f2
= func_name (call
->fun
);
3405 const char *ann1
= call
->fun
== max
? "*" : " ";
3406 const char *ann2
= call
->is_tail
? "t" : " ";
3408 info
->callbacks
->minfo (_(" %s%s %s\n"), ann1
, ann2
, f2
);
3412 if (sum_stack_param
->emit_stack_syms
)
3414 char *name
= bfd_malloc (18 + strlen (f1
));
3415 struct elf_link_hash_entry
*h
;
3420 if (fun
->global
|| ELF_ST_BIND (fun
->u
.sym
->st_info
) == STB_GLOBAL
)
3421 sprintf (name
, "__stack_%s", f1
);
3423 sprintf (name
, "__stack_%x_%s", fun
->sec
->id
& 0xffffffff, f1
);
3425 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
3428 && (h
->root
.type
== bfd_link_hash_new
3429 || h
->root
.type
== bfd_link_hash_undefined
3430 || h
->root
.type
== bfd_link_hash_undefweak
))
3432 h
->root
.type
= bfd_link_hash_defined
;
3433 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
3434 h
->root
.u
.def
.value
= cum_stack
;
3439 h
->ref_regular_nonweak
= 1;
3440 h
->forced_local
= 1;
3448 /* SEC is part of a pasted function. Return the call_info for the
3449 next section of this function. */
3451 static struct call_info
*
3452 find_pasted_call (asection
*sec
)
3454 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
3455 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
3456 struct call_info
*call
;
3459 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3460 for (call
= sinfo
->fun
[k
].call_list
; call
!= NULL
; call
= call
->next
)
3461 if (call
->is_pasted
)
3467 /* qsort predicate to sort bfds by file name. */
3470 sort_bfds (const void *a
, const void *b
)
3472 bfd
*const *abfd1
= a
;
3473 bfd
*const *abfd2
= b
;
3475 return strcmp ((*abfd1
)->filename
, (*abfd2
)->filename
);
3478 /* Handle --auto-overlay. */
3480 static void spu_elf_auto_overlay (struct bfd_link_info
*, void (*) (void))
3484 spu_elf_auto_overlay (struct bfd_link_info
*info
,
3485 void (*spu_elf_load_ovl_mgr
) (void))
3489 struct elf_segment_map
*m
;
3490 unsigned int fixed_size
, lo
, hi
;
3491 struct spu_link_hash_table
*htab
;
3492 unsigned int base
, i
, count
, bfd_count
;
3494 asection
**ovly_sections
, **ovly_p
;
3496 unsigned int total_overlay_size
, overlay_size
;
3497 struct elf_link_hash_entry
*h
;
3498 struct _mos_param mos_param
;
3499 struct _uos_param uos_param
;
3500 struct function_info dummy_caller
;
3502 /* Find the extents of our loadable image. */
3503 lo
= (unsigned int) -1;
3505 for (m
= elf_tdata (info
->output_bfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
3506 if (m
->p_type
== PT_LOAD
)
3507 for (i
= 0; i
< m
->count
; i
++)
3508 if (m
->sections
[i
]->size
!= 0)
3510 if (m
->sections
[i
]->vma
< lo
)
3511 lo
= m
->sections
[i
]->vma
;
3512 if (m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1 > hi
)
3513 hi
= m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1;
3515 fixed_size
= hi
+ 1 - lo
;
3517 if (!discover_functions (info
))
3520 if (!build_call_tree (info
))
3523 uos_param
.exclude_input_section
= 0;
3524 uos_param
.exclude_output_section
3525 = bfd_get_section_by_name (info
->output_bfd
, ".interrupt");
3527 htab
= spu_hash_table (info
);
3528 h
= elf_link_hash_lookup (&htab
->elf
, "__ovly_load",
3529 FALSE
, FALSE
, FALSE
);
3531 && (h
->root
.type
== bfd_link_hash_defined
3532 || h
->root
.type
== bfd_link_hash_defweak
)
3535 /* We have a user supplied overlay manager. */
3536 uos_param
.exclude_input_section
= h
->root
.u
.def
.section
;
3540 /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
3541 builtin version to .text, and will adjust .text size. */
3542 asection
*text
= bfd_get_section_by_name (info
->output_bfd
, ".text");
3544 fixed_size
-= text
->size
;
3545 spu_elf_load_ovl_mgr ();
3546 text
= bfd_get_section_by_name (info
->output_bfd
, ".text");
3548 fixed_size
+= text
->size
;
3551 /* Mark overlay sections, and find max overlay section size. */
3552 mos_param
.max_overlay_size
= 0;
3553 if (!for_each_node (mark_overlay_section
, info
, &mos_param
, TRUE
))
3556 /* We can't put the overlay manager or interrupt routines in
3558 uos_param
.clearing
= 0;
3559 if ((uos_param
.exclude_input_section
3560 || uos_param
.exclude_output_section
)
3561 && !for_each_node (unmark_overlay_section
, info
, &uos_param
, TRUE
))
3565 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3567 bfd_arr
= bfd_malloc (bfd_count
* sizeof (*bfd_arr
));
3568 if (bfd_arr
== NULL
)
3571 /* Count overlay sections, and subtract their sizes from "fixed_size". */
3574 total_overlay_size
= 0;
3575 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3577 extern const bfd_target bfd_elf32_spu_vec
;
3579 unsigned int old_count
;
3581 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3585 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3586 if (sec
->linker_mark
)
3588 if ((sec
->flags
& SEC_CODE
) != 0)
3590 fixed_size
-= sec
->size
;
3591 total_overlay_size
+= sec
->size
;
3593 if (count
!= old_count
)
3594 bfd_arr
[bfd_count
++] = ibfd
;
3597 /* Since the overlay link script selects sections by file name and
3598 section name, ensure that file names are unique. */
3601 bfd_boolean ok
= TRUE
;
3603 qsort (bfd_arr
, bfd_count
, sizeof (*bfd_arr
), sort_bfds
);
3604 for (i
= 1; i
< bfd_count
; ++i
)
3605 if (strcmp (bfd_arr
[i
- 1]->filename
, bfd_arr
[i
]->filename
) == 0)
3607 if (bfd_arr
[i
- 1]->my_archive
== bfd_arr
[i
]->my_archive
)
3609 if (bfd_arr
[i
- 1]->my_archive
&& bfd_arr
[i
]->my_archive
)
3610 info
->callbacks
->einfo (_("%s duplicated in %s\n"),
3611 bfd_arr
[i
]->filename
,
3612 bfd_arr
[i
]->my_archive
->filename
);
3614 info
->callbacks
->einfo (_("%s duplicated\n"),
3615 bfd_arr
[i
]->filename
);
3621 info
->callbacks
->einfo (_("sorry, no support for duplicate "
3622 "object files in auto-overlay script\n"));
3623 bfd_set_error (bfd_error_bad_value
);
3629 if (htab
->reserved
== 0)
3631 struct _sum_stack_param sum_stack_param
;
3633 sum_stack_param
.emit_stack_syms
= 0;
3634 sum_stack_param
.overall_stack
= 0;
3635 if (!for_each_node (sum_stack
, info
, &sum_stack_param
, TRUE
))
3637 htab
->reserved
= sum_stack_param
.overall_stack
+ htab
->extra_stack_space
;
3639 fixed_size
+= htab
->reserved
;
3640 fixed_size
+= htab
->non_ovly_stub
* OVL_STUB_SIZE
;
3641 if (fixed_size
+ mos_param
.max_overlay_size
<= htab
->local_store
)
3643 /* Guess number of overlays. Assuming overlay buffer is on
3644 average only half full should be conservative. */
3645 ovlynum
= total_overlay_size
* 2 / (htab
->local_store
- fixed_size
);
3646 /* Space for _ovly_table[], _ovly_buf_table[] and toe. */
3647 fixed_size
+= ovlynum
* 16 + 16 + 4 + 16;
3650 if (fixed_size
+ mos_param
.max_overlay_size
> htab
->local_store
)
3651 info
->callbacks
->einfo (_("non-overlay size of 0x%v plus maximum overlay "
3652 "size of 0x%v exceeds local store\n"),
3653 (bfd_vma
) fixed_size
,
3654 (bfd_vma
) mos_param
.max_overlay_size
);
3656 /* Now see if we should put some functions in the non-overlay area. */
3657 else if (fixed_size
< htab
->overlay_fixed
)
3659 unsigned int max_fixed
, lib_size
;
3661 max_fixed
= htab
->local_store
- mos_param
.max_overlay_size
;
3662 if (max_fixed
> htab
->overlay_fixed
)
3663 max_fixed
= htab
->overlay_fixed
;
3664 lib_size
= max_fixed
- fixed_size
;
3665 lib_size
= auto_ovl_lib_functions (info
, lib_size
);
3666 if (lib_size
== (unsigned int) -1)
3668 fixed_size
= max_fixed
- lib_size
;
3671 /* Build an array of sections, suitably sorted to place into
3673 ovly_sections
= bfd_malloc (2 * count
* sizeof (*ovly_sections
));
3674 if (ovly_sections
== NULL
)
3676 ovly_p
= ovly_sections
;
3677 if (!for_each_node (collect_overlays
, info
, &ovly_p
, TRUE
))
3679 count
= (size_t) (ovly_p
- ovly_sections
) / 2;
3681 script
= htab
->spu_elf_open_overlay_script ();
3683 if (fprintf (script
, "SECTIONS\n{\n OVERLAY :\n {\n") <= 0)
3686 memset (&dummy_caller
, 0, sizeof (dummy_caller
));
3687 overlay_size
= htab
->local_store
- fixed_size
;
3690 while (base
< count
)
3692 unsigned int size
= 0;
3695 for (i
= base
; i
< count
; i
++)
3699 unsigned int stub_size
;
3700 struct call_info
*call
, *pasty
;
3701 struct _spu_elf_section_data
*sec_data
;
3702 struct spu_elf_stack_info
*sinfo
;
3705 /* See whether we can add this section to the current
3706 overlay without overflowing our overlay buffer. */
3707 sec
= ovly_sections
[2 * i
];
3708 tmp
= size
+ sec
->size
;
3709 if (ovly_sections
[2 * i
+ 1])
3710 tmp
+= ovly_sections
[2 * i
+ 1]->size
;
3711 if (tmp
> overlay_size
)
3713 if (sec
->segment_mark
)
3715 /* Pasted sections must stay together, so add their
3717 struct call_info
*pasty
= find_pasted_call (sec
);
3718 while (pasty
!= NULL
)
3720 struct function_info
*call_fun
= pasty
->fun
;
3721 tmp
+= call_fun
->sec
->size
;
3722 if (call_fun
->rodata
)
3723 tmp
+= call_fun
->rodata
->size
;
3724 for (pasty
= call_fun
->call_list
; pasty
; pasty
= pasty
->next
)
3725 if (pasty
->is_pasted
)
3729 if (tmp
> overlay_size
)
3732 /* If we add this section, we might need new overlay call
3733 stubs. Add any overlay section calls to dummy_call. */
3735 sec_data
= spu_elf_section_data (sec
);
3736 sinfo
= sec_data
->u
.i
.stack_info
;
3737 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3738 for (call
= sinfo
->fun
[k
].call_list
; call
; call
= call
->next
)
3739 if (call
->is_pasted
)
3741 BFD_ASSERT (pasty
== NULL
);
3744 else if (call
->fun
->sec
->linker_mark
)
3746 if (!copy_callee (&dummy_caller
, call
))
3749 while (pasty
!= NULL
)
3751 struct function_info
*call_fun
= pasty
->fun
;
3753 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
3754 if (call
->is_pasted
)
3756 BFD_ASSERT (pasty
== NULL
);
3759 else if (!copy_callee (&dummy_caller
, call
))
3763 /* Calculate call stub size. */
3765 for (call
= dummy_caller
.call_list
; call
; call
= call
->next
)
3769 stub_size
+= OVL_STUB_SIZE
;
3770 /* If the call is within this overlay, we won't need a
3772 for (k
= base
; k
< i
+ 1; k
++)
3773 if (call
->fun
->sec
== ovly_sections
[2 * k
])
3775 stub_size
-= OVL_STUB_SIZE
;
3779 if (tmp
+ stub_size
> overlay_size
)
3787 info
->callbacks
->einfo (_("%B:%A%s exceeds overlay size\n"),
3788 ovly_sections
[2 * i
]->owner
,
3789 ovly_sections
[2 * i
],
3790 ovly_sections
[2 * i
+ 1] ? " + rodata" : "");
3791 bfd_set_error (bfd_error_bad_value
);
3795 if (fprintf (script
, " .ovly%d {\n", ++ovlynum
) <= 0)
3797 for (j
= base
; j
< i
; j
++)
3799 asection
*sec
= ovly_sections
[2 * j
];
3801 if (fprintf (script
, " %s%c%s (%s)\n",
3802 (sec
->owner
->my_archive
!= NULL
3803 ? sec
->owner
->my_archive
->filename
: ""),
3804 info
->path_separator
,
3805 sec
->owner
->filename
,
3808 if (sec
->segment_mark
)
3810 struct call_info
*call
= find_pasted_call (sec
);
3811 while (call
!= NULL
)
3813 struct function_info
*call_fun
= call
->fun
;
3814 sec
= call_fun
->sec
;
3815 if (fprintf (script
, " %s%c%s (%s)\n",
3816 (sec
->owner
->my_archive
!= NULL
3817 ? sec
->owner
->my_archive
->filename
: ""),
3818 info
->path_separator
,
3819 sec
->owner
->filename
,
3822 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
3823 if (call
->is_pasted
)
3829 for (j
= base
; j
< i
; j
++)
3831 asection
*sec
= ovly_sections
[2 * j
+ 1];
3833 && fprintf (script
, " %s%c%s (%s)\n",
3834 (sec
->owner
->my_archive
!= NULL
3835 ? sec
->owner
->my_archive
->filename
: ""),
3836 info
->path_separator
,
3837 sec
->owner
->filename
,
3841 sec
= ovly_sections
[2 * j
];
3842 if (sec
->segment_mark
)
3844 struct call_info
*call
= find_pasted_call (sec
);
3845 while (call
!= NULL
)
3847 struct function_info
*call_fun
= call
->fun
;
3848 sec
= call_fun
->rodata
;
3850 && fprintf (script
, " %s%c%s (%s)\n",
3851 (sec
->owner
->my_archive
!= NULL
3852 ? sec
->owner
->my_archive
->filename
: ""),
3853 info
->path_separator
,
3854 sec
->owner
->filename
,
3857 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
3858 if (call
->is_pasted
)
3864 if (fprintf (script
, " }\n") <= 0)
3867 while (dummy_caller
.call_list
!= NULL
)
3869 struct call_info
*call
= dummy_caller
.call_list
;
3870 dummy_caller
.call_list
= call
->next
;
3876 free (ovly_sections
);
3878 if (fprintf (script
, " }\n}\nINSERT AFTER .text;\n") <= 0)
3880 if (fclose (script
) != 0)
3883 if (htab
->auto_overlay
& AUTO_RELINK
)
3884 htab
->spu_elf_relink ();
3889 bfd_set_error (bfd_error_system_call
);
3891 info
->callbacks
->einfo ("%F%P: auto overlay error: %E\n");
3895 /* Provide an estimate of total stack required. */
3898 spu_elf_stack_analysis (struct bfd_link_info
*info
, int emit_stack_syms
)
3900 struct _sum_stack_param sum_stack_param
;
3902 if (!discover_functions (info
))
3905 if (!build_call_tree (info
))
3908 info
->callbacks
->info (_("Stack size for call graph root nodes.\n"));
3909 info
->callbacks
->minfo (_("\nStack size for functions. "
3910 "Annotations: '*' max stack, 't' tail call\n"));
3912 sum_stack_param
.emit_stack_syms
= emit_stack_syms
;
3913 sum_stack_param
.overall_stack
= 0;
3914 if (!for_each_node (sum_stack
, info
, &sum_stack_param
, TRUE
))
3917 info
->callbacks
->info (_("Maximum stack required is 0x%v\n"),
3918 (bfd_vma
) sum_stack_param
.overall_stack
);
3922 /* Perform a final link. */
3925 spu_elf_final_link (bfd
*output_bfd
, struct bfd_link_info
*info
)
3927 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
3929 if (htab
->auto_overlay
)
3930 spu_elf_auto_overlay (info
, htab
->spu_elf_load_ovl_mgr
);
3932 if (htab
->stack_analysis
3933 && !spu_elf_stack_analysis (info
, htab
->emit_stack_syms
))
3934 info
->callbacks
->einfo ("%X%P: stack analysis error: %E\n");
3936 return bfd_elf_final_link (output_bfd
, info
);
3939 /* Called when not normally emitting relocs, ie. !info->relocatable
3940 and !info->emitrelocations. Returns a count of special relocs
3941 that need to be emitted. */
3944 spu_elf_count_relocs (struct bfd_link_info
*info
, asection
*sec
)
3946 Elf_Internal_Rela
*relocs
;
3947 unsigned int count
= 0;
3949 relocs
= _bfd_elf_link_read_relocs (sec
->owner
, sec
, NULL
, NULL
,
3953 Elf_Internal_Rela
*rel
;
3954 Elf_Internal_Rela
*relend
= relocs
+ sec
->reloc_count
;
3956 for (rel
= relocs
; rel
< relend
; rel
++)
3958 int r_type
= ELF32_R_TYPE (rel
->r_info
);
3959 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
3963 if (elf_section_data (sec
)->relocs
!= relocs
)
3970 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
3973 spu_elf_relocate_section (bfd
*output_bfd
,
3974 struct bfd_link_info
*info
,
3976 asection
*input_section
,
3978 Elf_Internal_Rela
*relocs
,
3979 Elf_Internal_Sym
*local_syms
,
3980 asection
**local_sections
)
3982 Elf_Internal_Shdr
*symtab_hdr
;
3983 struct elf_link_hash_entry
**sym_hashes
;
3984 Elf_Internal_Rela
*rel
, *relend
;
3985 struct spu_link_hash_table
*htab
;
3986 asection
*ea
= bfd_get_section_by_name (output_bfd
, "._ea");
3988 bfd_boolean emit_these_relocs
= FALSE
;
3989 bfd_boolean is_ea_sym
;
3992 htab
= spu_hash_table (info
);
3993 stubs
= (htab
->stub_sec
!= NULL
3994 && maybe_needs_stubs (input_section
, output_bfd
));
3995 symtab_hdr
= &elf_tdata (input_bfd
)->symtab_hdr
;
3996 sym_hashes
= (struct elf_link_hash_entry
**) (elf_sym_hashes (input_bfd
));
3999 relend
= relocs
+ input_section
->reloc_count
;
4000 for (; rel
< relend
; rel
++)
4003 reloc_howto_type
*howto
;
4004 unsigned int r_symndx
;
4005 Elf_Internal_Sym
*sym
;
4007 struct elf_link_hash_entry
*h
;
4008 const char *sym_name
;
4011 bfd_reloc_status_type r
;
4012 bfd_boolean unresolved_reloc
;
4014 enum _stub_type stub_type
;
4016 r_symndx
= ELF32_R_SYM (rel
->r_info
);
4017 r_type
= ELF32_R_TYPE (rel
->r_info
);
4018 howto
= elf_howto_table
+ r_type
;
4019 unresolved_reloc
= FALSE
;
4024 if (r_symndx
< symtab_hdr
->sh_info
)
4026 sym
= local_syms
+ r_symndx
;
4027 sec
= local_sections
[r_symndx
];
4028 sym_name
= bfd_elf_sym_name (input_bfd
, symtab_hdr
, sym
, sec
);
4029 relocation
= _bfd_elf_rela_local_sym (output_bfd
, sym
, &sec
, rel
);
4033 if (sym_hashes
== NULL
)
4036 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
4038 while (h
->root
.type
== bfd_link_hash_indirect
4039 || h
->root
.type
== bfd_link_hash_warning
)
4040 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
4043 if (h
->root
.type
== bfd_link_hash_defined
4044 || h
->root
.type
== bfd_link_hash_defweak
)
4046 sec
= h
->root
.u
.def
.section
;
4048 || sec
->output_section
== NULL
)
4049 /* Set a flag that will be cleared later if we find a
4050 relocation value for this symbol. output_section
4051 is typically NULL for symbols satisfied by a shared
4053 unresolved_reloc
= TRUE
;
4055 relocation
= (h
->root
.u
.def
.value
4056 + sec
->output_section
->vma
4057 + sec
->output_offset
);
4059 else if (h
->root
.type
== bfd_link_hash_undefweak
)
4061 else if (info
->unresolved_syms_in_objects
== RM_IGNORE
4062 && ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
)
4064 else if (!info
->relocatable
4065 && !(r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
))
4068 err
= (info
->unresolved_syms_in_objects
== RM_GENERATE_ERROR
4069 || ELF_ST_VISIBILITY (h
->other
) != STV_DEFAULT
);
4070 if (!info
->callbacks
->undefined_symbol (info
,
4071 h
->root
.root
.string
,
4074 rel
->r_offset
, err
))
4078 sym_name
= h
->root
.root
.string
;
4081 if (sec
!= NULL
&& elf_discarded_section (sec
))
4083 /* For relocs against symbols from removed linkonce sections,
4084 or sections discarded by a linker script, we just want the
4085 section contents zeroed. Avoid any special processing. */
4086 _bfd_clear_contents (howto
, input_bfd
, contents
+ rel
->r_offset
);
4092 if (info
->relocatable
)
4095 is_ea_sym
= (ea
!= NULL
4097 && sec
->output_section
== ea
);
4099 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
4103 /* ._ea is a special section that isn't allocated in SPU
4104 memory, but rather occupies space in PPU memory as
4105 part of an embedded ELF image. If this reloc is
4106 against a symbol defined in ._ea, then transform the
4107 reloc into an equivalent one without a symbol
4108 relative to the start of the ELF image. */
4109 rel
->r_addend
+= (relocation
4111 + elf_section_data (ea
)->this_hdr
.sh_offset
);
4112 rel
->r_info
= ELF32_R_INFO (0, r_type
);
4114 emit_these_relocs
= TRUE
;
4119 unresolved_reloc
= TRUE
;
4121 if (unresolved_reloc
)
4123 (*_bfd_error_handler
)
4124 (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
4126 bfd_get_section_name (input_bfd
, input_section
),
4127 (long) rel
->r_offset
,
4133 /* If this symbol is in an overlay area, we may need to relocate
4134 to the overlay stub. */
4135 addend
= rel
->r_addend
;
4137 && (stub_type
= needs_ovl_stub (h
, sym
, sec
, input_section
, rel
,
4138 contents
, info
)) != no_stub
)
4140 unsigned int ovl
= 0;
4141 struct got_entry
*g
, **head
;
4143 if (stub_type
!= nonovl_stub
)
4144 ovl
= (spu_elf_section_data (input_section
->output_section
)
4148 head
= &h
->got
.glist
;
4150 head
= elf_local_got_ents (input_bfd
) + r_symndx
;
4152 for (g
= *head
; g
!= NULL
; g
= g
->next
)
4153 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
4158 relocation
= g
->stub_addr
;
4162 r
= _bfd_final_link_relocate (howto
,
4166 rel
->r_offset
, relocation
, addend
);
4168 if (r
!= bfd_reloc_ok
)
4170 const char *msg
= (const char *) 0;
4174 case bfd_reloc_overflow
:
4175 if (!((*info
->callbacks
->reloc_overflow
)
4176 (info
, (h
? &h
->root
: NULL
), sym_name
, howto
->name
,
4177 (bfd_vma
) 0, input_bfd
, input_section
, rel
->r_offset
)))
4181 case bfd_reloc_undefined
:
4182 if (!((*info
->callbacks
->undefined_symbol
)
4183 (info
, sym_name
, input_bfd
, input_section
,
4184 rel
->r_offset
, TRUE
)))
4188 case bfd_reloc_outofrange
:
4189 msg
= _("internal error: out of range error");
4192 case bfd_reloc_notsupported
:
4193 msg
= _("internal error: unsupported relocation error");
4196 case bfd_reloc_dangerous
:
4197 msg
= _("internal error: dangerous error");
4201 msg
= _("internal error: unknown error");
4206 if (!((*info
->callbacks
->warning
)
4207 (info
, msg
, sym_name
, input_bfd
, input_section
,
4216 && emit_these_relocs
4217 && !info
->emitrelocations
)
4219 Elf_Internal_Rela
*wrel
;
4220 Elf_Internal_Shdr
*rel_hdr
;
4222 wrel
= rel
= relocs
;
4223 relend
= relocs
+ input_section
->reloc_count
;
4224 for (; rel
< relend
; rel
++)
4228 r_type
= ELF32_R_TYPE (rel
->r_info
);
4229 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
4232 input_section
->reloc_count
= wrel
- relocs
;
4233 /* Backflips for _bfd_elf_link_output_relocs. */
4234 rel_hdr
= &elf_section_data (input_section
)->rel_hdr
;
4235 rel_hdr
->sh_size
= input_section
->reloc_count
* rel_hdr
->sh_entsize
;
4242 /* Adjust _SPUEAR_ syms to point at their overlay stubs. */
4245 spu_elf_output_symbol_hook (struct bfd_link_info
*info
,
4246 const char *sym_name ATTRIBUTE_UNUSED
,
4247 Elf_Internal_Sym
*sym
,
4248 asection
*sym_sec ATTRIBUTE_UNUSED
,
4249 struct elf_link_hash_entry
*h
)
4251 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
4253 if (!info
->relocatable
4254 && htab
->stub_sec
!= NULL
4256 && (h
->root
.type
== bfd_link_hash_defined
4257 || h
->root
.type
== bfd_link_hash_defweak
)
4259 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0)
4261 struct got_entry
*g
;
4263 for (g
= h
->got
.glist
; g
!= NULL
; g
= g
->next
)
4264 if (g
->addend
== 0 && g
->ovl
== 0)
4266 sym
->st_shndx
= (_bfd_elf_section_from_bfd_section
4267 (htab
->stub_sec
[0]->output_section
->owner
,
4268 htab
->stub_sec
[0]->output_section
));
4269 sym
->st_value
= g
->stub_addr
;
/* Non-zero when the linker is producing a plugin image; consulted by
   spu_elf_post_process_headers when stamping the ELF header type.  */
static int spu_plugin = 0;

/* Record whether we are linking a plugin (VAL non-zero).  Exported to
   the linker front end via elf32-spu.h.  */

void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}
4285 /* Set ELF header e_type for plugins. */
4288 spu_elf_post_process_headers (bfd
*abfd
,
4289 struct bfd_link_info
*info ATTRIBUTE_UNUSED
)
4293 Elf_Internal_Ehdr
*i_ehdrp
= elf_elfheader (abfd
);
4295 i_ehdrp
->e_type
= ET_DYN
;
4299 /* We may add an extra PT_LOAD segment for .toe. We also need extra
4300 segments for overlays. */
4303 spu_elf_additional_program_headers (bfd
*abfd
, struct bfd_link_info
*info
)
4310 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
4311 extra
= htab
->num_overlays
;
4317 sec
= bfd_get_section_by_name (abfd
, ".toe");
4318 if (sec
!= NULL
&& (sec
->flags
& SEC_LOAD
) != 0)
4324 /* Remove .toe section from other PT_LOAD segments and put it in
4325 a segment of its own. Put overlays in separate segments too. */
4328 spu_elf_modify_segment_map (bfd
*abfd
, struct bfd_link_info
*info
)
4331 struct elf_segment_map
*m
;
4337 toe
= bfd_get_section_by_name (abfd
, ".toe");
4338 for (m
= elf_tdata (abfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
4339 if (m
->p_type
== PT_LOAD
&& m
->count
> 1)
4340 for (i
= 0; i
< m
->count
; i
++)
4341 if ((s
= m
->sections
[i
]) == toe
4342 || spu_elf_section_data (s
)->u
.o
.ovl_index
!= 0)
4344 struct elf_segment_map
*m2
;
4347 if (i
+ 1 < m
->count
)
4349 amt
= sizeof (struct elf_segment_map
);
4350 amt
+= (m
->count
- (i
+ 2)) * sizeof (m
->sections
[0]);
4351 m2
= bfd_zalloc (abfd
, amt
);
4354 m2
->count
= m
->count
- (i
+ 1);
4355 memcpy (m2
->sections
, m
->sections
+ i
+ 1,
4356 m2
->count
* sizeof (m
->sections
[0]));
4357 m2
->p_type
= PT_LOAD
;
4365 amt
= sizeof (struct elf_segment_map
);
4366 m2
= bfd_zalloc (abfd
, amt
);
4369 m2
->p_type
= PT_LOAD
;
4371 m2
->sections
[0] = s
;
4381 /* Tweak the section type of .note.spu_name. */
4384 spu_elf_fake_sections (bfd
*obfd ATTRIBUTE_UNUSED
,
4385 Elf_Internal_Shdr
*hdr
,
4388 if (strcmp (sec
->name
, SPU_PTNOTE_SPUNAME
) == 0)
4389 hdr
->sh_type
= SHT_NOTE
;
4393 /* Tweak phdrs before writing them out. */
4396 spu_elf_modify_program_headers (bfd
*abfd
, struct bfd_link_info
*info
)
4398 const struct elf_backend_data
*bed
;
4399 struct elf_obj_tdata
*tdata
;
4400 Elf_Internal_Phdr
*phdr
, *last
;
4401 struct spu_link_hash_table
*htab
;
4408 bed
= get_elf_backend_data (abfd
);
4409 tdata
= elf_tdata (abfd
);
4411 count
= tdata
->program_header_size
/ bed
->s
->sizeof_phdr
;
4412 htab
= spu_hash_table (info
);
4413 if (htab
->num_overlays
!= 0)
4415 struct elf_segment_map
*m
;
4418 for (i
= 0, m
= elf_tdata (abfd
)->segment_map
; m
; ++i
, m
= m
->next
)
4420 && (o
= spu_elf_section_data (m
->sections
[0])->u
.o
.ovl_index
) != 0)
4422 /* Mark this as an overlay header. */
4423 phdr
[i
].p_flags
|= PF_OVERLAY
;
4425 if (htab
->ovtab
!= NULL
&& htab
->ovtab
->size
!= 0)
4427 bfd_byte
*p
= htab
->ovtab
->contents
;
4428 unsigned int off
= o
* 16 + 8;
4430 /* Write file_off into _ovly_table. */
4431 bfd_put_32 (htab
->ovtab
->owner
, phdr
[i
].p_offset
, p
+ off
);
4436 /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
4437 of 16. This should always be possible when using the standard
4438 linker scripts, but don't create overlapping segments if
4439 someone is playing games with linker scripts. */
4441 for (i
= count
; i
-- != 0; )
4442 if (phdr
[i
].p_type
== PT_LOAD
)
4446 adjust
= -phdr
[i
].p_filesz
& 15;
4449 && phdr
[i
].p_offset
+ phdr
[i
].p_filesz
> last
->p_offset
- adjust
)
4452 adjust
= -phdr
[i
].p_memsz
& 15;
4455 && phdr
[i
].p_filesz
!= 0
4456 && phdr
[i
].p_vaddr
+ phdr
[i
].p_memsz
> last
->p_vaddr
- adjust
4457 && phdr
[i
].p_vaddr
+ phdr
[i
].p_memsz
<= last
->p_vaddr
)
4460 if (phdr
[i
].p_filesz
!= 0)
4464 if (i
== (unsigned int) -1)
4465 for (i
= count
; i
-- != 0; )
4466 if (phdr
[i
].p_type
== PT_LOAD
)
4470 adjust
= -phdr
[i
].p_filesz
& 15;
4471 phdr
[i
].p_filesz
+= adjust
;
4473 adjust
= -phdr
[i
].p_memsz
& 15;
4474 phdr
[i
].p_memsz
+= adjust
;
4480 #define TARGET_BIG_SYM bfd_elf32_spu_vec
4481 #define TARGET_BIG_NAME "elf32-spu"
4482 #define ELF_ARCH bfd_arch_spu
4483 #define ELF_MACHINE_CODE EM_SPU
4484 /* This matches the alignment need for DMA. */
4485 #define ELF_MAXPAGESIZE 0x80
4486 #define elf_backend_rela_normal 1
4487 #define elf_backend_can_gc_sections 1
4489 #define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
4490 #define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
4491 #define elf_info_to_howto spu_elf_info_to_howto
4492 #define elf_backend_count_relocs spu_elf_count_relocs
4493 #define elf_backend_relocate_section spu_elf_relocate_section
4494 #define elf_backend_symbol_processing spu_elf_backend_symbol_processing
4495 #define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
4496 #define elf_backend_object_p spu_elf_object_p
4497 #define bfd_elf32_new_section_hook spu_elf_new_section_hook
4498 #define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create
4500 #define elf_backend_additional_program_headers spu_elf_additional_program_headers
4501 #define elf_backend_modify_segment_map spu_elf_modify_segment_map
4502 #define elf_backend_modify_program_headers spu_elf_modify_program_headers
4503 #define elf_backend_post_process_headers spu_elf_post_process_headers
4504 #define elf_backend_fake_sections spu_elf_fake_sections
4505 #define elf_backend_special_sections spu_elf_special_sections
4506 #define bfd_elf32_bfd_final_link spu_elf_final_link
4508 #include "elf32-target.h"