1 /* SPU specific support for 32-bit ELF
3 Copyright 2006, 2007, 2008 Free Software Foundation, Inc.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
22 #include "libiberty.h"
28 #include "elf32-spu.h"
30 /* We use RELA style relocs. Don't define USE_REL. */
32 static bfd_reloc_status_type
spu_elf_rel9 (bfd
*, arelent
*, asymbol
*,
36 /* Values of type 'enum elf_spu_reloc_type' are used to index this
37 array, so it must be declared in the order of that type. */
39 static reloc_howto_type elf_howto_table
[] = {
40 HOWTO (R_SPU_NONE
, 0, 0, 0, FALSE
, 0, complain_overflow_dont
,
41 bfd_elf_generic_reloc
, "SPU_NONE",
42 FALSE
, 0, 0x00000000, FALSE
),
43 HOWTO (R_SPU_ADDR10
, 4, 2, 10, FALSE
, 14, complain_overflow_bitfield
,
44 bfd_elf_generic_reloc
, "SPU_ADDR10",
45 FALSE
, 0, 0x00ffc000, FALSE
),
46 HOWTO (R_SPU_ADDR16
, 2, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
47 bfd_elf_generic_reloc
, "SPU_ADDR16",
48 FALSE
, 0, 0x007fff80, FALSE
),
49 HOWTO (R_SPU_ADDR16_HI
, 16, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
50 bfd_elf_generic_reloc
, "SPU_ADDR16_HI",
51 FALSE
, 0, 0x007fff80, FALSE
),
52 HOWTO (R_SPU_ADDR16_LO
, 0, 2, 16, FALSE
, 7, complain_overflow_dont
,
53 bfd_elf_generic_reloc
, "SPU_ADDR16_LO",
54 FALSE
, 0, 0x007fff80, FALSE
),
55 HOWTO (R_SPU_ADDR18
, 0, 2, 18, FALSE
, 7, complain_overflow_bitfield
,
56 bfd_elf_generic_reloc
, "SPU_ADDR18",
57 FALSE
, 0, 0x01ffff80, FALSE
),
58 HOWTO (R_SPU_ADDR32
, 0, 2, 32, FALSE
, 0, complain_overflow_dont
,
59 bfd_elf_generic_reloc
, "SPU_ADDR32",
60 FALSE
, 0, 0xffffffff, FALSE
),
61 HOWTO (R_SPU_REL16
, 2, 2, 16, TRUE
, 7, complain_overflow_bitfield
,
62 bfd_elf_generic_reloc
, "SPU_REL16",
63 FALSE
, 0, 0x007fff80, TRUE
),
64 HOWTO (R_SPU_ADDR7
, 0, 2, 7, FALSE
, 14, complain_overflow_dont
,
65 bfd_elf_generic_reloc
, "SPU_ADDR7",
66 FALSE
, 0, 0x001fc000, FALSE
),
67 HOWTO (R_SPU_REL9
, 2, 2, 9, TRUE
, 0, complain_overflow_signed
,
68 spu_elf_rel9
, "SPU_REL9",
69 FALSE
, 0, 0x0180007f, TRUE
),
70 HOWTO (R_SPU_REL9I
, 2, 2, 9, TRUE
, 0, complain_overflow_signed
,
71 spu_elf_rel9
, "SPU_REL9I",
72 FALSE
, 0, 0x0000c07f, TRUE
),
73 HOWTO (R_SPU_ADDR10I
, 0, 2, 10, FALSE
, 14, complain_overflow_signed
,
74 bfd_elf_generic_reloc
, "SPU_ADDR10I",
75 FALSE
, 0, 0x00ffc000, FALSE
),
76 HOWTO (R_SPU_ADDR16I
, 0, 2, 16, FALSE
, 7, complain_overflow_signed
,
77 bfd_elf_generic_reloc
, "SPU_ADDR16I",
78 FALSE
, 0, 0x007fff80, FALSE
),
79 HOWTO (R_SPU_REL32
, 0, 2, 32, TRUE
, 0, complain_overflow_dont
,
80 bfd_elf_generic_reloc
, "SPU_REL32",
81 FALSE
, 0, 0xffffffff, TRUE
),
82 HOWTO (R_SPU_ADDR16X
, 0, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
83 bfd_elf_generic_reloc
, "SPU_ADDR16X",
84 FALSE
, 0, 0x007fff80, FALSE
),
85 HOWTO (R_SPU_PPU32
, 0, 2, 32, FALSE
, 0, complain_overflow_dont
,
86 bfd_elf_generic_reloc
, "SPU_PPU32",
87 FALSE
, 0, 0xffffffff, FALSE
),
88 HOWTO (R_SPU_PPU64
, 0, 4, 64, FALSE
, 0, complain_overflow_dont
,
89 bfd_elf_generic_reloc
, "SPU_PPU64",
93 static struct bfd_elf_special_section
const spu_elf_special_sections
[] = {
94 { "._ea", 4, 0, SHT_PROGBITS
, SHF_WRITE
},
95 { ".toe", 4, 0, SHT_NOBITS
, SHF_ALLOC
},
99 static enum elf_spu_reloc_type
100 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code
)
106 case BFD_RELOC_SPU_IMM10W
:
108 case BFD_RELOC_SPU_IMM16W
:
110 case BFD_RELOC_SPU_LO16
:
111 return R_SPU_ADDR16_LO
;
112 case BFD_RELOC_SPU_HI16
:
113 return R_SPU_ADDR16_HI
;
114 case BFD_RELOC_SPU_IMM18
:
116 case BFD_RELOC_SPU_PCREL16
:
118 case BFD_RELOC_SPU_IMM7
:
120 case BFD_RELOC_SPU_IMM8
:
122 case BFD_RELOC_SPU_PCREL9a
:
124 case BFD_RELOC_SPU_PCREL9b
:
126 case BFD_RELOC_SPU_IMM10
:
127 return R_SPU_ADDR10I
;
128 case BFD_RELOC_SPU_IMM16
:
129 return R_SPU_ADDR16I
;
132 case BFD_RELOC_32_PCREL
:
134 case BFD_RELOC_SPU_PPU32
:
136 case BFD_RELOC_SPU_PPU64
:
142 spu_elf_info_to_howto (bfd
*abfd ATTRIBUTE_UNUSED
,
144 Elf_Internal_Rela
*dst
)
146 enum elf_spu_reloc_type r_type
;
148 r_type
= (enum elf_spu_reloc_type
) ELF32_R_TYPE (dst
->r_info
);
149 BFD_ASSERT (r_type
< R_SPU_max
);
150 cache_ptr
->howto
= &elf_howto_table
[(int) r_type
];
153 static reloc_howto_type
*
154 spu_elf_reloc_type_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
155 bfd_reloc_code_real_type code
)
157 enum elf_spu_reloc_type r_type
= spu_elf_bfd_to_reloc_type (code
);
159 if (r_type
== R_SPU_NONE
)
162 return elf_howto_table
+ r_type
;
165 static reloc_howto_type
*
166 spu_elf_reloc_name_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
171 for (i
= 0; i
< sizeof (elf_howto_table
) / sizeof (elf_howto_table
[0]); i
++)
172 if (elf_howto_table
[i
].name
!= NULL
173 && strcasecmp (elf_howto_table
[i
].name
, r_name
) == 0)
174 return &elf_howto_table
[i
];
179 /* Apply R_SPU_REL9 and R_SPU_REL9I relocs. */
181 static bfd_reloc_status_type
182 spu_elf_rel9 (bfd
*abfd
, arelent
*reloc_entry
, asymbol
*symbol
,
183 void *data
, asection
*input_section
,
184 bfd
*output_bfd
, char **error_message
)
186 bfd_size_type octets
;
190 /* If this is a relocatable link (output_bfd test tells us), just
191 call the generic function. Any adjustment will be done at final
193 if (output_bfd
!= NULL
)
194 return bfd_elf_generic_reloc (abfd
, reloc_entry
, symbol
, data
,
195 input_section
, output_bfd
, error_message
);
197 if (reloc_entry
->address
> bfd_get_section_limit (abfd
, input_section
))
198 return bfd_reloc_outofrange
;
199 octets
= reloc_entry
->address
* bfd_octets_per_byte (abfd
);
201 /* Get symbol value. */
203 if (!bfd_is_com_section (symbol
->section
))
205 if (symbol
->section
->output_section
)
206 val
+= symbol
->section
->output_section
->vma
;
208 val
+= reloc_entry
->addend
;
210 /* Make it pc-relative. */
211 val
-= input_section
->output_section
->vma
+ input_section
->output_offset
;
214 if (val
+ 256 >= 512)
215 return bfd_reloc_overflow
;
217 insn
= bfd_get_32 (abfd
, (bfd_byte
*) data
+ octets
);
219 /* Move two high bits of value to REL9I and REL9 position.
220 The mask will take care of selecting the right field. */
221 val
= (val
& 0x7f) | ((val
& 0x180) << 7) | ((val
& 0x180) << 16);
222 insn
&= ~reloc_entry
->howto
->dst_mask
;
223 insn
|= val
& reloc_entry
->howto
->dst_mask
;
224 bfd_put_32 (abfd
, insn
, (bfd_byte
*) data
+ octets
);
229 spu_elf_new_section_hook (bfd
*abfd
, asection
*sec
)
231 if (!sec
->used_by_bfd
)
233 struct _spu_elf_section_data
*sdata
;
235 sdata
= bfd_zalloc (abfd
, sizeof (*sdata
));
238 sec
->used_by_bfd
= sdata
;
241 return _bfd_elf_new_section_hook (abfd
, sec
);
244 /* Set up overlay info for executables. */
247 spu_elf_object_p (bfd
*abfd
)
249 if ((abfd
->flags
& (EXEC_P
| DYNAMIC
)) != 0)
251 unsigned int i
, num_ovl
, num_buf
;
252 Elf_Internal_Phdr
*phdr
= elf_tdata (abfd
)->phdr
;
253 Elf_Internal_Ehdr
*ehdr
= elf_elfheader (abfd
);
254 Elf_Internal_Phdr
*last_phdr
= NULL
;
256 for (num_buf
= 0, num_ovl
= 0, i
= 0; i
< ehdr
->e_phnum
; i
++, phdr
++)
257 if (phdr
->p_type
== PT_LOAD
&& (phdr
->p_flags
& PF_OVERLAY
) != 0)
262 if (last_phdr
== NULL
263 || ((last_phdr
->p_vaddr
^ phdr
->p_vaddr
) & 0x3ffff) != 0)
266 for (j
= 1; j
< elf_numsections (abfd
); j
++)
268 Elf_Internal_Shdr
*shdr
= elf_elfsections (abfd
)[j
];
270 if (ELF_IS_SECTION_IN_SEGMENT_MEMORY (shdr
, phdr
))
272 asection
*sec
= shdr
->bfd_section
;
273 spu_elf_section_data (sec
)->u
.o
.ovl_index
= num_ovl
;
274 spu_elf_section_data (sec
)->u
.o
.ovl_buf
= num_buf
;
282 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
283 strip --strip-unneeded will not remove them. */
286 spu_elf_backend_symbol_processing (bfd
*abfd ATTRIBUTE_UNUSED
, asymbol
*sym
)
288 if (sym
->name
!= NULL
289 && sym
->section
!= bfd_abs_section_ptr
290 && strncmp (sym
->name
, "_EAR_", 5) == 0)
291 sym
->flags
|= BSF_KEEP
;
294 /* SPU ELF linker hash table. */
296 struct spu_link_hash_table
298 struct elf_link_hash_table elf
;
300 /* Shortcuts to overlay sections. */
305 /* Count of stubs in each overlay section. */
306 unsigned int *stub_count
;
308 /* The stub section for each overlay section. */
311 struct elf_link_hash_entry
*ovly_load
;
312 struct elf_link_hash_entry
*ovly_return
;
313 unsigned long ovly_load_r_symndx
;
315 /* Number of overlay buffers. */
316 unsigned int num_buf
;
318 /* Total number of overlays. */
319 unsigned int num_overlays
;
321 /* How much memory we have. */
322 unsigned int local_store
;
323 /* Local store --auto-overlay should reserve for non-overlay
324 functions and data. */
325 unsigned int overlay_fixed
;
326 /* Local store --auto-overlay should reserve for stack and heap. */
327 unsigned int reserved
;
328 /* If reserved is not specified, stack analysis will calculate a value
329 for the stack. This parameter adjusts that value to allow for
330 negative sp access (the ABI says 2000 bytes below sp are valid,
331 and the overlay manager uses some of this area). */
332 int extra_stack_space
;
333 /* Count of overlay stubs needed in non-overlay area. */
334 unsigned int non_ovly_stub
;
336 /* Stash various callbacks for --auto-overlay. */
337 void (*spu_elf_load_ovl_mgr
) (void);
338 FILE *(*spu_elf_open_overlay_script
) (void);
339 void (*spu_elf_relink
) (void);
341 /* Bit 0 set if --auto-overlay.
342 Bit 1 set if --auto-relink.
343 Bit 2 set if --overlay-rodata. */
344 unsigned int auto_overlay
: 3;
345 #define AUTO_OVERLAY 1
346 #define AUTO_RELINK 2
347 #define OVERLAY_RODATA 4
349 /* Set if we should emit symbols for stubs. */
350 unsigned int emit_stub_syms
:1;
352 /* Set if we want stubs on calls out of overlay regions to
353 non-overlay regions. */
354 unsigned int non_overlay_stubs
: 1;
357 unsigned int stub_err
: 1;
359 /* Set if stack size analysis should be done. */
360 unsigned int stack_analysis
: 1;
362 /* Set if __stack_* syms will be emitted. */
363 unsigned int emit_stack_syms
: 1;
366 /* Hijack the generic got fields for overlay stub accounting. */
370 struct got_entry
*next
;
376 #define spu_hash_table(p) \
377 ((struct spu_link_hash_table *) ((p)->hash))
379 /* Create a spu ELF linker hash table. */
381 static struct bfd_link_hash_table
*
382 spu_elf_link_hash_table_create (bfd
*abfd
)
384 struct spu_link_hash_table
*htab
;
386 htab
= bfd_malloc (sizeof (*htab
));
390 if (!_bfd_elf_link_hash_table_init (&htab
->elf
, abfd
,
391 _bfd_elf_link_hash_newfunc
,
392 sizeof (struct elf_link_hash_entry
)))
398 memset (&htab
->ovtab
, 0,
399 sizeof (*htab
) - offsetof (struct spu_link_hash_table
, ovtab
));
401 htab
->elf
.init_got_refcount
.refcount
= 0;
402 htab
->elf
.init_got_refcount
.glist
= NULL
;
403 htab
->elf
.init_got_offset
.offset
= 0;
404 htab
->elf
.init_got_offset
.glist
= NULL
;
405 return &htab
->elf
.root
;
408 /* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
409 to (hash, NULL) for global symbols, and (NULL, sym) for locals. Set
410 *SYMSECP to the symbol's section. *LOCSYMSP caches local syms. */
413 get_sym_h (struct elf_link_hash_entry
**hp
,
414 Elf_Internal_Sym
**symp
,
416 Elf_Internal_Sym
**locsymsp
,
417 unsigned long r_symndx
,
420 Elf_Internal_Shdr
*symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
422 if (r_symndx
>= symtab_hdr
->sh_info
)
424 struct elf_link_hash_entry
**sym_hashes
= elf_sym_hashes (ibfd
);
425 struct elf_link_hash_entry
*h
;
427 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
428 while (h
->root
.type
== bfd_link_hash_indirect
429 || h
->root
.type
== bfd_link_hash_warning
)
430 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
440 asection
*symsec
= NULL
;
441 if (h
->root
.type
== bfd_link_hash_defined
442 || h
->root
.type
== bfd_link_hash_defweak
)
443 symsec
= h
->root
.u
.def
.section
;
449 Elf_Internal_Sym
*sym
;
450 Elf_Internal_Sym
*locsyms
= *locsymsp
;
454 locsyms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
456 locsyms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
,
458 0, NULL
, NULL
, NULL
);
463 sym
= locsyms
+ r_symndx
;
472 *symsecp
= bfd_section_from_elf_index (ibfd
, sym
->st_shndx
);
478 /* Create the note section if not already present. This is done early so
479 that the linker maps the sections to the right place in the output. */
482 spu_elf_create_sections (struct bfd_link_info
*info
,
487 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
489 /* Stash some options away where we can get at them later. */
490 htab
->stack_analysis
= stack_analysis
;
491 htab
->emit_stack_syms
= emit_stack_syms
;
493 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
494 if (bfd_get_section_by_name (ibfd
, SPU_PTNOTE_SPUNAME
) != NULL
)
499 /* Make SPU_PTNOTE_SPUNAME section. */
506 ibfd
= info
->input_bfds
;
507 flags
= SEC_LOAD
| SEC_READONLY
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
508 s
= bfd_make_section_anyway_with_flags (ibfd
, SPU_PTNOTE_SPUNAME
, flags
);
510 || !bfd_set_section_alignment (ibfd
, s
, 4))
513 name_len
= strlen (bfd_get_filename (info
->output_bfd
)) + 1;
514 size
= 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4);
515 size
+= (name_len
+ 3) & -4;
517 if (!bfd_set_section_size (ibfd
, s
, size
))
520 data
= bfd_zalloc (ibfd
, size
);
524 bfd_put_32 (ibfd
, sizeof (SPU_PLUGIN_NAME
), data
+ 0);
525 bfd_put_32 (ibfd
, name_len
, data
+ 4);
526 bfd_put_32 (ibfd
, 1, data
+ 8);
527 memcpy (data
+ 12, SPU_PLUGIN_NAME
, sizeof (SPU_PLUGIN_NAME
));
528 memcpy (data
+ 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4),
529 bfd_get_filename (info
->output_bfd
), name_len
);
536 /* qsort predicate to sort sections by vma. */
539 sort_sections (const void *a
, const void *b
)
541 const asection
*const *s1
= a
;
542 const asection
*const *s2
= b
;
543 bfd_signed_vma delta
= (*s1
)->vma
- (*s2
)->vma
;
546 return delta
< 0 ? -1 : 1;
548 return (*s1
)->index
- (*s2
)->index
;
551 /* Identify overlays in the output bfd, and number them. */
554 spu_elf_find_overlays (struct bfd_link_info
*info
)
556 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
557 asection
**alloc_sec
;
558 unsigned int i
, n
, ovl_index
, num_buf
;
562 if (info
->output_bfd
->section_count
< 2)
566 = bfd_malloc (info
->output_bfd
->section_count
* sizeof (*alloc_sec
));
567 if (alloc_sec
== NULL
)
570 /* Pick out all the alloced sections. */
571 for (n
= 0, s
= info
->output_bfd
->sections
; s
!= NULL
; s
= s
->next
)
572 if ((s
->flags
& SEC_ALLOC
) != 0
573 && (s
->flags
& (SEC_LOAD
| SEC_THREAD_LOCAL
)) != SEC_THREAD_LOCAL
583 /* Sort them by vma. */
584 qsort (alloc_sec
, n
, sizeof (*alloc_sec
), sort_sections
);
586 /* Look for overlapping vmas. Any with overlap must be overlays.
587 Count them. Also count the number of overlay regions. */
588 ovl_end
= alloc_sec
[0]->vma
+ alloc_sec
[0]->size
;
589 for (ovl_index
= 0, num_buf
= 0, i
= 1; i
< n
; i
++)
592 if (s
->vma
< ovl_end
)
594 asection
*s0
= alloc_sec
[i
- 1];
596 if (spu_elf_section_data (s0
)->u
.o
.ovl_index
== 0)
598 alloc_sec
[ovl_index
] = s0
;
599 spu_elf_section_data (s0
)->u
.o
.ovl_index
= ++ovl_index
;
600 spu_elf_section_data (s0
)->u
.o
.ovl_buf
= ++num_buf
;
602 alloc_sec
[ovl_index
] = s
;
603 spu_elf_section_data (s
)->u
.o
.ovl_index
= ++ovl_index
;
604 spu_elf_section_data (s
)->u
.o
.ovl_buf
= num_buf
;
605 if (s0
->vma
!= s
->vma
)
607 info
->callbacks
->einfo (_("%X%P: overlay sections %A and %A "
608 "do not start at the same address.\n"),
612 if (ovl_end
< s
->vma
+ s
->size
)
613 ovl_end
= s
->vma
+ s
->size
;
616 ovl_end
= s
->vma
+ s
->size
;
619 htab
->num_overlays
= ovl_index
;
620 htab
->num_buf
= num_buf
;
621 htab
->ovl_sec
= alloc_sec
;
622 htab
->ovly_load
= elf_link_hash_lookup (&htab
->elf
, "__ovly_load",
623 FALSE
, FALSE
, FALSE
);
624 htab
->ovly_return
= elf_link_hash_lookup (&htab
->elf
, "__ovly_return",
625 FALSE
, FALSE
, FALSE
);
626 return ovl_index
!= 0;
629 /* Support two sizes of overlay stubs, a slower more compact stub of two
630 intructions, and a faster stub of four instructions. */
631 #ifndef OVL_STUB_SIZE
632 /* Default to faster. */
633 #define OVL_STUB_SIZE 16
634 /* #define OVL_STUB_SIZE 8 */
636 #define BRSL 0x33000000
637 #define BR 0x32000000
638 #define NOP 0x40200000
639 #define LNOP 0x00200000
640 #define ILA 0x42000000
642 /* Return true for all relative and absolute branch instructions.
650 brhnz 00100011 0.. */
653 is_branch (const unsigned char *insn
)
655 return (insn
[0] & 0xec) == 0x20 && (insn
[1] & 0x80) == 0;
658 /* Return true for all indirect branch instructions.
666 bihnz 00100101 011 */
669 is_indirect_branch (const unsigned char *insn
)
671 return (insn
[0] & 0xef) == 0x25 && (insn
[1] & 0x80) == 0;
674 /* Return true for branch hint instructions.
679 is_hint (const unsigned char *insn
)
681 return (insn
[0] & 0xfc) == 0x10;
684 /* True if INPUT_SECTION might need overlay stubs. */
687 maybe_needs_stubs (asection
*input_section
, bfd
*output_bfd
)
689 /* No stubs for debug sections and suchlike. */
690 if ((input_section
->flags
& SEC_ALLOC
) == 0)
693 /* No stubs for link-once sections that will be discarded. */
694 if (input_section
->output_section
== NULL
695 || input_section
->output_section
->owner
!= output_bfd
)
698 /* Don't create stubs for .eh_frame references. */
699 if (strcmp (input_section
->name
, ".eh_frame") == 0)
713 /* Return non-zero if this reloc symbol should go via an overlay stub.
714 Return 2 if the stub must be in non-overlay area. */
716 static enum _stub_type
717 needs_ovl_stub (struct elf_link_hash_entry
*h
,
718 Elf_Internal_Sym
*sym
,
720 asection
*input_section
,
721 Elf_Internal_Rela
*irela
,
723 struct bfd_link_info
*info
)
725 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
726 enum elf_spu_reloc_type r_type
;
727 unsigned int sym_type
;
729 enum _stub_type ret
= no_stub
;
732 || sym_sec
->output_section
== NULL
733 || sym_sec
->output_section
->owner
!= info
->output_bfd
734 || spu_elf_section_data (sym_sec
->output_section
) == NULL
)
739 /* Ensure no stubs for user supplied overlay manager syms. */
740 if (h
== htab
->ovly_load
|| h
== htab
->ovly_return
)
743 /* setjmp always goes via an overlay stub, because then the return
744 and hence the longjmp goes via __ovly_return. That magically
745 makes setjmp/longjmp between overlays work. */
746 if (strncmp (h
->root
.root
.string
, "setjmp", 6) == 0
747 && (h
->root
.root
.string
[6] == '\0' || h
->root
.root
.string
[6] == '@'))
751 /* Usually, symbols in non-overlay sections don't need stubs. */
752 if (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
== 0
753 && !htab
->non_overlay_stubs
)
759 sym_type
= ELF_ST_TYPE (sym
->st_info
);
761 r_type
= ELF32_R_TYPE (irela
->r_info
);
763 if (r_type
== R_SPU_REL16
|| r_type
== R_SPU_ADDR16
)
767 if (contents
== NULL
)
770 if (!bfd_get_section_contents (input_section
->owner
,
777 contents
+= irela
->r_offset
;
779 if (is_branch (contents
) || is_hint (contents
))
782 if ((contents
[0] & 0xfd) == 0x31
783 && sym_type
!= STT_FUNC
786 /* It's common for people to write assembly and forget
787 to give function symbols the right type. Handle
788 calls to such symbols, but warn so that (hopefully)
789 people will fix their code. We need the symbol
790 type to be correct to distinguish function pointer
791 initialisation from other pointer initialisations. */
792 const char *sym_name
;
795 sym_name
= h
->root
.root
.string
;
798 Elf_Internal_Shdr
*symtab_hdr
;
799 symtab_hdr
= &elf_tdata (input_section
->owner
)->symtab_hdr
;
800 sym_name
= bfd_elf_sym_name (input_section
->owner
,
805 (*_bfd_error_handler
) (_("warning: call to non-function"
806 " symbol %s defined in %B"),
807 sym_sec
->owner
, sym_name
);
813 if (sym_type
!= STT_FUNC
815 && (sym_sec
->flags
& SEC_CODE
) == 0)
818 /* A reference from some other section to a symbol in an overlay
819 section needs a stub. */
820 if (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
821 != spu_elf_section_data (input_section
->output_section
)->u
.o
.ovl_index
)
824 /* If this insn isn't a branch then we are possibly taking the
825 address of a function and passing it out somehow. */
826 return !branch
&& sym_type
== STT_FUNC
? nonovl_stub
: ret
;
830 count_stub (struct spu_link_hash_table
*htab
,
833 enum _stub_type stub_type
,
834 struct elf_link_hash_entry
*h
,
835 const Elf_Internal_Rela
*irela
)
837 unsigned int ovl
= 0;
838 struct got_entry
*g
, **head
;
841 /* If this instruction is a branch or call, we need a stub
842 for it. One stub per function per overlay.
843 If it isn't a branch, then we are taking the address of
844 this function so need a stub in the non-overlay area
845 for it. One stub per function. */
846 if (stub_type
!= nonovl_stub
)
847 ovl
= spu_elf_section_data (isec
->output_section
)->u
.o
.ovl_index
;
850 head
= &h
->got
.glist
;
853 if (elf_local_got_ents (ibfd
) == NULL
)
855 bfd_size_type amt
= (elf_tdata (ibfd
)->symtab_hdr
.sh_info
856 * sizeof (*elf_local_got_ents (ibfd
)));
857 elf_local_got_ents (ibfd
) = bfd_zmalloc (amt
);
858 if (elf_local_got_ents (ibfd
) == NULL
)
861 head
= elf_local_got_ents (ibfd
) + ELF32_R_SYM (irela
->r_info
);
866 addend
= irela
->r_addend
;
870 struct got_entry
*gnext
;
872 for (g
= *head
; g
!= NULL
; g
= g
->next
)
873 if (g
->addend
== addend
&& g
->ovl
== 0)
878 /* Need a new non-overlay area stub. Zap other stubs. */
879 for (g
= *head
; g
!= NULL
; g
= gnext
)
882 if (g
->addend
== addend
)
884 htab
->stub_count
[g
->ovl
] -= 1;
892 for (g
= *head
; g
!= NULL
; g
= g
->next
)
893 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
899 g
= bfd_malloc (sizeof *g
);
904 g
->stub_addr
= (bfd_vma
) -1;
908 htab
->stub_count
[ovl
] += 1;
914 /* Two instruction overlay stubs look like:
917 .word target_ovl_and_address
919 ovl_and_address is a word with the overlay number in the top 14 bits
920 and local store address in the bottom 18 bits.
922 Four instruction overlay stubs look like:
926 ila $79,target_address
930 build_stub (struct spu_link_hash_table
*htab
,
933 enum _stub_type stub_type
,
934 struct elf_link_hash_entry
*h
,
935 const Elf_Internal_Rela
*irela
,
940 struct got_entry
*g
, **head
;
942 bfd_vma addend
, val
, from
, to
;
945 if (stub_type
!= nonovl_stub
)
946 ovl
= spu_elf_section_data (isec
->output_section
)->u
.o
.ovl_index
;
949 head
= &h
->got
.glist
;
951 head
= elf_local_got_ents (ibfd
) + ELF32_R_SYM (irela
->r_info
);
955 addend
= irela
->r_addend
;
957 for (g
= *head
; g
!= NULL
; g
= g
->next
)
958 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
963 if (g
->ovl
== 0 && ovl
!= 0)
966 if (g
->stub_addr
!= (bfd_vma
) -1)
969 sec
= htab
->stub_sec
[ovl
];
970 dest
+= dest_sec
->output_offset
+ dest_sec
->output_section
->vma
;
971 from
= sec
->size
+ sec
->output_offset
+ sec
->output_section
->vma
;
973 to
= (htab
->ovly_load
->root
.u
.def
.value
974 + htab
->ovly_load
->root
.u
.def
.section
->output_offset
975 + htab
->ovly_load
->root
.u
.def
.section
->output_section
->vma
);
977 if (OVL_STUB_SIZE
== 16)
979 if (((dest
| to
| from
) & 3) != 0
980 || val
+ 0x40000 >= 0x80000)
985 ovl
= spu_elf_section_data (dest_sec
->output_section
)->u
.o
.ovl_index
;
987 if (OVL_STUB_SIZE
== 16)
989 bfd_put_32 (sec
->owner
, ILA
+ ((ovl
<< 7) & 0x01ffff80) + 78,
990 sec
->contents
+ sec
->size
);
991 bfd_put_32 (sec
->owner
, LNOP
,
992 sec
->contents
+ sec
->size
+ 4);
993 bfd_put_32 (sec
->owner
, ILA
+ ((dest
<< 7) & 0x01ffff80) + 79,
994 sec
->contents
+ sec
->size
+ 8);
995 bfd_put_32 (sec
->owner
, BR
+ ((val
<< 5) & 0x007fff80),
996 sec
->contents
+ sec
->size
+ 12);
998 else if (OVL_STUB_SIZE
== 8)
1000 bfd_put_32 (sec
->owner
, BRSL
+ ((val
<< 5) & 0x007fff80) + 75,
1001 sec
->contents
+ sec
->size
);
1003 val
= (dest
& 0x3ffff) | (ovl
<< 18);
1004 bfd_put_32 (sec
->owner
, val
,
1005 sec
->contents
+ sec
->size
+ 4);
1009 sec
->size
+= OVL_STUB_SIZE
;
1011 if (htab
->emit_stub_syms
)
1017 len
= 8 + sizeof (".ovl_call.") - 1;
1019 len
+= strlen (h
->root
.root
.string
);
1024 add
= (int) irela
->r_addend
& 0xffffffff;
1027 name
= bfd_malloc (len
);
1031 sprintf (name
, "%08x.ovl_call.", g
->ovl
);
1033 strcpy (name
+ 8 + sizeof (".ovl_call.") - 1, h
->root
.root
.string
);
1035 sprintf (name
+ 8 + sizeof (".ovl_call.") - 1, "%x:%x",
1036 dest_sec
->id
& 0xffffffff,
1037 (int) ELF32_R_SYM (irela
->r_info
) & 0xffffffff);
1039 sprintf (name
+ len
- 9, "+%x", add
);
1041 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
1045 if (h
->root
.type
== bfd_link_hash_new
)
1047 h
->root
.type
= bfd_link_hash_defined
;
1048 h
->root
.u
.def
.section
= sec
;
1049 h
->root
.u
.def
.value
= sec
->size
- OVL_STUB_SIZE
;
1050 h
->size
= OVL_STUB_SIZE
;
1054 h
->ref_regular_nonweak
= 1;
1055 h
->forced_local
= 1;
1063 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
1067 allocate_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
1069 /* Symbols starting with _SPUEAR_ need a stub because they may be
1070 invoked by the PPU. */
1071 struct bfd_link_info
*info
= inf
;
1072 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1075 if ((h
->root
.type
== bfd_link_hash_defined
1076 || h
->root
.type
== bfd_link_hash_defweak
)
1078 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0
1079 && (sym_sec
= h
->root
.u
.def
.section
) != NULL
1080 && sym_sec
->output_section
!= NULL
1081 && sym_sec
->output_section
->owner
== info
->output_bfd
1082 && spu_elf_section_data (sym_sec
->output_section
) != NULL
1083 && (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
!= 0
1084 || htab
->non_overlay_stubs
))
1086 return count_stub (htab
, NULL
, NULL
, nonovl_stub
, h
, NULL
);
1093 build_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
1095 /* Symbols starting with _SPUEAR_ need a stub because they may be
1096 invoked by the PPU. */
1097 struct bfd_link_info
*info
= inf
;
1098 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1101 if ((h
->root
.type
== bfd_link_hash_defined
1102 || h
->root
.type
== bfd_link_hash_defweak
)
1104 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0
1105 && (sym_sec
= h
->root
.u
.def
.section
) != NULL
1106 && sym_sec
->output_section
!= NULL
1107 && sym_sec
->output_section
->owner
== info
->output_bfd
1108 && spu_elf_section_data (sym_sec
->output_section
) != NULL
1109 && (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
!= 0
1110 || htab
->non_overlay_stubs
))
1112 return build_stub (htab
, NULL
, NULL
, nonovl_stub
, h
, NULL
,
1113 h
->root
.u
.def
.value
, sym_sec
);
1119 /* Size or build stubs. */
1122 process_stubs (struct bfd_link_info
*info
, bfd_boolean build
)
1124 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1127 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
1129 extern const bfd_target bfd_elf32_spu_vec
;
1130 Elf_Internal_Shdr
*symtab_hdr
;
1132 Elf_Internal_Sym
*local_syms
= NULL
;
1134 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
1137 /* We'll need the symbol table in a second. */
1138 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
1139 if (symtab_hdr
->sh_info
== 0)
1142 /* Walk over each section attached to the input bfd. */
1143 for (isec
= ibfd
->sections
; isec
!= NULL
; isec
= isec
->next
)
1145 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
1147 /* If there aren't any relocs, then there's nothing more to do. */
1148 if ((isec
->flags
& SEC_RELOC
) == 0
1149 || isec
->reloc_count
== 0)
1152 if (!maybe_needs_stubs (isec
, info
->output_bfd
))
1155 /* Get the relocs. */
1156 internal_relocs
= _bfd_elf_link_read_relocs (ibfd
, isec
, NULL
, NULL
,
1158 if (internal_relocs
== NULL
)
1159 goto error_ret_free_local
;
1161 /* Now examine each relocation. */
1162 irela
= internal_relocs
;
1163 irelaend
= irela
+ isec
->reloc_count
;
1164 for (; irela
< irelaend
; irela
++)
1166 enum elf_spu_reloc_type r_type
;
1167 unsigned int r_indx
;
1169 Elf_Internal_Sym
*sym
;
1170 struct elf_link_hash_entry
*h
;
1171 enum _stub_type stub_type
;
1173 r_type
= ELF32_R_TYPE (irela
->r_info
);
1174 r_indx
= ELF32_R_SYM (irela
->r_info
);
1176 if (r_type
>= R_SPU_max
)
1178 bfd_set_error (bfd_error_bad_value
);
1179 error_ret_free_internal
:
1180 if (elf_section_data (isec
)->relocs
!= internal_relocs
)
1181 free (internal_relocs
);
1182 error_ret_free_local
:
1183 if (local_syms
!= NULL
1184 && (symtab_hdr
->contents
1185 != (unsigned char *) local_syms
))
1190 /* Determine the reloc target section. */
1191 if (!get_sym_h (&h
, &sym
, &sym_sec
, &local_syms
, r_indx
, ibfd
))
1192 goto error_ret_free_internal
;
1194 stub_type
= needs_ovl_stub (h
, sym
, sym_sec
, isec
, irela
,
1196 if (stub_type
== no_stub
)
1198 else if (stub_type
== stub_error
)
1199 goto error_ret_free_internal
;
1201 if (htab
->stub_count
== NULL
)
1204 amt
= (htab
->num_overlays
+ 1) * sizeof (*htab
->stub_count
);
1205 htab
->stub_count
= bfd_zmalloc (amt
);
1206 if (htab
->stub_count
== NULL
)
1207 goto error_ret_free_internal
;
1212 if (!count_stub (htab
, ibfd
, isec
, stub_type
, h
, irela
))
1213 goto error_ret_free_internal
;
1220 dest
= h
->root
.u
.def
.value
;
1222 dest
= sym
->st_value
;
1223 dest
+= irela
->r_addend
;
1224 if (!build_stub (htab
, ibfd
, isec
, stub_type
, h
, irela
,
1226 goto error_ret_free_internal
;
1230 /* We're done with the internal relocs, free them. */
1231 if (elf_section_data (isec
)->relocs
!= internal_relocs
)
1232 free (internal_relocs
);
1235 if (local_syms
!= NULL
1236 && symtab_hdr
->contents
!= (unsigned char *) local_syms
)
1238 if (!info
->keep_memory
)
1241 symtab_hdr
->contents
= (unsigned char *) local_syms
;
1248 /* Allocate space for overlay call and return stubs. */
1251 spu_elf_size_stubs (struct bfd_link_info
*info
,
1252 void (*place_spu_section
) (asection
*, asection
*,
1254 int non_overlay_stubs
)
1256 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1263 htab
->non_overlay_stubs
= non_overlay_stubs
;
1264 if (!process_stubs (info
, FALSE
))
1267 elf_link_hash_traverse (&htab
->elf
, allocate_spuear_stubs
, info
);
1271 if (htab
->stub_count
== NULL
)
1274 ibfd
= info
->input_bfds
;
1275 amt
= (htab
->num_overlays
+ 1) * sizeof (*htab
->stub_sec
);
1276 htab
->stub_sec
= bfd_zmalloc (amt
);
1277 if (htab
->stub_sec
== NULL
)
1280 flags
= (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_READONLY
1281 | SEC_HAS_CONTENTS
| SEC_IN_MEMORY
);
1282 stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1283 htab
->stub_sec
[0] = stub
;
1285 || !bfd_set_section_alignment (ibfd
, stub
, 3 + (OVL_STUB_SIZE
> 8)))
1287 stub
->size
= htab
->stub_count
[0] * OVL_STUB_SIZE
;
1288 (*place_spu_section
) (stub
, NULL
, ".text");
1290 for (i
= 0; i
< htab
->num_overlays
; ++i
)
1292 asection
*osec
= htab
->ovl_sec
[i
];
1293 unsigned int ovl
= spu_elf_section_data (osec
)->u
.o
.ovl_index
;
1294 stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1295 htab
->stub_sec
[ovl
] = stub
;
1297 || !bfd_set_section_alignment (ibfd
, stub
, 3 + (OVL_STUB_SIZE
> 8)))
1299 stub
->size
= htab
->stub_count
[ovl
] * OVL_STUB_SIZE
;
1300 (*place_spu_section
) (stub
, osec
, NULL
);
1303 /* htab->ovtab consists of two arrays.
1313 . } _ovly_buf_table[];
1316 flags
= (SEC_ALLOC
| SEC_LOAD
1317 | SEC_HAS_CONTENTS
| SEC_IN_MEMORY
);
1318 htab
->ovtab
= bfd_make_section_anyway_with_flags (ibfd
, ".ovtab", flags
);
1319 if (htab
->ovtab
== NULL
1320 || !bfd_set_section_alignment (ibfd
, htab
->ovtab
, 4))
1323 htab
->ovtab
->size
= htab
->num_overlays
* 16 + 16 + htab
->num_buf
* 4;
1324 (*place_spu_section
) (htab
->ovtab
, NULL
, ".data");
1326 htab
->toe
= bfd_make_section_anyway_with_flags (ibfd
, ".toe", SEC_ALLOC
);
1327 if (htab
->toe
== NULL
1328 || !bfd_set_section_alignment (ibfd
, htab
->toe
, 4))
1330 htab
->toe
->size
= 16;
1331 (*place_spu_section
) (htab
->toe
, NULL
, ".toe");
1336 /* Functions to handle embedded spu_ovl.o object. */
1339 ovl_mgr_open (struct bfd
*nbfd ATTRIBUTE_UNUSED
, void *stream
)
1345 ovl_mgr_pread (struct bfd
*abfd ATTRIBUTE_UNUSED
,
1351 struct _ovl_stream
*os
;
1355 os
= (struct _ovl_stream
*) stream
;
1356 max
= (const char *) os
->end
- (const char *) os
->start
;
1358 if ((ufile_ptr
) offset
>= max
)
1362 if (count
> max
- offset
)
1363 count
= max
- offset
;
1365 memcpy (buf
, (const char *) os
->start
+ offset
, count
);
1370 spu_elf_open_builtin_lib (bfd
**ovl_bfd
, const struct _ovl_stream
*stream
)
1372 *ovl_bfd
= bfd_openr_iovec ("builtin ovl_mgr",
1379 return *ovl_bfd
!= NULL
;
1382 /* Define an STT_OBJECT symbol. */
1384 static struct elf_link_hash_entry
*
1385 define_ovtab_symbol (struct spu_link_hash_table
*htab
, const char *name
)
1387 struct elf_link_hash_entry
*h
;
1389 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, FALSE
, FALSE
);
1393 if (h
->root
.type
!= bfd_link_hash_defined
1396 h
->root
.type
= bfd_link_hash_defined
;
1397 h
->root
.u
.def
.section
= htab
->ovtab
;
1398 h
->type
= STT_OBJECT
;
1401 h
->ref_regular_nonweak
= 1;
1406 (*_bfd_error_handler
) (_("%B is not allowed to define %s"),
1407 h
->root
.u
.def
.section
->owner
,
1408 h
->root
.root
.string
);
1409 bfd_set_error (bfd_error_bad_value
);
1416 /* Fill in all stubs and the overlay tables. */
1419 spu_elf_build_stubs (struct bfd_link_info
*info
, int emit_syms
)
1421 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1422 struct elf_link_hash_entry
*h
;
1428 htab
->emit_stub_syms
= emit_syms
;
1429 if (htab
->stub_count
== NULL
)
1432 for (i
= 0; i
<= htab
->num_overlays
; i
++)
1433 if (htab
->stub_sec
[i
]->size
!= 0)
1435 htab
->stub_sec
[i
]->contents
= bfd_zalloc (htab
->stub_sec
[i
]->owner
,
1436 htab
->stub_sec
[i
]->size
);
1437 if (htab
->stub_sec
[i
]->contents
== NULL
)
1439 htab
->stub_sec
[i
]->rawsize
= htab
->stub_sec
[i
]->size
;
1440 htab
->stub_sec
[i
]->size
= 0;
1443 h
= elf_link_hash_lookup (&htab
->elf
, "__ovly_load", FALSE
, FALSE
, FALSE
);
1444 htab
->ovly_load
= h
;
1445 BFD_ASSERT (h
!= NULL
1446 && (h
->root
.type
== bfd_link_hash_defined
1447 || h
->root
.type
== bfd_link_hash_defweak
)
1450 s
= h
->root
.u
.def
.section
->output_section
;
1451 if (spu_elf_section_data (s
)->u
.o
.ovl_index
)
1453 (*_bfd_error_handler
) (_("%s in overlay section"),
1454 h
->root
.root
.string
);
1455 bfd_set_error (bfd_error_bad_value
);
1459 h
= elf_link_hash_lookup (&htab
->elf
, "__ovly_return", FALSE
, FALSE
, FALSE
);
1460 htab
->ovly_return
= h
;
1462 /* Fill in all the stubs. */
1463 process_stubs (info
, TRUE
);
1464 if (!htab
->stub_err
)
1465 elf_link_hash_traverse (&htab
->elf
, build_spuear_stubs
, info
);
1469 (*_bfd_error_handler
) (_("overlay stub relocation overflow"));
1470 bfd_set_error (bfd_error_bad_value
);
1474 for (i
= 0; i
<= htab
->num_overlays
; i
++)
1476 if (htab
->stub_sec
[i
]->size
!= htab
->stub_sec
[i
]->rawsize
)
1478 (*_bfd_error_handler
) (_("stubs don't match calculated size"));
1479 bfd_set_error (bfd_error_bad_value
);
1482 htab
->stub_sec
[i
]->rawsize
= 0;
1485 htab
->ovtab
->contents
= bfd_zalloc (htab
->ovtab
->owner
, htab
->ovtab
->size
);
1486 if (htab
->ovtab
->contents
== NULL
)
1489 /* Write out _ovly_table. */
1490 p
= htab
->ovtab
->contents
;
1491 /* set low bit of .size to mark non-overlay area as present. */
1493 obfd
= htab
->ovtab
->output_section
->owner
;
1494 for (s
= obfd
->sections
; s
!= NULL
; s
= s
->next
)
1496 unsigned int ovl_index
= spu_elf_section_data (s
)->u
.o
.ovl_index
;
1500 unsigned long off
= ovl_index
* 16;
1501 unsigned int ovl_buf
= spu_elf_section_data (s
)->u
.o
.ovl_buf
;
1503 bfd_put_32 (htab
->ovtab
->owner
, s
->vma
, p
+ off
);
1504 bfd_put_32 (htab
->ovtab
->owner
, (s
->size
+ 15) & -16, p
+ off
+ 4);
1505 /* file_off written later in spu_elf_modify_program_headers. */
1506 bfd_put_32 (htab
->ovtab
->owner
, ovl_buf
, p
+ off
+ 12);
1510 h
= define_ovtab_symbol (htab
, "_ovly_table");
1513 h
->root
.u
.def
.value
= 16;
1514 h
->size
= htab
->num_overlays
* 16;
1516 h
= define_ovtab_symbol (htab
, "_ovly_table_end");
1519 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16;
1522 h
= define_ovtab_symbol (htab
, "_ovly_buf_table");
1525 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16;
1526 h
->size
= htab
->num_buf
* 4;
1528 h
= define_ovtab_symbol (htab
, "_ovly_buf_table_end");
1531 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16 + htab
->num_buf
* 4;
1534 h
= define_ovtab_symbol (htab
, "_EAR_");
1537 h
->root
.u
.def
.section
= htab
->toe
;
1538 h
->root
.u
.def
.value
= 0;
1544 /* Check that all loadable section VMAs lie in the range
1545 LO .. HI inclusive, and stash some parameters for --auto-overlay. */
1548 spu_elf_check_vma (struct bfd_link_info
*info
,
1552 unsigned int overlay_fixed
,
1553 unsigned int reserved
,
1554 int extra_stack_space
,
1555 void (*spu_elf_load_ovl_mgr
) (void),
1556 FILE *(*spu_elf_open_overlay_script
) (void),
1557 void (*spu_elf_relink
) (void))
1559 struct elf_segment_map
*m
;
1561 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1562 bfd
*abfd
= info
->output_bfd
;
1564 if (auto_overlay
& AUTO_OVERLAY
)
1565 htab
->auto_overlay
= auto_overlay
;
1566 htab
->local_store
= hi
+ 1 - lo
;
1567 htab
->overlay_fixed
= overlay_fixed
;
1568 htab
->reserved
= reserved
;
1569 htab
->extra_stack_space
= extra_stack_space
;
1570 htab
->spu_elf_load_ovl_mgr
= spu_elf_load_ovl_mgr
;
1571 htab
->spu_elf_open_overlay_script
= spu_elf_open_overlay_script
;
1572 htab
->spu_elf_relink
= spu_elf_relink
;
1574 for (m
= elf_tdata (abfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
1575 if (m
->p_type
== PT_LOAD
)
1576 for (i
= 0; i
< m
->count
; i
++)
1577 if (m
->sections
[i
]->size
!= 0
1578 && (m
->sections
[i
]->vma
< lo
1579 || m
->sections
[i
]->vma
> hi
1580 || m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1 > hi
))
1581 return m
->sections
[i
];
1583 /* No need for overlays if it all fits. */
1584 htab
->auto_overlay
= 0;
1588 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
1589 Search for stack adjusting insns, and return the sp delta. */
1592 find_function_stack_adjust (asection
*sec
, bfd_vma offset
)
1597 memset (reg
, 0, sizeof (reg
));
1598 for (unrecog
= 0; offset
+ 4 <= sec
->size
&& unrecog
< 32; offset
+= 4)
1600 unsigned char buf
[4];
1604 /* Assume no relocs on stack adjusing insns. */
1605 if (!bfd_get_section_contents (sec
->owner
, sec
, buf
, offset
, 4))
1608 if (buf
[0] == 0x24 /* stqd */)
1612 ra
= ((buf
[2] & 0x3f) << 1) | (buf
[3] >> 7);
1613 /* Partly decoded immediate field. */
1614 imm
= (buf
[1] << 9) | (buf
[2] << 1) | (buf
[3] >> 7);
1616 if (buf
[0] == 0x1c /* ai */)
1619 imm
= (imm
^ 0x200) - 0x200;
1620 reg
[rt
] = reg
[ra
] + imm
;
1622 if (rt
== 1 /* sp */)
1629 else if (buf
[0] == 0x18 && (buf
[1] & 0xe0) == 0 /* a */)
1631 int rb
= ((buf
[1] & 0x1f) << 2) | ((buf
[2] & 0xc0) >> 6);
1633 reg
[rt
] = reg
[ra
] + reg
[rb
];
1637 else if ((buf
[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
1639 if (buf
[0] >= 0x42 /* ila */)
1640 imm
|= (buf
[0] & 1) << 17;
1645 if (buf
[0] == 0x40 /* il */)
1647 if ((buf
[1] & 0x80) == 0)
1649 imm
= (imm
^ 0x8000) - 0x8000;
1651 else if ((buf
[1] & 0x80) == 0 /* ilhu */)
1657 else if (buf
[0] == 0x60 && (buf
[1] & 0x80) != 0 /* iohl */)
1659 reg
[rt
] |= imm
& 0xffff;
1662 else if (buf
[0] == 0x04 /* ori */)
1665 imm
= (imm
^ 0x200) - 0x200;
1666 reg
[rt
] = reg
[ra
] | imm
;
1669 else if ((buf
[0] == 0x33 && imm
== 1 /* brsl .+4 */)
1670 || (buf
[0] == 0x08 && (buf
[1] & 0xe0) == 0 /* sf */))
1672 /* Used in pic reg load. Say rt is trashed. */
1676 else if (is_branch (buf
) || is_indirect_branch (buf
))
1677 /* If we hit a branch then we must be out of the prologue. */
1686 /* qsort predicate to sort symbols by section and value. */
1688 static Elf_Internal_Sym
*sort_syms_syms
;
1689 static asection
**sort_syms_psecs
;
1692 sort_syms (const void *a
, const void *b
)
1694 Elf_Internal_Sym
*const *s1
= a
;
1695 Elf_Internal_Sym
*const *s2
= b
;
1696 asection
*sec1
,*sec2
;
1697 bfd_signed_vma delta
;
1699 sec1
= sort_syms_psecs
[*s1
- sort_syms_syms
];
1700 sec2
= sort_syms_psecs
[*s2
- sort_syms_syms
];
1703 return sec1
->index
- sec2
->index
;
1705 delta
= (*s1
)->st_value
- (*s2
)->st_value
;
1707 return delta
< 0 ? -1 : 1;
1709 delta
= (*s2
)->st_size
- (*s1
)->st_size
;
1711 return delta
< 0 ? -1 : 1;
1713 return *s1
< *s2
? -1 : 1;
1718 struct function_info
*fun
;
1719 struct call_info
*next
;
1721 unsigned int max_depth
;
1722 unsigned int is_tail
: 1;
1723 unsigned int is_pasted
: 1;
1726 struct function_info
1728 /* List of functions called. Also branches to hot/cold part of
1730 struct call_info
*call_list
;
1731 /* For hot/cold part of function, point to owner. */
1732 struct function_info
*start
;
1733 /* Symbol at start of function. */
1735 Elf_Internal_Sym
*sym
;
1736 struct elf_link_hash_entry
*h
;
1738 /* Function section. */
1741 /* Where last called from, and number of sections called from. */
1742 asection
*last_caller
;
1743 unsigned int call_count
;
1744 /* Address range of (this part of) function. */
1748 /* Distance from root of call tree. Tail and hot/cold branches
1749 count as one deeper. We aren't counting stack frames here. */
1751 /* Set if global symbol. */
1752 unsigned int global
: 1;
1753 /* Set if known to be start of function (as distinct from a hunk
1754 in hot/cold section. */
1755 unsigned int is_func
: 1;
1756 /* Set if not a root node. */
1757 unsigned int non_root
: 1;
1758 /* Flags used during call tree traversal. It's cheaper to replicate
1759 the visit flags than have one which needs clearing after a traversal. */
1760 unsigned int visit1
: 1;
1761 unsigned int visit2
: 1;
1762 unsigned int marking
: 1;
1763 unsigned int visit3
: 1;
1764 unsigned int visit4
: 1;
1765 unsigned int visit5
: 1;
1766 unsigned int visit6
: 1;
1767 unsigned int visit7
: 1;
1770 struct spu_elf_stack_info
1774 /* Variable size array describing functions, one per contiguous
1775 address range belonging to a function. */
1776 struct function_info fun
[1];
1779 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
1780 entries for section SEC. */
1782 static struct spu_elf_stack_info
*
1783 alloc_stack_info (asection
*sec
, int max_fun
)
1785 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1788 amt
= sizeof (struct spu_elf_stack_info
);
1789 amt
+= (max_fun
- 1) * sizeof (struct function_info
);
1790 sec_data
->u
.i
.stack_info
= bfd_zmalloc (amt
);
1791 if (sec_data
->u
.i
.stack_info
!= NULL
)
1792 sec_data
->u
.i
.stack_info
->max_fun
= max_fun
;
1793 return sec_data
->u
.i
.stack_info
;
1796 /* Add a new struct function_info describing a (part of a) function
1797 starting at SYM_H. Keep the array sorted by address. */
1799 static struct function_info
*
1800 maybe_insert_function (asection
*sec
,
1803 bfd_boolean is_func
)
1805 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1806 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
1812 sinfo
= alloc_stack_info (sec
, 20);
1819 Elf_Internal_Sym
*sym
= sym_h
;
1820 off
= sym
->st_value
;
1821 size
= sym
->st_size
;
1825 struct elf_link_hash_entry
*h
= sym_h
;
1826 off
= h
->root
.u
.def
.value
;
1830 for (i
= sinfo
->num_fun
; --i
>= 0; )
1831 if (sinfo
->fun
[i
].lo
<= off
)
1836 /* Don't add another entry for an alias, but do update some
1838 if (sinfo
->fun
[i
].lo
== off
)
1840 /* Prefer globals over local syms. */
1841 if (global
&& !sinfo
->fun
[i
].global
)
1843 sinfo
->fun
[i
].global
= TRUE
;
1844 sinfo
->fun
[i
].u
.h
= sym_h
;
1847 sinfo
->fun
[i
].is_func
= TRUE
;
1848 return &sinfo
->fun
[i
];
1850 /* Ignore a zero-size symbol inside an existing function. */
1851 else if (sinfo
->fun
[i
].hi
> off
&& size
== 0)
1852 return &sinfo
->fun
[i
];
1855 if (sinfo
->num_fun
>= sinfo
->max_fun
)
1857 bfd_size_type amt
= sizeof (struct spu_elf_stack_info
);
1858 bfd_size_type old
= amt
;
1860 old
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
1861 sinfo
->max_fun
+= 20 + (sinfo
->max_fun
>> 1);
1862 amt
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
1863 sinfo
= bfd_realloc (sinfo
, amt
);
1866 memset ((char *) sinfo
+ old
, 0, amt
- old
);
1867 sec_data
->u
.i
.stack_info
= sinfo
;
1870 if (++i
< sinfo
->num_fun
)
1871 memmove (&sinfo
->fun
[i
+ 1], &sinfo
->fun
[i
],
1872 (sinfo
->num_fun
- i
) * sizeof (sinfo
->fun
[i
]));
1873 sinfo
->fun
[i
].is_func
= is_func
;
1874 sinfo
->fun
[i
].global
= global
;
1875 sinfo
->fun
[i
].sec
= sec
;
1877 sinfo
->fun
[i
].u
.h
= sym_h
;
1879 sinfo
->fun
[i
].u
.sym
= sym_h
;
1880 sinfo
->fun
[i
].lo
= off
;
1881 sinfo
->fun
[i
].hi
= off
+ size
;
1882 sinfo
->fun
[i
].stack
= -find_function_stack_adjust (sec
, off
);
1883 sinfo
->num_fun
+= 1;
1884 return &sinfo
->fun
[i
];
1887 /* Return the name of FUN. */
1890 func_name (struct function_info
*fun
)
1894 Elf_Internal_Shdr
*symtab_hdr
;
1896 while (fun
->start
!= NULL
)
1900 return fun
->u
.h
->root
.root
.string
;
1903 if (fun
->u
.sym
->st_name
== 0)
1905 size_t len
= strlen (sec
->name
);
1906 char *name
= bfd_malloc (len
+ 10);
1909 sprintf (name
, "%s+%lx", sec
->name
,
1910 (unsigned long) fun
->u
.sym
->st_value
& 0xffffffff);
1914 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
1915 return bfd_elf_sym_name (ibfd
, symtab_hdr
, fun
->u
.sym
, sec
);
1918 /* Read the instruction at OFF in SEC. Return true iff the instruction
1919 is a nop, lnop, or stop 0 (all zero insn). */
1922 is_nop (asection
*sec
, bfd_vma off
)
1924 unsigned char insn
[4];
1926 if (off
+ 4 > sec
->size
1927 || !bfd_get_section_contents (sec
->owner
, sec
, insn
, off
, 4))
1929 if ((insn
[0] & 0xbf) == 0 && (insn
[1] & 0xe0) == 0x20)
1931 if (insn
[0] == 0 && insn
[1] == 0 && insn
[2] == 0 && insn
[3] == 0)
1936 /* Extend the range of FUN to cover nop padding up to LIMIT.
1937 Return TRUE iff some instruction other than a NOP was found. */
1940 insns_at_end (struct function_info
*fun
, bfd_vma limit
)
1942 bfd_vma off
= (fun
->hi
+ 3) & -4;
1944 while (off
< limit
&& is_nop (fun
->sec
, off
))
1955 /* Check and fix overlapping function ranges. Return TRUE iff there
1956 are gaps in the current info we have about functions in SEC. */
1959 check_function_ranges (asection
*sec
, struct bfd_link_info
*info
)
1961 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
1962 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
1964 bfd_boolean gaps
= FALSE
;
1969 for (i
= 1; i
< sinfo
->num_fun
; i
++)
1970 if (sinfo
->fun
[i
- 1].hi
> sinfo
->fun
[i
].lo
)
1972 /* Fix overlapping symbols. */
1973 const char *f1
= func_name (&sinfo
->fun
[i
- 1]);
1974 const char *f2
= func_name (&sinfo
->fun
[i
]);
1976 info
->callbacks
->einfo (_("warning: %s overlaps %s\n"), f1
, f2
);
1977 sinfo
->fun
[i
- 1].hi
= sinfo
->fun
[i
].lo
;
1979 else if (insns_at_end (&sinfo
->fun
[i
- 1], sinfo
->fun
[i
].lo
))
1982 if (sinfo
->num_fun
== 0)
1986 if (sinfo
->fun
[0].lo
!= 0)
1988 if (sinfo
->fun
[sinfo
->num_fun
- 1].hi
> sec
->size
)
1990 const char *f1
= func_name (&sinfo
->fun
[sinfo
->num_fun
- 1]);
1992 info
->callbacks
->einfo (_("warning: %s exceeds section size\n"), f1
);
1993 sinfo
->fun
[sinfo
->num_fun
- 1].hi
= sec
->size
;
1995 else if (insns_at_end (&sinfo
->fun
[sinfo
->num_fun
- 1], sec
->size
))
2001 /* Search current function info for a function that contains address
2002 OFFSET in section SEC. */
2004 static struct function_info
*
2005 find_function (asection
*sec
, bfd_vma offset
, struct bfd_link_info
*info
)
2007 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2008 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
2012 hi
= sinfo
->num_fun
;
2015 mid
= (lo
+ hi
) / 2;
2016 if (offset
< sinfo
->fun
[mid
].lo
)
2018 else if (offset
>= sinfo
->fun
[mid
].hi
)
2021 return &sinfo
->fun
[mid
];
2023 info
->callbacks
->einfo (_("%A:0x%v not found in function table\n"),
2028 /* Add CALLEE to CALLER call list if not already present. Return TRUE
2029 if CALLEE was new. If this function return FALSE, CALLEE should
2033 insert_callee (struct function_info
*caller
, struct call_info
*callee
)
2035 struct call_info
**pp
, *p
;
2037 for (pp
= &caller
->call_list
; (p
= *pp
) != NULL
; pp
= &p
->next
)
2038 if (p
->fun
== callee
->fun
)
2040 /* Tail calls use less stack than normal calls. Retain entry
2041 for normal call over one for tail call. */
2042 p
->is_tail
&= callee
->is_tail
;
2045 p
->fun
->start
= NULL
;
2046 p
->fun
->is_func
= TRUE
;
2049 /* Reorder list so most recent call is first. */
2051 p
->next
= caller
->call_list
;
2052 caller
->call_list
= p
;
2055 callee
->next
= caller
->call_list
;
2057 caller
->call_list
= callee
;
2061 /* Copy CALL and insert the copy into CALLER. */
2064 copy_callee (struct function_info
*caller
, const struct call_info
*call
)
2066 struct call_info
*callee
;
2067 callee
= bfd_malloc (sizeof (*callee
));
2071 if (!insert_callee (caller
, callee
))
2076 /* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2077 overlay stub sections. */
2080 interesting_section (asection
*s
, bfd
*obfd
)
2082 return (s
->output_section
!= NULL
2083 && s
->output_section
->owner
== obfd
2084 && ((s
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_IN_MEMORY
))
2085 == (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2089 /* Rummage through the relocs for SEC, looking for function calls.
2090 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
2091 mark destination symbols on calls as being functions. Also
2092 look at branches, which may be tail calls or go to hot/cold
2093 section part of same function. */
2096 mark_functions_via_relocs (asection
*sec
,
2097 struct bfd_link_info
*info
,
2100 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
2101 Elf_Internal_Shdr
*symtab_hdr
;
2103 static bfd_boolean warned
;
2105 if (!interesting_section (sec
, info
->output_bfd
)
2106 || sec
->reloc_count
== 0)
2109 internal_relocs
= _bfd_elf_link_read_relocs (sec
->owner
, sec
, NULL
, NULL
,
2111 if (internal_relocs
== NULL
)
2114 symtab_hdr
= &elf_tdata (sec
->owner
)->symtab_hdr
;
2115 psyms
= &symtab_hdr
->contents
;
2116 irela
= internal_relocs
;
2117 irelaend
= irela
+ sec
->reloc_count
;
2118 for (; irela
< irelaend
; irela
++)
2120 enum elf_spu_reloc_type r_type
;
2121 unsigned int r_indx
;
2123 Elf_Internal_Sym
*sym
;
2124 struct elf_link_hash_entry
*h
;
2126 bfd_boolean reject
, is_call
;
2127 struct function_info
*caller
;
2128 struct call_info
*callee
;
2131 r_type
= ELF32_R_TYPE (irela
->r_info
);
2132 if (r_type
!= R_SPU_REL16
2133 && r_type
!= R_SPU_ADDR16
)
2136 if (!(call_tree
&& spu_hash_table (info
)->auto_overlay
))
2140 r_indx
= ELF32_R_SYM (irela
->r_info
);
2141 if (!get_sym_h (&h
, &sym
, &sym_sec
, psyms
, r_indx
, sec
->owner
))
2145 || sym_sec
->output_section
== NULL
2146 || sym_sec
->output_section
->owner
!= info
->output_bfd
)
2152 unsigned char insn
[4];
2154 if (!bfd_get_section_contents (sec
->owner
, sec
, insn
,
2155 irela
->r_offset
, 4))
2157 if (is_branch (insn
))
2159 is_call
= (insn
[0] & 0xfd) == 0x31;
2160 if ((sym_sec
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2161 != (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2164 info
->callbacks
->einfo
2165 (_("%B(%A+0x%v): call to non-code section"
2166 " %B(%A), analysis incomplete\n"),
2167 sec
->owner
, sec
, irela
->r_offset
,
2168 sym_sec
->owner
, sym_sec
);
2176 if (!(call_tree
&& spu_hash_table (info
)->auto_overlay
)
2184 /* For --auto-overlay, count possible stubs we need for
2185 function pointer references. */
2186 unsigned int sym_type
;
2190 sym_type
= ELF_ST_TYPE (sym
->st_info
);
2191 if (sym_type
== STT_FUNC
)
2192 spu_hash_table (info
)->non_ovly_stub
+= 1;
2197 val
= h
->root
.u
.def
.value
;
2199 val
= sym
->st_value
;
2200 val
+= irela
->r_addend
;
2204 struct function_info
*fun
;
2206 if (irela
->r_addend
!= 0)
2208 Elf_Internal_Sym
*fake
= bfd_zmalloc (sizeof (*fake
));
2211 fake
->st_value
= val
;
2213 = _bfd_elf_section_from_bfd_section (sym_sec
->owner
, sym_sec
);
2217 fun
= maybe_insert_function (sym_sec
, sym
, FALSE
, is_call
);
2219 fun
= maybe_insert_function (sym_sec
, h
, TRUE
, is_call
);
2222 if (irela
->r_addend
!= 0
2223 && fun
->u
.sym
!= sym
)
2228 caller
= find_function (sec
, irela
->r_offset
, info
);
2231 callee
= bfd_malloc (sizeof *callee
);
2235 callee
->fun
= find_function (sym_sec
, val
, info
);
2236 if (callee
->fun
== NULL
)
2238 callee
->is_tail
= !is_call
;
2239 callee
->is_pasted
= FALSE
;
2241 if (callee
->fun
->last_caller
!= sec
)
2243 callee
->fun
->last_caller
= sec
;
2244 callee
->fun
->call_count
+= 1;
2246 if (!insert_callee (caller
, callee
))
2249 && !callee
->fun
->is_func
2250 && callee
->fun
->stack
== 0)
2252 /* This is either a tail call or a branch from one part of
2253 the function to another, ie. hot/cold section. If the
2254 destination has been called by some other function then
2255 it is a separate function. We also assume that functions
2256 are not split across input files. */
2257 if (sec
->owner
!= sym_sec
->owner
)
2259 callee
->fun
->start
= NULL
;
2260 callee
->fun
->is_func
= TRUE
;
2262 else if (callee
->fun
->start
== NULL
)
2263 callee
->fun
->start
= caller
;
2266 struct function_info
*callee_start
;
2267 struct function_info
*caller_start
;
2268 callee_start
= callee
->fun
;
2269 while (callee_start
->start
)
2270 callee_start
= callee_start
->start
;
2271 caller_start
= caller
;
2272 while (caller_start
->start
)
2273 caller_start
= caller_start
->start
;
2274 if (caller_start
!= callee_start
)
2276 callee
->fun
->start
= NULL
;
2277 callee
->fun
->is_func
= TRUE
;
2286 /* Handle something like .init or .fini, which has a piece of a function.
2287 These sections are pasted together to form a single function. */
2290 pasted_function (asection
*sec
, struct bfd_link_info
*info
)
2292 struct bfd_link_order
*l
;
2293 struct _spu_elf_section_data
*sec_data
;
2294 struct spu_elf_stack_info
*sinfo
;
2295 Elf_Internal_Sym
*fake
;
2296 struct function_info
*fun
, *fun_start
;
2298 fake
= bfd_zmalloc (sizeof (*fake
));
2302 fake
->st_size
= sec
->size
;
2304 = _bfd_elf_section_from_bfd_section (sec
->owner
, sec
);
2305 fun
= maybe_insert_function (sec
, fake
, FALSE
, FALSE
);
2309 /* Find a function immediately preceding this section. */
2311 for (l
= sec
->output_section
->map_head
.link_order
; l
!= NULL
; l
= l
->next
)
2313 if (l
->u
.indirect
.section
== sec
)
2315 if (fun_start
!= NULL
)
2317 struct call_info
*callee
= bfd_malloc (sizeof *callee
);
2321 fun
->start
= fun_start
;
2323 callee
->is_tail
= TRUE
;
2324 callee
->is_pasted
= TRUE
;
2326 if (!insert_callee (fun_start
, callee
))
2332 if (l
->type
== bfd_indirect_link_order
2333 && (sec_data
= spu_elf_section_data (l
->u
.indirect
.section
)) != NULL
2334 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
2335 && sinfo
->num_fun
!= 0)
2336 fun_start
= &sinfo
->fun
[sinfo
->num_fun
- 1];
2339 info
->callbacks
->einfo (_("%A link_order not found\n"), sec
);
2343 /* Map address ranges in code sections to functions. */
2346 discover_functions (struct bfd_link_info
*info
)
2350 Elf_Internal_Sym
***psym_arr
;
2351 asection
***sec_arr
;
2352 bfd_boolean gaps
= FALSE
;
2355 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2358 psym_arr
= bfd_zmalloc (bfd_idx
* sizeof (*psym_arr
));
2359 if (psym_arr
== NULL
)
2361 sec_arr
= bfd_zmalloc (bfd_idx
* sizeof (*sec_arr
));
2362 if (sec_arr
== NULL
)
2366 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2368 ibfd
= ibfd
->link_next
, bfd_idx
++)
2370 extern const bfd_target bfd_elf32_spu_vec
;
2371 Elf_Internal_Shdr
*symtab_hdr
;
2374 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
2375 asection
**psecs
, **p
;
2377 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2380 /* Read all the symbols. */
2381 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2382 symcount
= symtab_hdr
->sh_size
/ symtab_hdr
->sh_entsize
;
2386 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2387 if (interesting_section (sec
, info
->output_bfd
))
2395 if (symtab_hdr
->contents
!= NULL
)
2397 /* Don't use cached symbols since the generic ELF linker
2398 code only reads local symbols, and we need globals too. */
2399 free (symtab_hdr
->contents
);
2400 symtab_hdr
->contents
= NULL
;
2402 syms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
, symcount
, 0,
2404 symtab_hdr
->contents
= (void *) syms
;
2408 /* Select defined function symbols that are going to be output. */
2409 psyms
= bfd_malloc ((symcount
+ 1) * sizeof (*psyms
));
2412 psym_arr
[bfd_idx
] = psyms
;
2413 psecs
= bfd_malloc (symcount
* sizeof (*psecs
));
2416 sec_arr
[bfd_idx
] = psecs
;
2417 for (psy
= psyms
, p
= psecs
, sy
= syms
; sy
< syms
+ symcount
; ++p
, ++sy
)
2418 if (ELF_ST_TYPE (sy
->st_info
) == STT_NOTYPE
2419 || ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
2423 *p
= s
= bfd_section_from_elf_index (ibfd
, sy
->st_shndx
);
2424 if (s
!= NULL
&& interesting_section (s
, info
->output_bfd
))
2427 symcount
= psy
- psyms
;
2430 /* Sort them by section and offset within section. */
2431 sort_syms_syms
= syms
;
2432 sort_syms_psecs
= psecs
;
2433 qsort (psyms
, symcount
, sizeof (*psyms
), sort_syms
);
2435 /* Now inspect the function symbols. */
2436 for (psy
= psyms
; psy
< psyms
+ symcount
; )
2438 asection
*s
= psecs
[*psy
- syms
];
2439 Elf_Internal_Sym
**psy2
;
2441 for (psy2
= psy
; ++psy2
< psyms
+ symcount
; )
2442 if (psecs
[*psy2
- syms
] != s
)
2445 if (!alloc_stack_info (s
, psy2
- psy
))
2450 /* First install info about properly typed and sized functions.
2451 In an ideal world this will cover all code sections, except
2452 when partitioning functions into hot and cold sections,
2453 and the horrible pasted together .init and .fini functions. */
2454 for (psy
= psyms
; psy
< psyms
+ symcount
; ++psy
)
2457 if (ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
2459 asection
*s
= psecs
[sy
- syms
];
2460 if (!maybe_insert_function (s
, sy
, FALSE
, TRUE
))
2465 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2466 if (interesting_section (sec
, info
->output_bfd
))
2467 gaps
|= check_function_ranges (sec
, info
);
2472 /* See if we can discover more function symbols by looking at
2474 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2476 ibfd
= ibfd
->link_next
, bfd_idx
++)
2480 if (psym_arr
[bfd_idx
] == NULL
)
2483 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2484 if (!mark_functions_via_relocs (sec
, info
, FALSE
))
2488 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2490 ibfd
= ibfd
->link_next
, bfd_idx
++)
2492 Elf_Internal_Shdr
*symtab_hdr
;
2494 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
2497 if ((psyms
= psym_arr
[bfd_idx
]) == NULL
)
2500 psecs
= sec_arr
[bfd_idx
];
2502 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2503 syms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
2506 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2507 if (interesting_section (sec
, info
->output_bfd
))
2508 gaps
|= check_function_ranges (sec
, info
);
2512 /* Finally, install all globals. */
2513 for (psy
= psyms
; (sy
= *psy
) != NULL
; ++psy
)
2517 s
= psecs
[sy
- syms
];
2519 /* Global syms might be improperly typed functions. */
2520 if (ELF_ST_TYPE (sy
->st_info
) != STT_FUNC
2521 && ELF_ST_BIND (sy
->st_info
) == STB_GLOBAL
)
2523 if (!maybe_insert_function (s
, sy
, FALSE
, FALSE
))
2529 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2531 extern const bfd_target bfd_elf32_spu_vec
;
2534 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2537 /* Some of the symbols we've installed as marking the
2538 beginning of functions may have a size of zero. Extend
2539 the range of such functions to the beginning of the
2540 next symbol of interest. */
2541 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2542 if (interesting_section (sec
, info
->output_bfd
))
2544 struct _spu_elf_section_data
*sec_data
;
2545 struct spu_elf_stack_info
*sinfo
;
2547 sec_data
= spu_elf_section_data (sec
);
2548 sinfo
= sec_data
->u
.i
.stack_info
;
2552 bfd_vma hi
= sec
->size
;
2554 for (fun_idx
= sinfo
->num_fun
; --fun_idx
>= 0; )
2556 sinfo
->fun
[fun_idx
].hi
= hi
;
2557 hi
= sinfo
->fun
[fun_idx
].lo
;
2560 /* No symbols in this section. Must be .init or .fini
2561 or something similar. */
2562 else if (!pasted_function (sec
, info
))
2568 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2570 ibfd
= ibfd
->link_next
, bfd_idx
++)
2572 if (psym_arr
[bfd_idx
] == NULL
)
2575 free (psym_arr
[bfd_idx
]);
2576 free (sec_arr
[bfd_idx
]);
2585 /* Iterate over all function_info we have collected, calling DOIT on
2586 each node if ROOT_ONLY is false. Only call DOIT on root nodes
2590 for_each_node (bfd_boolean (*doit
) (struct function_info
*,
2591 struct bfd_link_info
*,
2593 struct bfd_link_info
*info
,
2599 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2601 extern const bfd_target bfd_elf32_spu_vec
;
2604 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2607 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2609 struct _spu_elf_section_data
*sec_data
;
2610 struct spu_elf_stack_info
*sinfo
;
2612 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
2613 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
2616 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
2617 if (!root_only
|| !sinfo
->fun
[i
].non_root
)
2618 if (!doit (&sinfo
->fun
[i
], info
, param
))
2626 /* Transfer call info attached to struct function_info entries for
2627 all of a given function's sections to the first entry. */
2630 transfer_calls (struct function_info
*fun
,
2631 struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
2632 void *param ATTRIBUTE_UNUSED
)
2634 struct function_info
*start
= fun
->start
;
2638 struct call_info
*call
, *call_next
;
2640 while (start
->start
!= NULL
)
2641 start
= start
->start
;
2642 for (call
= fun
->call_list
; call
!= NULL
; call
= call_next
)
2644 call_next
= call
->next
;
2645 if (!insert_callee (start
, call
))
2648 fun
->call_list
= NULL
;
2653 /* Mark nodes in the call graph that are called by some other node. */
2656 mark_non_root (struct function_info
*fun
,
2657 struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
2658 void *param ATTRIBUTE_UNUSED
)
2660 struct call_info
*call
;
2665 for (call
= fun
->call_list
; call
; call
= call
->next
)
2667 call
->fun
->non_root
= TRUE
;
2668 mark_non_root (call
->fun
, 0, 0);
2673 /* Remove cycles from the call graph. Set depth of nodes. */
2676 remove_cycles (struct function_info
*fun
,
2677 struct bfd_link_info
*info
,
2680 struct call_info
**callp
, *call
;
2681 unsigned int depth
= *(unsigned int *) param
;
2682 unsigned int max_depth
= depth
;
2686 fun
->marking
= TRUE
;
2688 callp
= &fun
->call_list
;
2689 while ((call
= *callp
) != NULL
)
2691 if (!call
->fun
->visit2
)
2693 call
->max_depth
= depth
+ !call
->is_pasted
;
2694 if (!remove_cycles (call
->fun
, info
, &call
->max_depth
))
2696 if (max_depth
< call
->max_depth
)
2697 max_depth
= call
->max_depth
;
2699 else if (call
->fun
->marking
)
2701 if (!spu_hash_table (info
)->auto_overlay
)
2703 const char *f1
= func_name (fun
);
2704 const char *f2
= func_name (call
->fun
);
2706 info
->callbacks
->info (_("Stack analysis will ignore the call "
2710 *callp
= call
->next
;
2714 callp
= &call
->next
;
2716 fun
->marking
= FALSE
;
2717 *(unsigned int *) param
= max_depth
;
2721 /* Populate call_list for each function. */
2724 build_call_tree (struct bfd_link_info
*info
)
2729 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2731 extern const bfd_target bfd_elf32_spu_vec
;
2734 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2737 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2738 if (!mark_functions_via_relocs (sec
, info
, TRUE
))
2742 /* Transfer call info from hot/cold section part of function
2744 if (!spu_hash_table (info
)->auto_overlay
2745 && !for_each_node (transfer_calls
, info
, 0, FALSE
))
2748 /* Find the call graph root(s). */
2749 if (!for_each_node (mark_non_root
, info
, 0, FALSE
))
2752 /* Remove cycles from the call graph. We start from the root node(s)
2753 so that we break cycles in a reasonable place. */
2755 return for_each_node (remove_cycles
, info
, &depth
, TRUE
);
2758 /* qsort predicate to sort calls by max_depth then count. */
2761 sort_calls (const void *a
, const void *b
)
2763 struct call_info
*const *c1
= a
;
2764 struct call_info
*const *c2
= b
;
2767 delta
= (*c2
)->max_depth
- (*c1
)->max_depth
;
2771 delta
= (*c2
)->count
- (*c1
)->count
;
/* Parameter block for mark_overlay_section: records the largest
   single overlay (text plus rodata) seen while walking the call graph.
   NOTE(review): struct header was elided in the mangled source; only
   this member is visible.  */
struct _mos_param {
  unsigned int max_overlay_size;
};
2782 /* Set linker_mark and gc_mark on any sections that we will put in
2783 overlays. These flags are used by the generic ELF linker, but we
2784 won't be continuing on to bfd_elf_final_link so it is OK to use
2785 them. linker_mark is clear before we get here. Set segment_mark
2786 on sections that are part of a pasted function (excluding the last
2789 Set up function rodata section if --overlay-rodata. We don't
2790 currently include merged string constant rodata sections since
2792 Sort the call graph so that the deepest nodes will be visited
2796 mark_overlay_section (struct function_info
*fun
,
2797 struct bfd_link_info
*info
,
2800 struct call_info
*call
;
2802 struct _mos_param
*mos_param
= param
;
2808 if (!fun
->sec
->linker_mark
)
2810 fun
->sec
->linker_mark
= 1;
2811 fun
->sec
->gc_mark
= 1;
2812 fun
->sec
->segment_mark
= 0;
2813 /* Ensure SEC_CODE is set on this text section (it ought to
2814 be!), and SEC_CODE is clear on rodata sections. We use
2815 this flag to differentiate the two overlay section types. */
2816 fun
->sec
->flags
|= SEC_CODE
;
2817 if (spu_hash_table (info
)->auto_overlay
& OVERLAY_RODATA
)
2822 /* Find the rodata section corresponding to this function's
2824 if (strcmp (fun
->sec
->name
, ".text") == 0)
2826 name
= bfd_malloc (sizeof (".rodata"));
2829 memcpy (name
, ".rodata", sizeof (".rodata"));
2831 else if (strncmp (fun
->sec
->name
, ".text.", 6) == 0)
2833 size_t len
= strlen (fun
->sec
->name
);
2834 name
= bfd_malloc (len
+ 3);
2837 memcpy (name
, ".rodata", sizeof (".rodata"));
2838 memcpy (name
+ 7, fun
->sec
->name
+ 5, len
- 4);
2840 else if (strncmp (fun
->sec
->name
, ".gnu.linkonce.t.", 16) == 0)
2842 size_t len
= strlen (fun
->sec
->name
) + 1;
2843 name
= bfd_malloc (len
);
2846 memcpy (name
, fun
->sec
->name
, len
);
2852 asection
*rodata
= NULL
;
2853 asection
*group_sec
= elf_section_data (fun
->sec
)->next_in_group
;
2854 if (group_sec
== NULL
)
2855 rodata
= bfd_get_section_by_name (fun
->sec
->owner
, name
);
2857 while (group_sec
!= NULL
&& group_sec
!= fun
->sec
)
2859 if (strcmp (group_sec
->name
, name
) == 0)
2864 group_sec
= elf_section_data (group_sec
)->next_in_group
;
2866 fun
->rodata
= rodata
;
2869 fun
->rodata
->linker_mark
= 1;
2870 fun
->rodata
->gc_mark
= 1;
2871 fun
->rodata
->flags
&= ~SEC_CODE
;
2875 size
= fun
->sec
->size
;
2877 size
+= fun
->rodata
->size
;
2878 if (mos_param
->max_overlay_size
< size
)
2879 mos_param
->max_overlay_size
= size
;
2883 for (count
= 0, call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
2888 struct call_info
**calls
= bfd_malloc (count
* sizeof (*calls
));
2892 for (count
= 0, call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
2893 calls
[count
++] = call
;
2895 qsort (calls
, count
, sizeof (*calls
), sort_calls
);
2897 fun
->call_list
= NULL
;
2901 calls
[count
]->next
= fun
->call_list
;
2902 fun
->call_list
= calls
[count
];
2907 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
2909 if (call
->is_pasted
)
2911 /* There can only be one is_pasted call per function_info. */
2912 BFD_ASSERT (!fun
->sec
->segment_mark
);
2913 fun
->sec
->segment_mark
= 1;
2915 if (!mark_overlay_section (call
->fun
, info
, param
))
2919 /* Don't put entry code into an overlay. The overlay manager needs
2921 if (fun
->lo
+ fun
->sec
->output_offset
+ fun
->sec
->output_section
->vma
2922 == info
->output_bfd
->start_address
)
2924 fun
->sec
->linker_mark
= 0;
2925 if (fun
->rodata
!= NULL
)
2926 fun
->rodata
->linker_mark
= 0;
2931 /* If non-zero then unmark functions called from those within sections
2932 that we need to unmark. Unfortunately this isn't reliable since the
2933 call graph cannot know the destination of function pointer calls. */
2934 #define RECURSE_UNMARK 0
2937 asection
*exclude_input_section
;
2938 asection
*exclude_output_section
;
2939 unsigned long clearing
;
2942 /* Undo some of mark_overlay_section's work. */
2945 unmark_overlay_section (struct function_info
*fun
,
2946 struct bfd_link_info
*info
,
2949 struct call_info
*call
;
2950 struct _uos_param
*uos_param
= param
;
2951 unsigned int excluded
= 0;
2959 if (fun
->sec
== uos_param
->exclude_input_section
2960 || fun
->sec
->output_section
== uos_param
->exclude_output_section
)
2964 uos_param
->clearing
+= excluded
;
2966 if (RECURSE_UNMARK
? uos_param
->clearing
: excluded
)
2968 fun
->sec
->linker_mark
= 0;
2970 fun
->rodata
->linker_mark
= 0;
2973 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
2974 if (!unmark_overlay_section (call
->fun
, info
, param
))
2978 uos_param
->clearing
-= excluded
;
2983 unsigned int lib_size
;
2984 asection
**lib_sections
;
2987 /* Add sections we have marked as belonging to overlays to an array
2988 for consideration as non-overlay sections. The array consist of
2989 pairs of sections, (text,rodata), for functions in the call graph. */
2992 collect_lib_sections (struct function_info
*fun
,
2993 struct bfd_link_info
*info
,
2996 struct _cl_param
*lib_param
= param
;
2997 struct call_info
*call
;
3004 if (!fun
->sec
->linker_mark
|| !fun
->sec
->gc_mark
|| fun
->sec
->segment_mark
)
3007 size
= fun
->sec
->size
;
3009 size
+= fun
->rodata
->size
;
3010 if (size
> lib_param
->lib_size
)
3013 *lib_param
->lib_sections
++ = fun
->sec
;
3014 fun
->sec
->gc_mark
= 0;
3015 if (fun
->rodata
&& fun
->rodata
->linker_mark
&& fun
->rodata
->gc_mark
)
3017 *lib_param
->lib_sections
++ = fun
->rodata
;
3018 fun
->rodata
->gc_mark
= 0;
3021 *lib_param
->lib_sections
++ = NULL
;
3023 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3024 collect_lib_sections (call
->fun
, info
, param
);
3029 /* qsort predicate to sort sections by call count. */
3032 sort_lib (const void *a
, const void *b
)
3034 asection
*const *s1
= a
;
3035 asection
*const *s2
= b
;
3036 struct _spu_elf_section_data
*sec_data
;
3037 struct spu_elf_stack_info
*sinfo
;
3041 if ((sec_data
= spu_elf_section_data (*s1
)) != NULL
3042 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3045 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3046 delta
-= sinfo
->fun
[i
].call_count
;
3049 if ((sec_data
= spu_elf_section_data (*s2
)) != NULL
3050 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3053 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3054 delta
+= sinfo
->fun
[i
].call_count
;
3063 /* Remove some sections from those marked to be in overlays. Choose
3064 those that are called from many places, likely library functions. */
3067 auto_ovl_lib_functions (struct bfd_link_info
*info
, unsigned int lib_size
)
3070 asection
**lib_sections
;
3071 unsigned int i
, lib_count
;
3072 struct _cl_param collect_lib_param
;
3073 struct function_info dummy_caller
;
3075 memset (&dummy_caller
, 0, sizeof (dummy_caller
));
3077 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3079 extern const bfd_target bfd_elf32_spu_vec
;
3082 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3085 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3086 if (sec
->linker_mark
3087 && sec
->size
< lib_size
3088 && (sec
->flags
& SEC_CODE
) != 0)
3091 lib_sections
= bfd_malloc (lib_count
* 2 * sizeof (*lib_sections
));
3092 if (lib_sections
== NULL
)
3093 return (unsigned int) -1;
3094 collect_lib_param
.lib_size
= lib_size
;
3095 collect_lib_param
.lib_sections
= lib_sections
;
3096 if (!for_each_node (collect_lib_sections
, info
, &collect_lib_param
,
3098 return (unsigned int) -1;
3099 lib_count
= (collect_lib_param
.lib_sections
- lib_sections
) / 2;
3101 /* Sort sections so that those with the most calls are first. */
3103 qsort (lib_sections
, lib_count
, 2 * sizeof (*lib_sections
), sort_lib
);
3105 for (i
= 0; i
< lib_count
; i
++)
3107 unsigned int tmp
, stub_size
;
3109 struct _spu_elf_section_data
*sec_data
;
3110 struct spu_elf_stack_info
*sinfo
;
3112 sec
= lib_sections
[2 * i
];
3113 /* If this section is OK, its size must be less than lib_size. */
3115 /* If it has a rodata section, then add that too. */
3116 if (lib_sections
[2 * i
+ 1])
3117 tmp
+= lib_sections
[2 * i
+ 1]->size
;
3118 /* Add any new overlay call stubs needed by the section. */
3121 && (sec_data
= spu_elf_section_data (sec
)) != NULL
3122 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3125 struct call_info
*call
;
3127 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3128 for (call
= sinfo
->fun
[k
].call_list
; call
; call
= call
->next
)
3129 if (call
->fun
->sec
->linker_mark
)
3131 struct call_info
*p
;
3132 for (p
= dummy_caller
.call_list
; p
; p
= p
->next
)
3133 if (p
->fun
== call
->fun
)
3136 stub_size
+= OVL_STUB_SIZE
;
3139 if (tmp
+ stub_size
< lib_size
)
3141 struct call_info
**pp
, *p
;
3143 /* This section fits. Mark it as non-overlay. */
3144 lib_sections
[2 * i
]->linker_mark
= 0;
3145 if (lib_sections
[2 * i
+ 1])
3146 lib_sections
[2 * i
+ 1]->linker_mark
= 0;
3147 lib_size
-= tmp
+ stub_size
;
3148 /* Call stubs to the section we just added are no longer
3150 pp
= &dummy_caller
.call_list
;
3151 while ((p
= *pp
) != NULL
)
3152 if (!p
->fun
->sec
->linker_mark
)
3154 lib_size
+= OVL_STUB_SIZE
;
3160 /* Add new call stubs to dummy_caller. */
3161 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
3162 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3165 struct call_info
*call
;
3167 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3168 for (call
= sinfo
->fun
[k
].call_list
;
3171 if (call
->fun
->sec
->linker_mark
)
3173 struct call_info
*callee
;
3174 callee
= bfd_malloc (sizeof (*callee
));
3176 return (unsigned int) -1;
3178 if (!insert_callee (&dummy_caller
, callee
))
3184 while (dummy_caller
.call_list
!= NULL
)
3186 struct call_info
*call
= dummy_caller
.call_list
;
3187 dummy_caller
.call_list
= call
->next
;
3190 for (i
= 0; i
< 2 * lib_count
; i
++)
3191 if (lib_sections
[i
])
3192 lib_sections
[i
]->gc_mark
= 1;
3193 free (lib_sections
);
3197 /* Build an array of overlay sections. The deepest node's section is
3198 added first, then its parent node's section, then everything called
3199 from the parent section. The idea being to group sections to
3200 minimise calls between different overlays. */
3203 collect_overlays (struct function_info
*fun
,
3204 struct bfd_link_info
*info
,
3207 struct call_info
*call
;
3208 bfd_boolean added_fun
;
3209 asection
***ovly_sections
= param
;
3215 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3216 if (!call
->is_pasted
)
3218 if (!collect_overlays (call
->fun
, info
, ovly_sections
))
3224 if (fun
->sec
->linker_mark
&& fun
->sec
->gc_mark
)
3226 fun
->sec
->gc_mark
= 0;
3227 *(*ovly_sections
)++ = fun
->sec
;
3228 if (fun
->rodata
&& fun
->rodata
->linker_mark
&& fun
->rodata
->gc_mark
)
3230 fun
->rodata
->gc_mark
= 0;
3231 *(*ovly_sections
)++ = fun
->rodata
;
3234 *(*ovly_sections
)++ = NULL
;
3237 /* Pasted sections must stay with the first section. We don't
3238 put pasted sections in the array, just the first section.
3239 Mark subsequent sections as already considered. */
3240 if (fun
->sec
->segment_mark
)
3242 struct function_info
*call_fun
= fun
;
3245 for (call
= call_fun
->call_list
; call
!= NULL
; call
= call
->next
)
3246 if (call
->is_pasted
)
3248 call_fun
= call
->fun
;
3249 call_fun
->sec
->gc_mark
= 0;
3250 if (call_fun
->rodata
)
3251 call_fun
->rodata
->gc_mark
= 0;
3257 while (call_fun
->sec
->segment_mark
);
3261 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3262 if (!collect_overlays (call
->fun
, info
, ovly_sections
))
3267 struct _spu_elf_section_data
*sec_data
;
3268 struct spu_elf_stack_info
*sinfo
;
3270 if ((sec_data
= spu_elf_section_data (fun
->sec
)) != NULL
3271 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3274 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3275 if (!collect_overlays (&sinfo
->fun
[i
], info
, ovly_sections
))
3283 struct _sum_stack_param
{
3285 size_t overall_stack
;
3286 bfd_boolean emit_stack_syms
;
3289 /* Descend the call graph for FUN, accumulating total stack required. */
3292 sum_stack (struct function_info
*fun
,
3293 struct bfd_link_info
*info
,
3296 struct call_info
*call
;
3297 struct function_info
*max
;
3298 size_t stack
, cum_stack
;
3300 bfd_boolean has_call
;
3301 struct _sum_stack_param
*sum_stack_param
= param
;
3302 struct spu_link_hash_table
*htab
;
3304 cum_stack
= fun
->stack
;
3305 sum_stack_param
->cum_stack
= cum_stack
;
3311 for (call
= fun
->call_list
; call
; call
= call
->next
)
3313 if (!call
->is_pasted
)
3315 if (!sum_stack (call
->fun
, info
, sum_stack_param
))
3317 stack
= sum_stack_param
->cum_stack
;
3318 /* Include caller stack for normal calls, don't do so for
3319 tail calls. fun->stack here is local stack usage for
3321 if (!call
->is_tail
|| call
->is_pasted
|| call
->fun
->start
!= NULL
)
3322 stack
+= fun
->stack
;
3323 if (cum_stack
< stack
)
3330 sum_stack_param
->cum_stack
= cum_stack
;
3332 /* Now fun->stack holds cumulative stack. */
3333 fun
->stack
= cum_stack
;
3337 && sum_stack_param
->overall_stack
< cum_stack
)
3338 sum_stack_param
->overall_stack
= cum_stack
;
3340 htab
= spu_hash_table (info
);
3341 if (htab
->auto_overlay
)
3344 f1
= func_name (fun
);
3346 info
->callbacks
->info (_(" %s: 0x%v\n"), f1
, (bfd_vma
) cum_stack
);
3347 info
->callbacks
->minfo (_("%s: 0x%v 0x%v\n"),
3348 f1
, (bfd_vma
) stack
, (bfd_vma
) cum_stack
);
3352 info
->callbacks
->minfo (_(" calls:\n"));
3353 for (call
= fun
->call_list
; call
; call
= call
->next
)
3354 if (!call
->is_pasted
)
3356 const char *f2
= func_name (call
->fun
);
3357 const char *ann1
= call
->fun
== max
? "*" : " ";
3358 const char *ann2
= call
->is_tail
? "t" : " ";
3360 info
->callbacks
->minfo (_(" %s%s %s\n"), ann1
, ann2
, f2
);
3364 if (sum_stack_param
->emit_stack_syms
)
3366 char *name
= bfd_malloc (18 + strlen (f1
));
3367 struct elf_link_hash_entry
*h
;
3372 if (fun
->global
|| ELF_ST_BIND (fun
->u
.sym
->st_info
) == STB_GLOBAL
)
3373 sprintf (name
, "__stack_%s", f1
);
3375 sprintf (name
, "__stack_%x_%s", fun
->sec
->id
& 0xffffffff, f1
);
3377 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
3380 && (h
->root
.type
== bfd_link_hash_new
3381 || h
->root
.type
== bfd_link_hash_undefined
3382 || h
->root
.type
== bfd_link_hash_undefweak
))
3384 h
->root
.type
= bfd_link_hash_defined
;
3385 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
3386 h
->root
.u
.def
.value
= cum_stack
;
3391 h
->ref_regular_nonweak
= 1;
3392 h
->forced_local
= 1;
3400 /* SEC is part of a pasted function. Return the call_info for the
3401 next section of this function. */
3403 static struct call_info
*
3404 find_pasted_call (asection
*sec
)
3406 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
3407 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
3408 struct call_info
*call
;
3411 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3412 for (call
= sinfo
->fun
[k
].call_list
; call
!= NULL
; call
= call
->next
)
3413 if (call
->is_pasted
)
3419 /* qsort predicate to sort bfds by file name. */
3422 sort_bfds (const void *a
, const void *b
)
3424 bfd
*const *abfd1
= a
;
3425 bfd
*const *abfd2
= b
;
3427 return strcmp ((*abfd1
)->filename
, (*abfd2
)->filename
);
3430 /* Handle --auto-overlay. */
3432 static void spu_elf_auto_overlay (struct bfd_link_info
*, void (*) (void))
3436 spu_elf_auto_overlay (struct bfd_link_info
*info
,
3437 void (*spu_elf_load_ovl_mgr
) (void))
3441 struct elf_segment_map
*m
;
3442 unsigned int fixed_size
, lo
, hi
;
3443 struct spu_link_hash_table
*htab
;
3444 unsigned int base
, i
, count
, bfd_count
;
3446 asection
**ovly_sections
, **ovly_p
;
3448 unsigned int total_overlay_size
, overlay_size
;
3449 struct elf_link_hash_entry
*h
;
3450 struct _mos_param mos_param
;
3451 struct _uos_param uos_param
;
3452 struct function_info dummy_caller
;
3454 /* Find the extents of our loadable image. */
3455 lo
= (unsigned int) -1;
3457 for (m
= elf_tdata (info
->output_bfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
3458 if (m
->p_type
== PT_LOAD
)
3459 for (i
= 0; i
< m
->count
; i
++)
3460 if (m
->sections
[i
]->size
!= 0)
3462 if (m
->sections
[i
]->vma
< lo
)
3463 lo
= m
->sections
[i
]->vma
;
3464 if (m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1 > hi
)
3465 hi
= m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1;
3467 fixed_size
= hi
+ 1 - lo
;
3469 if (!discover_functions (info
))
3472 if (!build_call_tree (info
))
3475 uos_param
.exclude_input_section
= 0;
3476 uos_param
.exclude_output_section
3477 = bfd_get_section_by_name (info
->output_bfd
, ".interrupt");
3479 htab
= spu_hash_table (info
);
3480 h
= elf_link_hash_lookup (&htab
->elf
, "__ovly_load",
3481 FALSE
, FALSE
, FALSE
);
3483 && (h
->root
.type
== bfd_link_hash_defined
3484 || h
->root
.type
== bfd_link_hash_defweak
)
3487 /* We have a user supplied overlay manager. */
3488 uos_param
.exclude_input_section
= h
->root
.u
.def
.section
;
3492 /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
3493 builtin version to .text, and will adjust .text size. */
3494 asection
*text
= bfd_get_section_by_name (info
->output_bfd
, ".text");
3496 fixed_size
-= text
->size
;
3497 spu_elf_load_ovl_mgr ();
3498 text
= bfd_get_section_by_name (info
->output_bfd
, ".text");
3500 fixed_size
+= text
->size
;
3503 /* Mark overlay sections, and find max overlay section size. */
3504 mos_param
.max_overlay_size
= 0;
3505 if (!for_each_node (mark_overlay_section
, info
, &mos_param
, TRUE
))
3508 /* We can't put the overlay manager or interrupt routines in
3510 uos_param
.clearing
= 0;
3511 if ((uos_param
.exclude_input_section
3512 || uos_param
.exclude_output_section
)
3513 && !for_each_node (unmark_overlay_section
, info
, &uos_param
, TRUE
))
3517 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3519 bfd_arr
= bfd_malloc (bfd_count
* sizeof (*bfd_arr
));
3520 if (bfd_arr
== NULL
)
3523 /* Count overlay sections, and subtract their sizes from "fixed_size". */
3526 total_overlay_size
= 0;
3527 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3529 extern const bfd_target bfd_elf32_spu_vec
;
3531 unsigned int old_count
;
3533 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3537 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3538 if (sec
->linker_mark
)
3540 if ((sec
->flags
& SEC_CODE
) != 0)
3542 fixed_size
-= sec
->size
;
3543 total_overlay_size
+= sec
->size
;
3545 if (count
!= old_count
)
3546 bfd_arr
[bfd_count
++] = ibfd
;
3549 /* Since the overlay link script selects sections by file name and
3550 section name, ensure that file names are unique. */
3553 bfd_boolean ok
= TRUE
;
3555 qsort (bfd_arr
, bfd_count
, sizeof (*bfd_arr
), sort_bfds
);
3556 for (i
= 1; i
< bfd_count
; ++i
)
3557 if (strcmp (bfd_arr
[i
- 1]->filename
, bfd_arr
[i
]->filename
) == 0)
3559 if (bfd_arr
[i
- 1]->my_archive
== bfd_arr
[i
]->my_archive
)
3561 if (bfd_arr
[i
- 1]->my_archive
&& bfd_arr
[i
]->my_archive
)
3562 info
->callbacks
->einfo (_("%s duplicated in %s\n"),
3563 bfd_arr
[i
]->filename
,
3564 bfd_arr
[i
]->my_archive
->filename
);
3566 info
->callbacks
->einfo (_("%s duplicated\n"),
3567 bfd_arr
[i
]->filename
);
3573 info
->callbacks
->einfo (_("sorry, no support for duplicate "
3574 "object files in auto-overlay script\n"));
3575 bfd_set_error (bfd_error_bad_value
);
3581 if (htab
->reserved
== 0)
3583 struct _sum_stack_param sum_stack_param
;
3585 sum_stack_param
.emit_stack_syms
= 0;
3586 sum_stack_param
.overall_stack
= 0;
3587 if (!for_each_node (sum_stack
, info
, &sum_stack_param
, TRUE
))
3589 htab
->reserved
= sum_stack_param
.overall_stack
+ htab
->extra_stack_space
;
3591 fixed_size
+= htab
->reserved
;
3592 fixed_size
+= htab
->non_ovly_stub
* OVL_STUB_SIZE
;
3593 if (fixed_size
+ mos_param
.max_overlay_size
<= htab
->local_store
)
3595 /* Guess number of overlays. Assuming overlay buffer is on
3596 average only half full should be conservative. */
3597 ovlynum
= total_overlay_size
* 2 / (htab
->local_store
- fixed_size
);
3598 /* Space for _ovly_table[], _ovly_buf_table[] and toe. */
3599 fixed_size
+= ovlynum
* 16 + 16 + 4 + 16;
3602 if (fixed_size
+ mos_param
.max_overlay_size
> htab
->local_store
)
3603 info
->callbacks
->einfo (_("non-overlay plus maximum overlay size "
3604 "of 0x%x exceeds local store\n"),
3605 fixed_size
+ mos_param
.max_overlay_size
);
3607 /* Now see if we should put some functions in the non-overlay area. */
3608 if (fixed_size
< htab
->overlay_fixed
3609 && htab
->overlay_fixed
+ mos_param
.max_overlay_size
< htab
->local_store
)
3611 unsigned int lib_size
= htab
->overlay_fixed
- fixed_size
;
3612 lib_size
= auto_ovl_lib_functions (info
, lib_size
);
3613 if (lib_size
== (unsigned int) -1)
3615 fixed_size
= htab
->overlay_fixed
- lib_size
;
3618 /* Build an array of sections, suitably sorted to place into
3620 ovly_sections
= bfd_malloc (2 * count
* sizeof (*ovly_sections
));
3621 if (ovly_sections
== NULL
)
3623 ovly_p
= ovly_sections
;
3624 if (!for_each_node (collect_overlays
, info
, &ovly_p
, TRUE
))
3626 count
= (size_t) (ovly_p
- ovly_sections
) / 2;
3628 script
= htab
->spu_elf_open_overlay_script ();
3630 if (fprintf (script
, "SECTIONS\n{\n OVERLAY :\n {\n") <= 0)
3633 memset (&dummy_caller
, 0, sizeof (dummy_caller
));
3634 overlay_size
= htab
->local_store
- fixed_size
;
3637 while (base
< count
)
3639 unsigned int size
= 0;
3642 for (i
= base
; i
< count
; i
++)
3646 unsigned int stub_size
;
3647 struct call_info
*call
, *pasty
;
3648 struct _spu_elf_section_data
*sec_data
;
3649 struct spu_elf_stack_info
*sinfo
;
3652 /* See whether we can add this section to the current
3653 overlay without overflowing our overlay buffer. */
3654 sec
= ovly_sections
[2 * i
];
3655 tmp
= size
+ sec
->size
;
3656 if (ovly_sections
[2 * i
+ 1])
3657 tmp
+= ovly_sections
[2 * i
+ 1]->size
;
3658 if (tmp
> overlay_size
)
3660 if (sec
->segment_mark
)
3662 /* Pasted sections must stay together, so add their
3664 struct call_info
*pasty
= find_pasted_call (sec
);
3665 while (pasty
!= NULL
)
3667 struct function_info
*call_fun
= pasty
->fun
;
3668 tmp
+= call_fun
->sec
->size
;
3669 if (call_fun
->rodata
)
3670 tmp
+= call_fun
->rodata
->size
;
3671 for (pasty
= call_fun
->call_list
; pasty
; pasty
= pasty
->next
)
3672 if (pasty
->is_pasted
)
3676 if (tmp
> overlay_size
)
3679 /* If we add this section, we might need new overlay call
3680 stubs. Add any overlay section calls to dummy_call. */
3682 sec_data
= spu_elf_section_data (sec
);
3683 sinfo
= sec_data
->u
.i
.stack_info
;
3684 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3685 for (call
= sinfo
->fun
[k
].call_list
; call
; call
= call
->next
)
3686 if (call
->is_pasted
)
3688 BFD_ASSERT (pasty
== NULL
);
3691 else if (call
->fun
->sec
->linker_mark
)
3693 if (!copy_callee (&dummy_caller
, call
))
3696 while (pasty
!= NULL
)
3698 struct function_info
*call_fun
= pasty
->fun
;
3700 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
3701 if (call
->is_pasted
)
3703 BFD_ASSERT (pasty
== NULL
);
3706 else if (!copy_callee (&dummy_caller
, call
))
3710 /* Calculate call stub size. */
3712 for (call
= dummy_caller
.call_list
; call
; call
= call
->next
)
3716 stub_size
+= OVL_STUB_SIZE
;
3717 /* If the call is within this overlay, we won't need a
3719 for (k
= base
; k
< i
+ 1; k
++)
3720 if (call
->fun
->sec
== ovly_sections
[2 * k
])
3722 stub_size
-= OVL_STUB_SIZE
;
3726 if (tmp
+ stub_size
> overlay_size
)
3734 info
->callbacks
->einfo (_("%B:%A%s exceeds overlay size\n"),
3735 ovly_sections
[2 * i
]->owner
,
3736 ovly_sections
[2 * i
],
3737 ovly_sections
[2 * i
+ 1] ? " + rodata" : "");
3738 bfd_set_error (bfd_error_bad_value
);
3742 if (fprintf (script
, " .ovly%d {\n", ++ovlynum
) <= 0)
3744 for (j
= base
; j
< i
; j
++)
3746 asection
*sec
= ovly_sections
[2 * j
];
3748 if (fprintf (script
, " %s%c%s (%s)\n",
3749 (sec
->owner
->my_archive
!= NULL
3750 ? sec
->owner
->my_archive
->filename
: ""),
3751 info
->path_separator
,
3752 sec
->owner
->filename
,
3755 if (sec
->segment_mark
)
3757 struct call_info
*call
= find_pasted_call (sec
);
3758 while (call
!= NULL
)
3760 struct function_info
*call_fun
= call
->fun
;
3761 sec
= call_fun
->sec
;
3762 if (fprintf (script
, " %s%c%s (%s)\n",
3763 (sec
->owner
->my_archive
!= NULL
3764 ? sec
->owner
->my_archive
->filename
: ""),
3765 info
->path_separator
,
3766 sec
->owner
->filename
,
3769 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
3770 if (call
->is_pasted
)
3776 for (j
= base
; j
< i
; j
++)
3778 asection
*sec
= ovly_sections
[2 * j
+ 1];
3780 && fprintf (script
, " %s%c%s (%s)\n",
3781 (sec
->owner
->my_archive
!= NULL
3782 ? sec
->owner
->my_archive
->filename
: ""),
3783 info
->path_separator
,
3784 sec
->owner
->filename
,
3788 sec
= ovly_sections
[2 * j
];
3789 if (sec
->segment_mark
)
3791 struct call_info
*call
= find_pasted_call (sec
);
3792 while (call
!= NULL
)
3794 struct function_info
*call_fun
= call
->fun
;
3795 sec
= call_fun
->rodata
;
3797 && fprintf (script
, " %s%c%s (%s)\n",
3798 (sec
->owner
->my_archive
!= NULL
3799 ? sec
->owner
->my_archive
->filename
: ""),
3800 info
->path_separator
,
3801 sec
->owner
->filename
,
3804 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
3805 if (call
->is_pasted
)
3811 if (fprintf (script
, " }\n") <= 0)
3814 while (dummy_caller
.call_list
!= NULL
)
3816 struct call_info
*call
= dummy_caller
.call_list
;
3817 dummy_caller
.call_list
= call
->next
;
3823 free (ovly_sections
);
3825 if (fprintf (script
, " }\n}\nINSERT AFTER .text;\n") <= 0)
3827 if (fclose (script
) != 0)
3830 if (htab
->auto_overlay
& AUTO_RELINK
)
3831 htab
->spu_elf_relink ();
3836 bfd_set_error (bfd_error_system_call
);
3838 info
->callbacks
->einfo ("%F%P: auto overlay error: %E\n");
3842 /* Provide an estimate of total stack required. */
3845 spu_elf_stack_analysis (struct bfd_link_info
*info
, int emit_stack_syms
)
3847 struct _sum_stack_param sum_stack_param
;
3849 if (!discover_functions (info
))
3852 if (!build_call_tree (info
))
3855 info
->callbacks
->info (_("Stack size for call graph root nodes.\n"));
3856 info
->callbacks
->minfo (_("\nStack size for functions. "
3857 "Annotations: '*' max stack, 't' tail call\n"));
3859 sum_stack_param
.emit_stack_syms
= emit_stack_syms
;
3860 sum_stack_param
.overall_stack
= 0;
3861 if (!for_each_node (sum_stack
, info
, &sum_stack_param
, TRUE
))
3864 info
->callbacks
->info (_("Maximum stack required is 0x%v\n"),
3865 (bfd_vma
) sum_stack_param
.overall_stack
);
3869 /* Perform a final link. */
3872 spu_elf_final_link (bfd
*output_bfd
, struct bfd_link_info
*info
)
3874 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
3876 if (htab
->auto_overlay
)
3877 spu_elf_auto_overlay (info
, htab
->spu_elf_load_ovl_mgr
);
3879 if (htab
->stack_analysis
3880 && !spu_elf_stack_analysis (info
, htab
->emit_stack_syms
))
3881 info
->callbacks
->einfo ("%X%P: stack analysis error: %E\n");
3883 return bfd_elf_final_link (output_bfd
, info
);
3886 /* Called when not normally emitting relocs, ie. !info->relocatable
3887 and !info->emitrelocations. Returns a count of special relocs
3888 that need to be emitted. */
3891 spu_elf_count_relocs (asection
*sec
, Elf_Internal_Rela
*relocs
)
3893 unsigned int count
= 0;
3894 Elf_Internal_Rela
*relend
= relocs
+ sec
->reloc_count
;
3896 for (; relocs
< relend
; relocs
++)
3898 int r_type
= ELF32_R_TYPE (relocs
->r_info
);
3899 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
3906 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
3909 spu_elf_relocate_section (bfd
*output_bfd
,
3910 struct bfd_link_info
*info
,
3912 asection
*input_section
,
3914 Elf_Internal_Rela
*relocs
,
3915 Elf_Internal_Sym
*local_syms
,
3916 asection
**local_sections
)
3918 Elf_Internal_Shdr
*symtab_hdr
;
3919 struct elf_link_hash_entry
**sym_hashes
;
3920 Elf_Internal_Rela
*rel
, *relend
;
3921 struct spu_link_hash_table
*htab
;
3922 asection
*ea
= bfd_get_section_by_name (output_bfd
, "._ea");
3924 bfd_boolean emit_these_relocs
= FALSE
;
3925 bfd_boolean is_ea_sym
;
3928 htab
= spu_hash_table (info
);
3929 stubs
= (htab
->stub_sec
!= NULL
3930 && maybe_needs_stubs (input_section
, output_bfd
));
3931 symtab_hdr
= &elf_tdata (input_bfd
)->symtab_hdr
;
3932 sym_hashes
= (struct elf_link_hash_entry
**) (elf_sym_hashes (input_bfd
));
3935 relend
= relocs
+ input_section
->reloc_count
;
3936 for (; rel
< relend
; rel
++)
3939 reloc_howto_type
*howto
;
3940 unsigned int r_symndx
;
3941 Elf_Internal_Sym
*sym
;
3943 struct elf_link_hash_entry
*h
;
3944 const char *sym_name
;
3947 bfd_reloc_status_type r
;
3948 bfd_boolean unresolved_reloc
;
3950 enum _stub_type stub_type
;
3952 r_symndx
= ELF32_R_SYM (rel
->r_info
);
3953 r_type
= ELF32_R_TYPE (rel
->r_info
);
3954 howto
= elf_howto_table
+ r_type
;
3955 unresolved_reloc
= FALSE
;
3960 if (r_symndx
< symtab_hdr
->sh_info
)
3962 sym
= local_syms
+ r_symndx
;
3963 sec
= local_sections
[r_symndx
];
3964 sym_name
= bfd_elf_sym_name (input_bfd
, symtab_hdr
, sym
, sec
);
3965 relocation
= _bfd_elf_rela_local_sym (output_bfd
, sym
, &sec
, rel
);
3969 RELOC_FOR_GLOBAL_SYMBOL (info
, input_bfd
, input_section
, rel
,
3970 r_symndx
, symtab_hdr
, sym_hashes
,
3972 unresolved_reloc
, warned
);
3973 sym_name
= h
->root
.root
.string
;
3976 if (sec
!= NULL
&& elf_discarded_section (sec
))
3978 /* For relocs against symbols from removed linkonce sections,
3979 or sections discarded by a linker script, we just want the
3980 section contents zeroed. Avoid any special processing. */
3981 _bfd_clear_contents (howto
, input_bfd
, contents
+ rel
->r_offset
);
3987 if (info
->relocatable
)
3990 is_ea_sym
= (ea
!= NULL
3992 && sec
->output_section
== ea
);
3994 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
3998 /* ._ea is a special section that isn't allocated in SPU
3999 memory, but rather occupies space in PPU memory as
4000 part of an embedded ELF image. If this reloc is
4001 against a symbol defined in ._ea, then transform the
4002 reloc into an equivalent one without a symbol
4003 relative to the start of the ELF image. */
4004 rel
->r_addend
+= (relocation
4006 + elf_section_data (ea
)->this_hdr
.sh_offset
);
4007 rel
->r_info
= ELF32_R_INFO (0, r_type
);
4009 emit_these_relocs
= TRUE
;
4014 unresolved_reloc
= TRUE
;
4016 if (unresolved_reloc
)
4018 (*_bfd_error_handler
)
4019 (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
4021 bfd_get_section_name (input_bfd
, input_section
),
4022 (long) rel
->r_offset
,
4028 /* If this symbol is in an overlay area, we may need to relocate
4029 to the overlay stub. */
4030 addend
= rel
->r_addend
;
4032 && (stub_type
= needs_ovl_stub (h
, sym
, sec
, input_section
, rel
,
4033 contents
, info
)) != no_stub
)
4035 unsigned int ovl
= 0;
4036 struct got_entry
*g
, **head
;
4038 if (stub_type
!= nonovl_stub
)
4039 ovl
= (spu_elf_section_data (input_section
->output_section
)
4043 head
= &h
->got
.glist
;
4045 head
= elf_local_got_ents (input_bfd
) + r_symndx
;
4047 for (g
= *head
; g
!= NULL
; g
= g
->next
)
4048 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
4053 relocation
= g
->stub_addr
;
4057 r
= _bfd_final_link_relocate (howto
,
4061 rel
->r_offset
, relocation
, addend
);
4063 if (r
!= bfd_reloc_ok
)
4065 const char *msg
= (const char *) 0;
4069 case bfd_reloc_overflow
:
4070 if (!((*info
->callbacks
->reloc_overflow
)
4071 (info
, (h
? &h
->root
: NULL
), sym_name
, howto
->name
,
4072 (bfd_vma
) 0, input_bfd
, input_section
, rel
->r_offset
)))
4076 case bfd_reloc_undefined
:
4077 if (!((*info
->callbacks
->undefined_symbol
)
4078 (info
, sym_name
, input_bfd
, input_section
,
4079 rel
->r_offset
, TRUE
)))
4083 case bfd_reloc_outofrange
:
4084 msg
= _("internal error: out of range error");
4087 case bfd_reloc_notsupported
:
4088 msg
= _("internal error: unsupported relocation error");
4091 case bfd_reloc_dangerous
:
4092 msg
= _("internal error: dangerous error");
4096 msg
= _("internal error: unknown error");
4101 if (!((*info
->callbacks
->warning
)
4102 (info
, msg
, sym_name
, input_bfd
, input_section
,
4111 && emit_these_relocs
4112 && !info
->emitrelocations
)
4114 Elf_Internal_Rela
*wrel
;
4115 Elf_Internal_Shdr
*rel_hdr
;
4117 wrel
= rel
= relocs
;
4118 relend
= relocs
+ input_section
->reloc_count
;
4119 for (; rel
< relend
; rel
++)
4123 r_type
= ELF32_R_TYPE (rel
->r_info
);
4124 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
4127 input_section
->reloc_count
= wrel
- relocs
;
4128 /* Backflips for _bfd_elf_link_output_relocs. */
4129 rel_hdr
= &elf_section_data (input_section
)->rel_hdr
;
4130 rel_hdr
->sh_size
= input_section
->reloc_count
* rel_hdr
->sh_entsize
;
4137 /* Adjust _SPUEAR_ syms to point at their overlay stubs. */
/* elf_backend_link_output_symbol_hook.  On a final (non-relocatable)
   link that built overlay stub sections, any defined or weakly defined
   hash-table symbol whose name begins with "_SPUEAR_" is redirected so
   the symbol written to the output points at its overlay stub rather
   than the original definition: st_shndx is switched to the stub
   section and st_value to the stub address recorded on the symbol's
   got_entry list.
   NOTE(review): this extract is missing some original source lines
   (the return type, braces, and the final return statement were
   dropped by the text extraction) -- confirm against the full file.  */
4140 spu_elf_output_symbol_hook (struct bfd_link_info
*info
,
4141 const char *sym_name ATTRIBUTE_UNUSED
,
4142 Elf_Internal_Sym
*sym
,
4143 asection
*sym_sec ATTRIBUTE_UNUSED
,
4144 struct elf_link_hash_entry
*h
)
4146 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
/* Only act on a final link with stubs present, for defined/defweak
   symbols named "_SPUEAR_*".  */
4148 if (!info
->relocatable
4149 && htab
->stub_sec
!= NULL
4151 && (h
->root
.type
== bfd_link_hash_defined
4152 || h
->root
.type
== bfd_link_hash_defweak
)
4154 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0)
4156 struct got_entry
*g
;
/* Find the stub entry usable from non-overlay code: addend 0 and
   ovl 0 (valid from any overlay).  */
4158 for (g
= h
->got
.glist
; g
!= NULL
; g
= g
->next
)
4159 if (g
->addend
== 0 && g
->ovl
== 0)
/* Re-point the output symbol at the stub in stub_sec[0].  */
4161 sym
->st_shndx
= (_bfd_elf_section_from_bfd_section
4162 (htab
->stub_sec
[0]->output_section
->owner
,
4163 htab
->stub_sec
[0]->output_section
))
;
4164 sym
->st_value
= g
->stub_addr
;
/* Non-zero when the output is being built as a PPU-embedded "plugin"
   image; presumably tested in spu_elf_post_process_headers before
   stamping e_type -- the guard there was lost in extraction.  */
4172 static int spu_plugin
= 0;
/* Record the plugin flag passed in by the linker front end.
   NOTE(review): the return type and body (presumably
   "spu_plugin = val;") were dropped by the text extraction.  */
4175 spu_elf_plugin (int val
)
4180 /* Set ELF header e_type for plugins. */
/* elf_backend_post_process_headers.  Stamps the output ELF header's
   e_type as ET_DYN so a plugin image is marked as a shared object.
   NOTE(review): the guard (presumably "if (spu_plugin)") was dropped
   by the text extraction -- confirm against the full source.  */
4183 spu_elf_post_process_headers (bfd
*abfd
,
4184 struct bfd_link_info
*info ATTRIBUTE_UNUSED
)
4188 Elf_Internal_Ehdr
*i_ehdrp
= elf_elfheader (abfd
);
4190 i_ehdrp
->e_type
= ET_DYN
;
4194 /* We may add an extra PT_LOAD segment for .toe. We also need extra
4195 segments for overlays. */
/* elf_backend_additional_program_headers.  Reserves one extra program
   header per overlay, plus one more when a loadable ".toe" section
   exists (it gets a PT_LOAD of its own; see
   spu_elf_modify_segment_map).
   NOTE(review): the declaration of `sec', the increment of `extra' in
   the .toe case, and the final "return extra;" were dropped by the
   text extraction.  */
4198 spu_elf_additional_program_headers (bfd
*abfd
, struct bfd_link_info
*info
)
4200 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
/* Start with one extra header per overlay.  */
4201 int extra
= htab
->num_overlays
;
/* A loadable .toe section needs its own segment too.  */
4207 sec
= bfd_get_section_by_name (abfd
, ".toe");
4208 if (sec
!= NULL
&& (sec
->flags
& SEC_LOAD
) != 0)
4214 /* Remove .toe section from other PT_LOAD segments and put it in
4215 a segment of its own. Put overlays in separate segments too. */
/* elf_backend_modify_segment_map.  Walks every multi-section PT_LOAD
   segment map entry; when section i is .toe or an overlay section
   (ovl_index != 0), the segment is split: any sections following i are
   first moved into a freshly allocated PT_LOAD map entry, then the
   found section itself is given a single-section PT_LOAD entry.
   NOTE(review): list-relinking statements, allocation-failure returns,
   braces, and some local declarations (`i', `s', `toe', `amt') were
   dropped by the text extraction.  */
4218 spu_elf_modify_segment_map (bfd
*abfd
, struct bfd_link_info
*info
)
4221 struct elf_segment_map
*m
;
4227 toe
= bfd_get_section_by_name (abfd
, ".toe");
/* Examine each PT_LOAD map entry holding more than one section.  */
4228 for (m
= elf_tdata (abfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
4229 if (m
->p_type
== PT_LOAD
&& m
->count
> 1)
4230 for (i
= 0; i
< m
->count
; i
++)
4231 if ((s
= m
->sections
[i
]) == toe
4232 || spu_elf_section_data (s
)->u
.o
.ovl_index
!= 0)
4234 struct elf_segment_map
*m2
;
/* Sections after s (if any) are split off into a new PT_LOAD
   map entry sized for count - (i + 1) sections; the map struct
   embeds one section slot, hence "i + 2" in the size sum.  */
4237 if (i
+ 1 < m
->count
)
4239 amt
= sizeof (struct elf_segment_map
);
4240 amt
+= (m
->count
- (i
+ 2)) * sizeof (m
->sections
[0]);
4241 m2
= bfd_zalloc (abfd
, amt
);
4244 m2
->count
= m
->count
- (i
+ 1);
4245 memcpy (m2
->sections
, m
->sections
+ i
+ 1,
4246 m2
->count
* sizeof (m
->sections
[0]));
4247 m2
->p_type
= PT_LOAD
;
/* Then s itself gets a single-section PT_LOAD map entry.  */
4255 amt
= sizeof (struct elf_segment_map
);
4256 m2
= bfd_zalloc (abfd
, amt
);
4259 m2
->p_type
= PT_LOAD
;
4261 m2
->sections
[0] = s
;
4271 /* Tweak the section type of .note.spu_name. */
/* elf_backend_fake_sections.  Forces sh_type to SHT_NOTE for the
   section named SPU_PTNOTE_SPUNAME so it is emitted as a proper ELF
   note section.  */
4274 spu_elf_fake_sections (bfd
*obfd ATTRIBUTE_UNUSED
,
4275 Elf_Internal_Shdr
*hdr
,
/* NOTE(review): the third parameter (presumably "asection *sec") was
   dropped by the text extraction; `sec' is used below.  */
4278 if (strcmp (sec
->name
, SPU_PTNOTE_SPUNAME
) == 0)
4279 hdr
->sh_type
= SHT_NOTE
;
4283 /* Tweak phdrs before writing them out. */
/* elf_backend_modify_program_headers.  Two jobs, performed just before
   the program headers are written:
   1) For each segment map entry whose first section is an overlay
      (ovl_index != 0), set PF_OVERLAY in the matching phdr and patch
      that segment's file offset into the in-memory _ovly_table image
      (one 16-byte table row per overlay, file_off at row offset 8).
   2) Round PT_LOAD p_filesz/p_memsz up to multiples of 16 (the DMA
      alignment), refusing the file-size padding when it would make the
      segment overlap the next higher one.
   NOTE(review): several declarations (`count', `i', `o', `adjust'),
   guards, `last' tracking, and loop-restart logic were dropped by the
   text extraction -- the visible lines are a subset of the original.  */
4286 spu_elf_modify_program_headers (bfd
*abfd
, struct bfd_link_info
*info
)
4288 const struct elf_backend_data
*bed
;
4289 struct elf_obj_tdata
*tdata
;
4290 Elf_Internal_Phdr
*phdr
, *last
;
4291 struct spu_link_hash_table
*htab
;
4298 bed
= get_elf_backend_data (abfd
);
4299 tdata
= elf_tdata (abfd
);
/* Number of phdrs actually laid out.  */
4301 count
= tdata
->program_header_size
/ bed
->s
->sizeof_phdr
;
4302 htab
= spu_hash_table (info
);
4303 if (htab
->num_overlays
!= 0)
4305 struct elf_segment_map
*m
;
/* Segment map entries correspond 1:1 with phdrs here; i indexes
   both in step.  */
4308 for (i
= 0, m
= elf_tdata (abfd
)->segment_map
; m
; ++i
, m
= m
->next
)
4310 && (o
= spu_elf_section_data (m
->sections
[0])->u
.o
.ovl_index
) != 0)
4312 /* Mark this as an overlay header. */
4313 phdr
[i
].p_flags
|= PF_OVERLAY
;
4315 if (htab
->ovtab
!= NULL
&& htab
->ovtab
->size
!= 0)
4317 bfd_byte
*p
= htab
->ovtab
->contents
;
/* Each _ovly_table entry is 16 bytes; file_off lives at +8.  */
4318 unsigned int off
= o
* 16 + 8;
4320 /* Write file_off into _ovly_table. */
4321 bfd_put_32 (htab
->ovtab
->owner
, phdr
[i
].p_offset
, p
+ off
);
4326 /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
4327 of 16. This should always be possible when using the standard
4328 linker scripts, but don't create overlapping segments if
4329 someone is playing games with linker scripts. */
/* Walk phdrs from the last downward so `last' can track the next
   higher PT_LOAD when checking for overlap.  */
4331 for (i
= count
; i
-- != 0; )
4332 if (phdr
[i
].p_type
== PT_LOAD
)
/* Padding needed to round p_filesz up to 16.  */
4336 adjust
= -phdr
[i
].p_filesz
& 15;
4339 && phdr
[i
].p_offset
+ phdr
[i
].p_filesz
> last
->p_offset
- adjust
)
/* Padding needed to round p_memsz up to 16.  */
4342 adjust
= -phdr
[i
].p_memsz
& 15;
4345 && phdr
[i
].p_filesz
!= 0
4346 && phdr
[i
].p_vaddr
+ phdr
[i
].p_memsz
> last
->p_vaddr
- adjust
4347 && phdr
[i
].p_vaddr
+ phdr
[i
].p_memsz
<= last
->p_vaddr
)
4350 if (phdr
[i
].p_filesz
!= 0)
/* i wrapped to -1 means the first pass completed without
   conflict; apply the padding for real in a second pass.  */
4354 if (i
== (unsigned int) -1)
4355 for (i
= count
; i
-- != 0; )
4356 if (phdr
[i
].p_type
== PT_LOAD
)
4360 adjust
= -phdr
[i
].p_filesz
& 15;
4361 phdr
[i
].p_filesz
+= adjust
;
4363 adjust
= -phdr
[i
].p_memsz
& 15;
4364 phdr
[i
].p_memsz
+= adjust
;
/* Target vector configuration.  These macros parameterize the generic
   ELF target template: elf32-target.h (included last) expands them
   into the bfd_elf32_spu_vec target, wiring the SPU-specific hooks
   defined above into the ELF backend.  */
4370 #define TARGET_BIG_SYM bfd_elf32_spu_vec
4371 #define TARGET_BIG_NAME "elf32-spu"
4372 #define ELF_ARCH bfd_arch_spu
4373 #define ELF_MACHINE_CODE EM_SPU
4374 /* This matches the alignment need for DMA. */
4375 #define ELF_MAXPAGESIZE 0x80
4376 #define elf_backend_rela_normal 1
4377 #define elf_backend_can_gc_sections 1
/* Relocation handling hooks.  */
4379 #define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
4380 #define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
4381 #define elf_info_to_howto spu_elf_info_to_howto
4382 #define elf_backend_count_relocs spu_elf_count_relocs
4383 #define elf_backend_relocate_section spu_elf_relocate_section
4384 #define elf_backend_symbol_processing spu_elf_backend_symbol_processing
4385 #define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
4386 #define elf_backend_object_p spu_elf_object_p
4387 #define bfd_elf32_new_section_hook spu_elf_new_section_hook
4388 #define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create
/* Program-header / segment layout hooks.  */
4390 #define elf_backend_additional_program_headers spu_elf_additional_program_headers
4391 #define elf_backend_modify_segment_map spu_elf_modify_segment_map
4392 #define elf_backend_modify_program_headers spu_elf_modify_program_headers
4393 #define elf_backend_post_process_headers spu_elf_post_process_headers
4394 #define elf_backend_fake_sections spu_elf_fake_sections
4395 #define elf_backend_special_sections spu_elf_special_sections
4396 #define bfd_elf32_bfd_final_link spu_elf_final_link
4398 #include "elf32-target.h"