1 /* SPU specific support for 32-bit ELF
3 Copyright 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
#include "sysdep.h"
#include "libiberty.h"
#include "bfd.h"
#include "bfdlink.h"
#include "libbfd.h"
#include "elf-bfd.h"
#include "elf/spu.h"
#include "elf32-spu.h"
30 /* We use RELA style relocs. Don't define USE_REL. */
32 static bfd_reloc_status_type
spu_elf_rel9 (bfd
*, arelent
*, asymbol
*,
/* NOTE(review): extraction dropped source lines here (gaps in the
   embedded numbering): the HOWTO field layout per entry is
   (type, rightshift, size, bitsize, pc_relative, bitpos, overflow,
   special_function, name, partial_inplace, src_mask, dst_mask,
   pcrel_offset).  The final R_SPU_PPU64 entry is truncated (its last
   field line and the closing "};" are missing) -- restore from the
   upstream sources before building.  */
36 /* Values of type 'enum elf_spu_reloc_type' are used to index this
37 array, so it must be declared in the order of that type. */
39 static reloc_howto_type elf_howto_table
[] = {
40 HOWTO (R_SPU_NONE
, 0, 0, 0, FALSE
, 0, complain_overflow_dont
,
41 bfd_elf_generic_reloc
, "SPU_NONE",
42 FALSE
, 0, 0x00000000, FALSE
),
43 HOWTO (R_SPU_ADDR10
, 4, 2, 10, FALSE
, 14, complain_overflow_bitfield
,
44 bfd_elf_generic_reloc
, "SPU_ADDR10",
45 FALSE
, 0, 0x00ffc000, FALSE
),
46 HOWTO (R_SPU_ADDR16
, 2, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
47 bfd_elf_generic_reloc
, "SPU_ADDR16",
48 FALSE
, 0, 0x007fff80, FALSE
),
49 HOWTO (R_SPU_ADDR16_HI
, 16, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
50 bfd_elf_generic_reloc
, "SPU_ADDR16_HI",
51 FALSE
, 0, 0x007fff80, FALSE
),
52 HOWTO (R_SPU_ADDR16_LO
, 0, 2, 16, FALSE
, 7, complain_overflow_dont
,
53 bfd_elf_generic_reloc
, "SPU_ADDR16_LO",
54 FALSE
, 0, 0x007fff80, FALSE
),
55 HOWTO (R_SPU_ADDR18
, 0, 2, 18, FALSE
, 7, complain_overflow_bitfield
,
56 bfd_elf_generic_reloc
, "SPU_ADDR18",
57 FALSE
, 0, 0x01ffff80, FALSE
),
58 HOWTO (R_SPU_ADDR32
, 0, 2, 32, FALSE
, 0, complain_overflow_dont
,
59 bfd_elf_generic_reloc
, "SPU_ADDR32",
60 FALSE
, 0, 0xffffffff, FALSE
),
61 HOWTO (R_SPU_REL16
, 2, 2, 16, TRUE
, 7, complain_overflow_bitfield
,
62 bfd_elf_generic_reloc
, "SPU_REL16",
63 FALSE
, 0, 0x007fff80, TRUE
),
64 HOWTO (R_SPU_ADDR7
, 0, 2, 7, FALSE
, 14, complain_overflow_dont
,
65 bfd_elf_generic_reloc
, "SPU_ADDR7",
66 FALSE
, 0, 0x001fc000, FALSE
),
/* The two REL9 relocs use the spu_elf_rel9 special function because
   their 9-bit value is split across two instruction fields.  */
67 HOWTO (R_SPU_REL9
, 2, 2, 9, TRUE
, 0, complain_overflow_signed
,
68 spu_elf_rel9
, "SPU_REL9",
69 FALSE
, 0, 0x0180007f, TRUE
),
70 HOWTO (R_SPU_REL9I
, 2, 2, 9, TRUE
, 0, complain_overflow_signed
,
71 spu_elf_rel9
, "SPU_REL9I",
72 FALSE
, 0, 0x0000c07f, TRUE
),
73 HOWTO (R_SPU_ADDR10I
, 0, 2, 10, FALSE
, 14, complain_overflow_signed
,
74 bfd_elf_generic_reloc
, "SPU_ADDR10I",
75 FALSE
, 0, 0x00ffc000, FALSE
),
76 HOWTO (R_SPU_ADDR16I
, 0, 2, 16, FALSE
, 7, complain_overflow_signed
,
77 bfd_elf_generic_reloc
, "SPU_ADDR16I",
78 FALSE
, 0, 0x007fff80, FALSE
),
79 HOWTO (R_SPU_REL32
, 0, 2, 32, TRUE
, 0, complain_overflow_dont
,
80 bfd_elf_generic_reloc
, "SPU_REL32",
81 FALSE
, 0, 0xffffffff, TRUE
),
82 HOWTO (R_SPU_ADDR16X
, 0, 2, 16, FALSE
, 7, complain_overflow_bitfield
,
83 bfd_elf_generic_reloc
, "SPU_ADDR16X",
84 FALSE
, 0, 0x007fff80, FALSE
),
85 HOWTO (R_SPU_PPU32
, 0, 2, 32, FALSE
, 0, complain_overflow_dont
,
86 bfd_elf_generic_reloc
, "SPU_PPU32",
87 FALSE
, 0, 0xffffffff, FALSE
),
/* Truncated entry -- see NOTE above.  */
88 HOWTO (R_SPU_PPU64
, 0, 4, 64, FALSE
, 0, complain_overflow_dont
,
89 bfd_elf_generic_reloc
, "SPU_PPU64",
/* Backend special-section table: ._ea (PPU effective-address data,
   writable PROGBITS) and .toe (table of effective addresses, NOBITS).
   NOTE(review): the terminating { NULL, ... } sentinel entry and the
   closing "};" were dropped by extraction -- restore before building.  */
93 static struct bfd_elf_special_section
const spu_elf_special_sections
[] = {
94 { "._ea", 4, 0, SHT_PROGBITS
, SHF_WRITE
},
95 { ".toe", 4, 0, SHT_NOBITS
, SHF_ALLOC
},
/* Map a generic BFD reloc code to the corresponding SPU ELF reloc type.
   NOTE(review): extraction dropped the switch scaffolding ("switch
   (code) {", the default case) and the "return R_SPU_..." line for
   several of the case labels below (the odd-numbered source lines are
   missing) -- each visible "case" originally had its own return;
   restore from the upstream sources before building.  */
99 static enum elf_spu_reloc_type
100 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code
)
106 case BFD_RELOC_SPU_IMM10W
:
108 case BFD_RELOC_SPU_IMM16W
:
110 case BFD_RELOC_SPU_LO16
:
111 return R_SPU_ADDR16_LO
;
112 case BFD_RELOC_SPU_HI16
:
113 return R_SPU_ADDR16_HI
;
114 case BFD_RELOC_SPU_IMM18
:
116 case BFD_RELOC_SPU_PCREL16
:
118 case BFD_RELOC_SPU_IMM7
:
120 case BFD_RELOC_SPU_IMM8
:
122 case BFD_RELOC_SPU_PCREL9a
:
124 case BFD_RELOC_SPU_PCREL9b
:
126 case BFD_RELOC_SPU_IMM10
:
127 return R_SPU_ADDR10I
;
128 case BFD_RELOC_SPU_IMM16
:
129 return R_SPU_ADDR16I
;
132 case BFD_RELOC_32_PCREL
:
134 case BFD_RELOC_SPU_PPU32
:
136 case BFD_RELOC_SPU_PPU64
:
142 spu_elf_info_to_howto (bfd
*abfd ATTRIBUTE_UNUSED
,
144 Elf_Internal_Rela
*dst
)
146 enum elf_spu_reloc_type r_type
;
148 r_type
= (enum elf_spu_reloc_type
) ELF32_R_TYPE (dst
->r_info
);
149 BFD_ASSERT (r_type
< R_SPU_max
);
150 cache_ptr
->howto
= &elf_howto_table
[(int) r_type
];
153 static reloc_howto_type
*
154 spu_elf_reloc_type_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
155 bfd_reloc_code_real_type code
)
157 enum elf_spu_reloc_type r_type
= spu_elf_bfd_to_reloc_type (code
);
159 if (r_type
== R_SPU_NONE
)
162 return elf_howto_table
+ r_type
;
165 static reloc_howto_type
*
166 spu_elf_reloc_name_lookup (bfd
*abfd ATTRIBUTE_UNUSED
,
171 for (i
= 0; i
< sizeof (elf_howto_table
) / sizeof (elf_howto_table
[0]); i
++)
172 if (elf_howto_table
[i
].name
!= NULL
173 && strcasecmp (elf_howto_table
[i
].name
, r_name
) == 0)
174 return &elf_howto_table
[i
];
179 /* Apply R_SPU_REL9 and R_SPU_REL9I relocs. */
181 static bfd_reloc_status_type
182 spu_elf_rel9 (bfd
*abfd
, arelent
*reloc_entry
, asymbol
*symbol
,
183 void *data
, asection
*input_section
,
184 bfd
*output_bfd
, char **error_message
)
186 bfd_size_type octets
;
190 /* If this is a relocatable link (output_bfd test tells us), just
191 call the generic function. Any adjustment will be done at final
193 if (output_bfd
!= NULL
)
194 return bfd_elf_generic_reloc (abfd
, reloc_entry
, symbol
, data
,
195 input_section
, output_bfd
, error_message
);
197 if (reloc_entry
->address
> bfd_get_section_limit (abfd
, input_section
))
198 return bfd_reloc_outofrange
;
199 octets
= reloc_entry
->address
* bfd_octets_per_byte (abfd
);
201 /* Get symbol value. */
203 if (!bfd_is_com_section (symbol
->section
))
205 if (symbol
->section
->output_section
)
206 val
+= symbol
->section
->output_section
->vma
;
208 val
+= reloc_entry
->addend
;
210 /* Make it pc-relative. */
211 val
-= input_section
->output_section
->vma
+ input_section
->output_offset
;
214 if (val
+ 256 >= 512)
215 return bfd_reloc_overflow
;
217 insn
= bfd_get_32 (abfd
, (bfd_byte
*) data
+ octets
);
219 /* Move two high bits of value to REL9I and REL9 position.
220 The mask will take care of selecting the right field. */
221 val
= (val
& 0x7f) | ((val
& 0x180) << 7) | ((val
& 0x180) << 16);
222 insn
&= ~reloc_entry
->howto
->dst_mask
;
223 insn
|= val
& reloc_entry
->howto
->dst_mask
;
224 bfd_put_32 (abfd
, insn
, (bfd_byte
*) data
+ octets
);
229 spu_elf_new_section_hook (bfd
*abfd
, asection
*sec
)
231 if (!sec
->used_by_bfd
)
233 struct _spu_elf_section_data
*sdata
;
235 sdata
= bfd_zalloc (abfd
, sizeof (*sdata
));
238 sec
->used_by_bfd
= sdata
;
241 return _bfd_elf_new_section_hook (abfd
, sec
);
244 /* Set up overlay info for executables. */
247 spu_elf_object_p (bfd
*abfd
)
249 if ((abfd
->flags
& (EXEC_P
| DYNAMIC
)) != 0)
251 unsigned int i
, num_ovl
, num_buf
;
252 Elf_Internal_Phdr
*phdr
= elf_tdata (abfd
)->phdr
;
253 Elf_Internal_Ehdr
*ehdr
= elf_elfheader (abfd
);
254 Elf_Internal_Phdr
*last_phdr
= NULL
;
256 for (num_buf
= 0, num_ovl
= 0, i
= 0; i
< ehdr
->e_phnum
; i
++, phdr
++)
257 if (phdr
->p_type
== PT_LOAD
&& (phdr
->p_flags
& PF_OVERLAY
) != 0)
262 if (last_phdr
== NULL
263 || ((last_phdr
->p_vaddr
^ phdr
->p_vaddr
) & 0x3ffff) != 0)
266 for (j
= 1; j
< elf_numsections (abfd
); j
++)
268 Elf_Internal_Shdr
*shdr
= elf_elfsections (abfd
)[j
];
270 if (ELF_IS_SECTION_IN_SEGMENT_MEMORY (shdr
, phdr
))
272 asection
*sec
= shdr
->bfd_section
;
273 spu_elf_section_data (sec
)->u
.o
.ovl_index
= num_ovl
;
274 spu_elf_section_data (sec
)->u
.o
.ovl_buf
= num_buf
;
282 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
283 strip --strip-unneeded will not remove them. */
286 spu_elf_backend_symbol_processing (bfd
*abfd ATTRIBUTE_UNUSED
, asymbol
*sym
)
288 if (sym
->name
!= NULL
289 && sym
->section
!= bfd_abs_section_ptr
290 && strncmp (sym
->name
, "_EAR_", 5) == 0)
291 sym
->flags
|= BSF_KEEP
;
/* NOTE(review): extraction dropped several members of this struct and
   of struct got_entry below (gaps in the embedded numbering include
   the overlay section shortcuts such as ovtab -- referenced later by
   spu_elf_link_hash_table_create -- the stub section array, and most
   of got_entry).  Restore from the upstream sources before building.  */
294 /* SPU ELF linker hash table. */
296 struct spu_link_hash_table
298 struct elf_link_hash_table elf
;
300 struct spu_elf_params
*params
;
302 /* Shortcuts to overlay sections. */
308 /* Count of stubs in each overlay section. */
309 unsigned int *stub_count
;
311 /* The stub section for each overlay section. */
/* Overlay manager entry points: [0] is load/branch, [1] is return/call
   (see entry_names in spu_elf_find_overlays).  */
314 struct elf_link_hash_entry
*ovly_entry
[2];
316 /* Number of overlay buffers. */
317 unsigned int num_buf
;
319 /* Total number of overlays. */
320 unsigned int num_overlays
;
322 /* For soft icache. */
323 unsigned int line_size_log2
;
324 unsigned int num_lines_log2
;
326 /* How much memory we have. */
327 unsigned int local_store
;
328 /* Local store --auto-overlay should reserve for non-overlay
329 functions and data. */
330 unsigned int overlay_fixed
;
331 /* Local store --auto-overlay should reserve for stack and heap. */
332 unsigned int reserved
;
333 /* If reserved is not specified, stack analysis will calculate a value
334 for the stack. This parameter adjusts that value to allow for
335 negative sp access (the ABI says 2000 bytes below sp are valid,
336 and the overlay manager uses some of this area). */
337 int extra_stack_space
;
338 /* Count of overlay stubs needed in non-overlay area. */
339 unsigned int non_ovly_stub
;
342 unsigned int stub_err
: 1;
345 /* Hijack the generic got fields for overlay stub accounting. */
349 struct got_entry
*next
;
/* Convert a generic bfd_link_info hash pointer to the SPU table.  */
358 #define spu_hash_table(p) \
359 ((struct spu_link_hash_table *) ((p)->hash))
/* NOTE(review): extraction dropped the struct headers ("struct
   call_info {", "struct function_info {") and several members of
   these call-graph bookkeeping structures used by the stack/overlay
   analysis.  Restore from the upstream sources before building.  */
363 struct function_info
*fun
;
364 struct call_info
*next
;
366 unsigned int max_depth
;
367 unsigned int is_tail
: 1;
368 unsigned int is_pasted
: 1;
369 unsigned int priority
: 13;
374 /* List of functions called. Also branches to hot/cold part of
376 struct call_info
*call_list
;
377 /* For hot/cold part of function, point to owner. */
378 struct function_info
*start
;
379 /* Symbol at start of function. */
381 Elf_Internal_Sym
*sym
;
382 struct elf_link_hash_entry
*h
;
384 /* Function section. */
387 /* Where last called from, and number of sections called from. */
388 asection
*last_caller
;
389 unsigned int call_count
;
390 /* Address range of (this part of) function. */
392 /* Offset where we found a store of lr, or -1 if none found. */
394 /* Offset where we found the stack adjustment insn. */
398 /* Distance from root of call tree. Tail and hot/cold branches
399 count as one deeper. We aren't counting stack frames here. */
401 /* Set if global symbol. */
402 unsigned int global
: 1;
403 /* Set if known to be start of function (as distinct from a hunk
404 in hot/cold section. */
405 unsigned int is_func
: 1;
406 /* Set if not a root node. */
407 unsigned int non_root
: 1;
408 /* Flags used during call tree traversal. It's cheaper to replicate
409 the visit flags than have one which needs clearing after a traversal. */
410 unsigned int visit1
: 1;
411 unsigned int visit2
: 1;
412 unsigned int marking
: 1;
413 unsigned int visit3
: 1;
414 unsigned int visit4
: 1;
415 unsigned int visit5
: 1;
416 unsigned int visit6
: 1;
417 unsigned int visit7
: 1;
/* Per-section array of function_info entries.  */
420 struct spu_elf_stack_info
424 /* Variable size array describing functions, one per contiguous
425 address range belonging to a function. */
426 struct function_info fun
[1];
/* Forward declaration; used by build_stub's lrlive analysis.  */
429 static struct function_info
*find_function (asection
*, bfd_vma
,
430 struct bfd_link_info
*);
432 /* Create a spu ELF linker hash table. */
434 static struct bfd_link_hash_table
*
435 spu_elf_link_hash_table_create (bfd
*abfd
)
437 struct spu_link_hash_table
*htab
;
439 htab
= bfd_malloc (sizeof (*htab
));
443 if (!_bfd_elf_link_hash_table_init (&htab
->elf
, abfd
,
444 _bfd_elf_link_hash_newfunc
,
445 sizeof (struct elf_link_hash_entry
)))
451 memset (&htab
->ovtab
, 0,
452 sizeof (*htab
) - offsetof (struct spu_link_hash_table
, ovtab
));
454 htab
->elf
.init_got_refcount
.refcount
= 0;
455 htab
->elf
.init_got_refcount
.glist
= NULL
;
456 htab
->elf
.init_got_offset
.offset
= 0;
457 htab
->elf
.init_got_offset
.glist
= NULL
;
458 return &htab
->elf
.root
;
462 spu_elf_setup (struct bfd_link_info
*info
, struct spu_elf_params
*params
)
464 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
465 htab
->params
= params
;
466 htab
->line_size_log2
= bfd_log2 (htab
->params
->line_size
);
467 htab
->num_lines_log2
= bfd_log2 (htab
->params
->num_lines
);
/* NOTE(review): extraction dropped the return type, two parameters
   (the SYMSECP output and the IBFD input), braces, the local-symbol
   cache update, the *hp/*symp/*symsecp stores on the global path, and
   the error/return paths of this function.  Restore from the upstream
   sources before building.  */
470 /* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
471 to (hash, NULL) for global symbols, and (NULL, sym) for locals. Set
472 *SYMSECP to the symbol's section. *LOCSYMSP caches local syms. */
475 get_sym_h (struct elf_link_hash_entry
**hp
,
476 Elf_Internal_Sym
**symp
,
478 Elf_Internal_Sym
**locsymsp
,
479 unsigned long r_symndx
,
482 Elf_Internal_Shdr
*symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
/* Indices at or above sh_info are global symbols.  */
484 if (r_symndx
>= symtab_hdr
->sh_info
)
486 struct elf_link_hash_entry
**sym_hashes
= elf_sym_hashes (ibfd
);
487 struct elf_link_hash_entry
*h
;
489 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
/* Follow indirect and warning links to the real symbol.  */
490 while (h
->root
.type
== bfd_link_hash_indirect
491 || h
->root
.type
== bfd_link_hash_warning
)
492 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
502 asection
*symsec
= NULL
;
503 if (h
->root
.type
== bfd_link_hash_defined
504 || h
->root
.type
== bfd_link_hash_defweak
)
505 symsec
= h
->root
.u
.def
.section
;
/* Local symbol path: read (and cache) the local symbol table.  */
511 Elf_Internal_Sym
*sym
;
512 Elf_Internal_Sym
*locsyms
= *locsymsp
;
516 locsyms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
518 locsyms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
,
520 0, NULL
, NULL
, NULL
);
525 sym
= locsyms
+ r_symndx
;
534 *symsecp
= bfd_section_from_elf_index (ibfd
, sym
->st_shndx
);
/* NOTE(review): extraction dropped the return type, local variable
   declarations, the loop-exit "break", section-creation failure
   checks, and the final return of this function.  Restore from the
   upstream sources before building.  */
540 /* Create the note section if not already present. This is done early so
541 that the linker maps the sections to the right place in the output. */
544 spu_elf_create_sections (struct bfd_link_info
*info
)
/* If any input already carries the note, there is nothing to do.  */
548 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
549 if (bfd_get_section_by_name (ibfd
, SPU_PTNOTE_SPUNAME
) != NULL
)
554 /* Make SPU_PTNOTE_SPUNAME section. */
561 ibfd
= info
->input_bfds
;
562 flags
= SEC_LOAD
| SEC_READONLY
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
;
563 s
= bfd_make_section_anyway_with_flags (ibfd
, SPU_PTNOTE_SPUNAME
, flags
);
565 || !bfd_set_section_alignment (ibfd
, s
, 4))
/* Note layout: 12-byte header (namesz, descsz, type), then the
   4-byte-padded plugin name, then the padded output filename.  */
568 name_len
= strlen (bfd_get_filename (info
->output_bfd
)) + 1;
569 size
= 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4);
570 size
+= (name_len
+ 3) & -4;
572 if (!bfd_set_section_size (ibfd
, s
, size
))
575 data
= bfd_zalloc (ibfd
, size
);
579 bfd_put_32 (ibfd
, sizeof (SPU_PLUGIN_NAME
), data
+ 0);
580 bfd_put_32 (ibfd
, name_len
, data
+ 4);
581 bfd_put_32 (ibfd
, 1, data
+ 8);
582 memcpy (data
+ 12, SPU_PLUGIN_NAME
, sizeof (SPU_PLUGIN_NAME
));
583 memcpy (data
+ 12 + ((sizeof (SPU_PLUGIN_NAME
) + 3) & -4),
584 bfd_get_filename (info
->output_bfd
), name_len
);
591 /* qsort predicate to sort sections by vma. */
594 sort_sections (const void *a
, const void *b
)
596 const asection
*const *s1
= a
;
597 const asection
*const *s2
= b
;
598 bfd_signed_vma delta
= (*s1
)->vma
- (*s2
)->vma
;
601 return delta
< 0 ? -1 : 1;
603 return (*s1
)->index
- (*s2
)->index
;
/* NOTE(review): extraction dropped many lines of this function
   (return type, local declarations, braces, loop bodies' "s =
   alloc_sec[i];" loads, the soft-icache vma/lma start computation,
   several error-path continuations and the final returns).  The two
   code paths are: soft-icache -- sections are cache lines, validated
   against line_size; classic overlays -- overlapping vmas mark
   overlay regions.  Restore from the upstream sources before
   building.  */
606 /* Identify overlays in the output bfd, and number them.
607 Returns 0 on error, 1 if no overlays, 2 if overlays. */
610 spu_elf_find_overlays (struct bfd_link_info
*info
)
612 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
613 asection
**alloc_sec
;
614 unsigned int i
, n
, ovl_index
, num_buf
;
/* Overlay manager entry symbols: column selected by ovly_flavour.  */
617 static const char *const entry_names
[2][2] = {
618 { "__ovly_load", "__icache_br_handler" },
619 { "__ovly_return", "__icache_call_handler" }
622 if (info
->output_bfd
->section_count
< 2)
626 = bfd_malloc (info
->output_bfd
->section_count
* sizeof (*alloc_sec
));
627 if (alloc_sec
== NULL
)
630 /* Pick out all the alloced sections. */
631 for (n
= 0, s
= info
->output_bfd
->sections
; s
!= NULL
; s
= s
->next
)
632 if ((s
->flags
& SEC_ALLOC
) != 0
633 && (s
->flags
& (SEC_LOAD
| SEC_THREAD_LOCAL
)) != SEC_THREAD_LOCAL
643 /* Sort them by vma. */
644 qsort (alloc_sec
, n
, sizeof (*alloc_sec
), sort_sections
);
646 ovl_end
= alloc_sec
[0]->vma
+ alloc_sec
[0]->size
;
647 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
649 /* Look for an overlapping vma to find the first overlay section. */
650 bfd_vma vma_start
= 0;
651 bfd_vma lma_start
= 0;
653 for (i
= 1; i
< n
; i
++)
656 if (s
->vma
< ovl_end
)
658 asection
*s0
= alloc_sec
[i
- 1];
660 if (strncmp (s0
->name
, ".ovl.init", 9) != 0)
666 << (htab
->num_lines_log2
+ htab
->line_size_log2
)));
671 ovl_end
= s
->vma
+ s
->size
;
674 /* Now find any sections within the cache area. */
675 for (ovl_index
= 0, num_buf
= 0; i
< n
; i
++)
678 if (s
->vma
>= ovl_end
)
681 /* A section in an overlay area called .ovl.init is not
682 an overlay, in the sense that it might be loaded in
683 by the overlay manager, but rather the initial
684 section contents for the overlay buffer. */
685 if (strncmp (s
->name
, ".ovl.init", 9) != 0)
687 num_buf
= ((s
->vma
- vma_start
) >> htab
->line_size_log2
) + 1;
688 if (((s
->vma
- vma_start
) & (htab
->params
->line_size
- 1))
689 || ((s
->lma
- lma_start
) & (htab
->params
->line_size
- 1)))
691 info
->callbacks
->einfo (_("%X%P: overlay section %A "
692 "does not start on a cache line.\n"),
694 bfd_set_error (bfd_error_bad_value
);
697 else if (s
->size
> htab
->params
->line_size
)
699 info
->callbacks
->einfo (_("%X%P: overlay section %A "
700 "is larger than a cache line.\n"),
702 bfd_set_error (bfd_error_bad_value
);
706 alloc_sec
[ovl_index
++] = s
;
707 spu_elf_section_data (s
)->u
.o
.ovl_index
708 = ((s
->lma
- lma_start
) >> htab
->line_size_log2
) + 1;
709 spu_elf_section_data (s
)->u
.o
.ovl_buf
= num_buf
;
713 /* Ensure there are no more overlay sections. */
717 if (s
->vma
< ovl_end
)
719 info
->callbacks
->einfo (_("%X%P: overlay section %A "
720 "is not in cache area.\n"),
722 bfd_set_error (bfd_error_bad_value
);
726 ovl_end
= s
->vma
+ s
->size
;
731 /* Look for overlapping vmas. Any with overlap must be overlays.
732 Count them. Also count the number of overlay regions. */
733 for (ovl_index
= 0, num_buf
= 0, i
= 1; i
< n
; i
++)
736 if (s
->vma
< ovl_end
)
738 asection
*s0
= alloc_sec
[i
- 1];
740 if (spu_elf_section_data (s0
)->u
.o
.ovl_index
== 0)
743 if (strncmp (s0
->name
, ".ovl.init", 9) != 0)
745 alloc_sec
[ovl_index
] = s0
;
746 spu_elf_section_data (s0
)->u
.o
.ovl_index
= ++ovl_index
;
747 spu_elf_section_data (s0
)->u
.o
.ovl_buf
= num_buf
;
750 ovl_end
= s
->vma
+ s
->size
;
752 if (strncmp (s
->name
, ".ovl.init", 9) != 0)
754 alloc_sec
[ovl_index
] = s
;
755 spu_elf_section_data (s
)->u
.o
.ovl_index
= ++ovl_index
;
756 spu_elf_section_data (s
)->u
.o
.ovl_buf
= num_buf
;
757 if (s0
->vma
!= s
->vma
)
759 info
->callbacks
->einfo (_("%X%P: overlay sections %A "
760 "and %A do not start at the "
763 bfd_set_error (bfd_error_bad_value
);
766 if (ovl_end
< s
->vma
+ s
->size
)
767 ovl_end
= s
->vma
+ s
->size
;
771 ovl_end
= s
->vma
+ s
->size
;
775 htab
->num_overlays
= ovl_index
;
776 htab
->num_buf
= num_buf
;
777 htab
->ovl_sec
= alloc_sec
;
/* Create/flag the two overlay manager entry symbols as referenced.  */
782 for (i
= 0; i
< 2; i
++)
785 struct elf_link_hash_entry
*h
;
787 name
= entry_names
[i
][htab
->params
->ovly_flavour
];
788 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, FALSE
, FALSE
);
792 if (h
->root
.type
== bfd_link_hash_new
)
794 h
->root
.type
= bfd_link_hash_undefined
;
796 h
->ref_regular_nonweak
= 1;
799 htab
->ovly_entry
[i
] = h
;
805 /* Non-zero to use bra in overlay stubs rather than br. */
/* SPU instruction opcode templates used when emitting overlay stubs:
   absolute/relative branches (BRA/BR), branch-and-set-link variants
   (BRASL/BRSL), pipeline no-ops (NOP/LNOP) and immediate-load-address
   (ILA).  Operand fields are OR-ed in by build_stub.  */
808 #define BRA 0x30000000
809 #define BRASL 0x31000000
810 #define BR 0x32000000
811 #define BRSL 0x33000000
812 #define NOP 0x40200000
813 #define LNOP 0x00200000
814 #define ILA 0x42000000
816 /* Return true for all relative and absolute branch instructions.
824 brhnz 00100011 0.. */
827 is_branch (const unsigned char *insn
)
829 return (insn
[0] & 0xec) == 0x20 && (insn
[1] & 0x80) == 0;
832 /* Return true for all indirect branch instructions.
840 bihnz 00100101 011 */
843 is_indirect_branch (const unsigned char *insn
)
845 return (insn
[0] & 0xef) == 0x25 && (insn
[1] & 0x80) == 0;
848 /* Return true for branch hint instructions.
853 is_hint (const unsigned char *insn
)
855 return (insn
[0] & 0xfc) == 0x10;
858 /* True if INPUT_SECTION might need overlay stubs. */
861 maybe_needs_stubs (asection
*input_section
)
863 /* No stubs for debug sections and suchlike. */
864 if ((input_section
->flags
& SEC_ALLOC
) == 0)
867 /* No stubs for link-once sections that will be discarded. */
868 if (input_section
->output_section
== bfd_abs_section_ptr
)
871 /* Don't create stubs for .eh_frame references. */
872 if (strcmp (input_section
->name
, ".eh_frame") == 0)
/* NOTE(review): extraction dropped many lines of this function
   (parameters SYM_SEC and CONTENTS, braces, several early "return
   ret" paths, the section-contents allocation/read, the soft-icache
   lrlive decoding tail, and the final return).  Restore from the
   upstream sources before building.  */
894 /* Return non-zero if this reloc symbol should go via an overlay stub.
895 Return 2 if the stub must be in non-overlay area. */
897 static enum _stub_type
898 needs_ovl_stub (struct elf_link_hash_entry
*h
,
899 Elf_Internal_Sym
*sym
,
901 asection
*input_section
,
902 Elf_Internal_Rela
*irela
,
904 struct bfd_link_info
*info
)
906 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
907 enum elf_spu_reloc_type r_type
;
908 unsigned int sym_type
;
909 bfd_boolean branch
, hint
, call
;
910 enum _stub_type ret
= no_stub
;
/* Symbols outside SPU overlay-capable output sections never need
   stubs.  */
914 || sym_sec
->output_section
== bfd_abs_section_ptr
915 || spu_elf_section_data (sym_sec
->output_section
) == NULL
)
920 /* Ensure no stubs for user supplied overlay manager syms. */
921 if (h
== htab
->ovly_entry
[0] || h
== htab
->ovly_entry
[1])
924 /* setjmp always goes via an overlay stub, because then the return
925 and hence the longjmp goes via __ovly_return. That magically
926 makes setjmp/longjmp between overlays work. */
927 if (strncmp (h
->root
.root
.string
, "setjmp", 6) == 0
928 && (h
->root
.root
.string
[6] == '\0' || h
->root
.root
.string
[6] == '@'))
935 sym_type
= ELF_ST_TYPE (sym
->st_info
);
937 r_type
= ELF32_R_TYPE (irela
->r_info
);
/* Only 16-bit branch-capable relocs can be branch sites; examine the
   instruction bytes to classify them.  */
941 if (r_type
== R_SPU_REL16
|| r_type
== R_SPU_ADDR16
)
943 if (contents
== NULL
)
946 if (!bfd_get_section_contents (input_section
->owner
,
953 contents
+= irela
->r_offset
;
955 branch
= is_branch (contents
);
956 hint
= is_hint (contents
);
/* brsl/brasl: branch-and-set-link, i.e. a call.  */
959 call
= (contents
[0] & 0xfd) == 0x31;
961 && sym_type
!= STT_FUNC
964 /* It's common for people to write assembly and forget
965 to give function symbols the right type. Handle
966 calls to such symbols, but warn so that (hopefully)
967 people will fix their code. We need the symbol
968 type to be correct to distinguish function pointer
969 initialisation from other pointer initialisations. */
970 const char *sym_name
;
973 sym_name
= h
->root
.root
.string
;
976 Elf_Internal_Shdr
*symtab_hdr
;
977 symtab_hdr
= &elf_tdata (input_section
->owner
)->symtab_hdr
;
978 sym_name
= bfd_elf_sym_name (input_section
->owner
,
983 (*_bfd_error_handler
) (_("warning: call to non-function"
984 " symbol %s defined in %B"),
985 sym_sec
->owner
, sym_name
);
991 if ((!branch
&& htab
->params
->ovly_flavour
== ovly_soft_icache
)
992 || (sym_type
!= STT_FUNC
994 && (sym_sec
->flags
& SEC_CODE
) == 0))
997 /* Usually, symbols in non-overlay sections don't need stubs. */
998 if (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
== 0
999 && !htab
->params
->non_overlay_stubs
)
1002 /* A reference from some other section to a symbol in an overlay
1003 section needs a stub. */
1004 if (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
1005 != spu_elf_section_data (input_section
->output_section
)->u
.o
.ovl_index
)
1007 if (call
|| sym_type
== STT_FUNC
)
1008 ret
= call_ovl_stub
;
1011 ret
= br000_ovl_stub
;
/* .brinfo-encoded register-liveness bits from the branch insn.  */
1015 unsigned int lrlive
= (contents
[1] & 0x70) >> 4;
1021 /* If this insn isn't a branch then we are possibly taking the
1022 address of a function and passing it out somehow. Soft-icache code
1023 always generates inline code to do indirect branches. */
1024 if (!(branch
|| hint
)
1025 && sym_type
== STT_FUNC
1026 && htab
->params
->ovly_flavour
!= ovly_soft_icache
)
/* NOTE(review): extraction dropped the return type, the IBFD and ISEC
   parameters, braces, the addend/ovl initialisation of new got
   entries, the list-unlink in the "zap other stubs" loop, and the
   final return of this function.  It tallies required overlay stubs,
   one per (symbol, addend, overlay) via got_entry lists hijacked from
   the generic GOT bookkeeping.  Restore from the upstream sources
   before building.  */
1033 count_stub (struct spu_link_hash_table
*htab
,
1036 enum _stub_type stub_type
,
1037 struct elf_link_hash_entry
*h
,
1038 const Elf_Internal_Rela
*irela
)
1040 unsigned int ovl
= 0;
1041 struct got_entry
*g
, **head
;
1044 /* If this instruction is a branch or call, we need a stub
1045 for it. One stub per function per overlay.
1046 If it isn't a branch, then we are taking the address of
1047 this function so need a stub in the non-overlay area
1048 for it. One stub per function. */
1049 if (stub_type
!= nonovl_stub
)
1050 ovl
= spu_elf_section_data (isec
->output_section
)->u
.o
.ovl_index
;
/* Global symbols hang their stub list off the hash entry; locals off
   a lazily-created per-bfd array indexed by symbol number.  */
1053 head
= &h
->got
.glist
;
1056 if (elf_local_got_ents (ibfd
) == NULL
)
1058 bfd_size_type amt
= (elf_tdata (ibfd
)->symtab_hdr
.sh_info
1059 * sizeof (*elf_local_got_ents (ibfd
)));
1060 elf_local_got_ents (ibfd
) = bfd_zmalloc (amt
);
1061 if (elf_local_got_ents (ibfd
) == NULL
)
1064 head
= elf_local_got_ents (ibfd
) + ELF32_R_SYM (irela
->r_info
);
/* Soft-icache: every branch gets its own stub; no sharing.  */
1067 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1069 htab
->stub_count
[ovl
] += 1;
1075 addend
= irela
->r_addend
;
/* ovl == 0: request for a non-overlay-area stub, which subsumes any
   per-overlay stubs for the same (symbol, addend).  */
1079 struct got_entry
*gnext
;
1081 for (g
= *head
; g
!= NULL
; g
= g
->next
)
1082 if (g
->addend
== addend
&& g
->ovl
== 0)
1087 /* Need a new non-overlay area stub. Zap other stubs. */
1088 for (g
= *head
; g
!= NULL
; g
= gnext
)
1091 if (g
->addend
== addend
)
1093 htab
->stub_count
[g
->ovl
] -= 1;
1101 for (g
= *head
; g
!= NULL
; g
= g
->next
)
1102 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
1108 g
= bfd_malloc (sizeof *g
);
1113 g
->stub_addr
= (bfd_vma
) -1;
1117 htab
->stub_count
[ovl
] += 1;
1123 /* Support two sizes of overlay stubs, a slower more compact stub of two
1124 intructions, and a faster stub of four instructions.
1125 Soft-icache stubs are four or eight words. */
1128 ovl_stub_size (struct spu_elf_params
*params
)
1130 return 16 << params
->ovly_flavour
>> params
->compact_stub
;
1134 ovl_stub_size_log2 (struct spu_elf_params
*params
)
1136 return 4 + params
->ovly_flavour
- params
->compact_stub
;
1139 /* Two instruction overlay stubs look like:
1141 brsl $75,__ovly_load
1142 .word target_ovl_and_address
1144 ovl_and_address is a word with the overlay number in the top 14 bits
1145 and local store address in the bottom 18 bits.
1147 Four instruction overlay stubs look like:
1151 ila $79,target_address
1154 Software icache stubs are:
1158 .word lrlive_branchlocalstoreaddr;
1159 brasl $75,__icache_br_handler
1164 build_stub (struct bfd_link_info
*info
,
1167 enum _stub_type stub_type
,
1168 struct elf_link_hash_entry
*h
,
1169 const Elf_Internal_Rela
*irela
,
1173 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1174 unsigned int ovl
, dest_ovl
, set_id
;
1175 struct got_entry
*g
, **head
;
1177 bfd_vma addend
, from
, to
, br_dest
, patt
;
1178 unsigned int lrlive
;
1181 if (stub_type
!= nonovl_stub
)
1182 ovl
= spu_elf_section_data (isec
->output_section
)->u
.o
.ovl_index
;
1185 head
= &h
->got
.glist
;
1187 head
= elf_local_got_ents (ibfd
) + ELF32_R_SYM (irela
->r_info
);
1191 addend
= irela
->r_addend
;
1193 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1195 g
= bfd_malloc (sizeof *g
);
1201 g
->br_addr
= (irela
->r_offset
1202 + isec
->output_offset
1203 + isec
->output_section
->vma
);
1209 for (g
= *head
; g
!= NULL
; g
= g
->next
)
1210 if (g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
1215 if (g
->ovl
== 0 && ovl
!= 0)
1218 if (g
->stub_addr
!= (bfd_vma
) -1)
1222 sec
= htab
->stub_sec
[ovl
];
1223 dest
+= dest_sec
->output_offset
+ dest_sec
->output_section
->vma
;
1224 from
= sec
->size
+ sec
->output_offset
+ sec
->output_section
->vma
;
1225 g
->stub_addr
= from
;
1226 to
= (htab
->ovly_entry
[0]->root
.u
.def
.value
1227 + htab
->ovly_entry
[0]->root
.u
.def
.section
->output_offset
1228 + htab
->ovly_entry
[0]->root
.u
.def
.section
->output_section
->vma
);
1230 if (((dest
| to
| from
) & 3) != 0)
1235 dest_ovl
= spu_elf_section_data (dest_sec
->output_section
)->u
.o
.ovl_index
;
1237 if (htab
->params
->ovly_flavour
== ovly_normal
1238 && !htab
->params
->compact_stub
)
1240 bfd_put_32 (sec
->owner
, ILA
+ ((dest_ovl
<< 7) & 0x01ffff80) + 78,
1241 sec
->contents
+ sec
->size
);
1242 bfd_put_32 (sec
->owner
, LNOP
,
1243 sec
->contents
+ sec
->size
+ 4);
1244 bfd_put_32 (sec
->owner
, ILA
+ ((dest
<< 7) & 0x01ffff80) + 79,
1245 sec
->contents
+ sec
->size
+ 8);
1247 bfd_put_32 (sec
->owner
, BR
+ (((to
- (from
+ 12)) << 5) & 0x007fff80),
1248 sec
->contents
+ sec
->size
+ 12);
1250 bfd_put_32 (sec
->owner
, BRA
+ ((to
<< 5) & 0x007fff80),
1251 sec
->contents
+ sec
->size
+ 12);
1253 else if (htab
->params
->ovly_flavour
== ovly_normal
1254 && htab
->params
->compact_stub
)
1257 bfd_put_32 (sec
->owner
, BRSL
+ (((to
- from
) << 5) & 0x007fff80) + 75,
1258 sec
->contents
+ sec
->size
);
1260 bfd_put_32 (sec
->owner
, BRASL
+ ((to
<< 5) & 0x007fff80) + 75,
1261 sec
->contents
+ sec
->size
);
1262 bfd_put_32 (sec
->owner
, (dest
& 0x3ffff) | (dest_ovl
<< 18),
1263 sec
->contents
+ sec
->size
+ 4);
1265 else if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1268 if (stub_type
== nonovl_stub
)
1270 else if (stub_type
== call_ovl_stub
)
1271 /* A brsl makes lr live and *(*sp+16) is live.
1272 Tail calls have the same liveness. */
1274 else if (!htab
->params
->lrlive_analysis
)
1275 /* Assume stack frame and lr save. */
1277 else if (irela
!= NULL
)
1279 /* Analyse branch instructions. */
1280 struct function_info
*caller
;
1283 caller
= find_function (isec
, irela
->r_offset
, info
);
1284 if (caller
->start
== NULL
)
1285 off
= irela
->r_offset
;
1288 struct function_info
*found
= NULL
;
1290 /* Find the earliest piece of this function that
1291 has frame adjusting instructions. We might
1292 see dynamic frame adjustment (eg. for alloca)
1293 in some later piece, but functions using
1294 alloca always set up a frame earlier. Frame
1295 setup instructions are always in one piece. */
1296 if (caller
->lr_store
!= (bfd_vma
) -1
1297 || caller
->sp_adjust
!= (bfd_vma
) -1)
1299 while (caller
->start
!= NULL
)
1301 caller
= caller
->start
;
1302 if (caller
->lr_store
!= (bfd_vma
) -1
1303 || caller
->sp_adjust
!= (bfd_vma
) -1)
1311 if (off
> caller
->sp_adjust
)
1313 if (off
> caller
->lr_store
)
1314 /* Only *(*sp+16) is live. */
1317 /* If no lr save, then we must be in a
1318 leaf function with a frame.
1319 lr is still live. */
1322 else if (off
> caller
->lr_store
)
1324 /* Between lr save and stack adjust. */
1326 /* This should never happen since prologues won't
1331 /* On entry to function. */
1334 if (stub_type
!= br000_ovl_stub
1335 && lrlive
!= stub_type
- br000_ovl_stub
)
1336 info
->callbacks
->einfo (_("%A:0x%v lrlive .brinfo (%u) differs "
1337 "from analysis (%u)\n"),
1338 isec
, irela
->r_offset
, lrlive
,
1339 stub_type
- br000_ovl_stub
);
1342 /* If given lrlive info via .brinfo, use it. */
1343 if (stub_type
> br000_ovl_stub
)
1344 lrlive
= stub_type
- br000_ovl_stub
;
1347 to
= (htab
->ovly_entry
[1]->root
.u
.def
.value
1348 + htab
->ovly_entry
[1]->root
.u
.def
.section
->output_offset
1349 + htab
->ovly_entry
[1]->root
.u
.def
.section
->output_section
->vma
);
1351 if (!htab
->params
->compact_stub
)
1353 /* The branch that uses this stub goes to stub_addr + 12. We'll
1354 set up an xor pattern that can be used by the icache manager
1355 to modify this branch to go directly to its destination. */
1357 br_dest
= g
->stub_addr
;
1360 /* Except in the case of _SPUEAR_ stubs, the branch in
1361 question is the one in the stub itself. */
1362 BFD_ASSERT (stub_type
== nonovl_stub
);
1363 g
->br_addr
= g
->stub_addr
;
1367 bfd_put_32 (sec
->owner
, dest_ovl
- 1,
1368 sec
->contents
+ sec
->size
+ 0);
1369 set_id
= ((dest_ovl
- 1) >> htab
->num_lines_log2
) + 1;
1370 bfd_put_32 (sec
->owner
, (set_id
<< 18) | (dest
& 0x3ffff),
1371 sec
->contents
+ sec
->size
+ 4);
1372 bfd_put_32 (sec
->owner
, (lrlive
<< 29) | (g
->br_addr
& 0x3ffff),
1373 sec
->contents
+ sec
->size
+ 8);
1374 bfd_put_32 (sec
->owner
, BRASL
+ ((to
<< 5) & 0x007fff80) + 75,
1375 sec
->contents
+ sec
->size
+ 12);
1376 patt
= dest
^ br_dest
;
1377 if (irela
!= NULL
&& ELF32_R_TYPE (irela
->r_info
) == R_SPU_REL16
)
1378 patt
= (dest
- g
->br_addr
) ^ (br_dest
- g
->br_addr
);
1379 bfd_put_32 (sec
->owner
, (patt
<< 5) & 0x007fff80,
1380 sec
->contents
+ sec
->size
+ 16 + (g
->br_addr
& 0xf));
1385 br_dest
= g
->stub_addr
;
1388 BFD_ASSERT (stub_type
== nonovl_stub
);
1389 g
->br_addr
= g
->stub_addr
;
1393 set_id
= ((dest_ovl
- 1) >> htab
->num_lines_log2
) + 1;
1394 bfd_put_32 (sec
->owner
, (set_id
<< 18) | (dest
& 0x3ffff),
1395 sec
->contents
+ sec
->size
);
1396 bfd_put_32 (sec
->owner
, BRASL
+ ((to
<< 5) & 0x007fff80) + 75,
1397 sec
->contents
+ sec
->size
+ 4);
1398 bfd_put_32 (sec
->owner
, (lrlive
<< 29) | (g
->br_addr
& 0x3ffff),
1399 sec
->contents
+ sec
->size
+ 8);
1400 patt
= dest
^ br_dest
;
1401 if (irela
!= NULL
&& ELF32_R_TYPE (irela
->r_info
) == R_SPU_REL16
)
1402 patt
= (dest
- g
->br_addr
) ^ (br_dest
- g
->br_addr
);
1403 bfd_put_32 (sec
->owner
, (patt
<< 5) & 0x007fff80,
1404 sec
->contents
+ sec
->size
+ 12);
1408 /* Extra space for linked list entries. */
1414 sec
->size
+= ovl_stub_size (htab
->params
);
1416 if (htab
->params
->emit_stub_syms
)
1422 len
= 8 + sizeof (".ovl_call.") - 1;
1424 len
+= strlen (h
->root
.root
.string
);
1429 add
= (int) irela
->r_addend
& 0xffffffff;
1432 name
= bfd_malloc (len
);
1436 sprintf (name
, "%08x.ovl_call.", g
->ovl
);
1438 strcpy (name
+ 8 + sizeof (".ovl_call.") - 1, h
->root
.root
.string
);
1440 sprintf (name
+ 8 + sizeof (".ovl_call.") - 1, "%x:%x",
1441 dest_sec
->id
& 0xffffffff,
1442 (int) ELF32_R_SYM (irela
->r_info
) & 0xffffffff);
1444 sprintf (name
+ len
- 9, "+%x", add
);
1446 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
1450 if (h
->root
.type
== bfd_link_hash_new
)
1452 h
->root
.type
= bfd_link_hash_defined
;
1453 h
->root
.u
.def
.section
= sec
;
1454 h
->size
= ovl_stub_size (htab
->params
);
1455 h
->root
.u
.def
.value
= sec
->size
- h
->size
;
1459 h
->ref_regular_nonweak
= 1;
1460 h
->forced_local
= 1;
1468 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
1472 allocate_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
1474 /* Symbols starting with _SPUEAR_ need a stub because they may be
1475 invoked by the PPU. */
1476 struct bfd_link_info
*info
= inf
;
1477 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1480 if ((h
->root
.type
== bfd_link_hash_defined
1481 || h
->root
.type
== bfd_link_hash_defweak
)
1483 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0
1484 && (sym_sec
= h
->root
.u
.def
.section
) != NULL
1485 && sym_sec
->output_section
!= bfd_abs_section_ptr
1486 && spu_elf_section_data (sym_sec
->output_section
) != NULL
1487 && (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
!= 0
1488 || htab
->params
->non_overlay_stubs
))
1490 return count_stub (htab
, NULL
, NULL
, nonovl_stub
, h
, NULL
);
1497 build_spuear_stubs (struct elf_link_hash_entry
*h
, void *inf
)
1499 /* Symbols starting with _SPUEAR_ need a stub because they may be
1500 invoked by the PPU. */
1501 struct bfd_link_info
*info
= inf
;
1502 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1505 if ((h
->root
.type
== bfd_link_hash_defined
1506 || h
->root
.type
== bfd_link_hash_defweak
)
1508 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0
1509 && (sym_sec
= h
->root
.u
.def
.section
) != NULL
1510 && sym_sec
->output_section
!= bfd_abs_section_ptr
1511 && spu_elf_section_data (sym_sec
->output_section
) != NULL
1512 && (spu_elf_section_data (sym_sec
->output_section
)->u
.o
.ovl_index
!= 0
1513 || htab
->params
->non_overlay_stubs
))
1515 return build_stub (info
, NULL
, NULL
, nonovl_stub
, h
, NULL
,
1516 h
->root
.u
.def
.value
, sym_sec
);
1522 /* Size or build stubs. */
1525 process_stubs (struct bfd_link_info
*info
, bfd_boolean build
)
1527 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1530 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
1532 extern const bfd_target bfd_elf32_spu_vec
;
1533 Elf_Internal_Shdr
*symtab_hdr
;
1535 Elf_Internal_Sym
*local_syms
= NULL
;
1537 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
1540 /* We'll need the symbol table in a second. */
1541 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
1542 if (symtab_hdr
->sh_info
== 0)
1545 /* Walk over each section attached to the input bfd. */
1546 for (isec
= ibfd
->sections
; isec
!= NULL
; isec
= isec
->next
)
1548 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
1550 /* If there aren't any relocs, then there's nothing more to do. */
1551 if ((isec
->flags
& SEC_RELOC
) == 0
1552 || isec
->reloc_count
== 0)
1555 if (!maybe_needs_stubs (isec
))
1558 /* Get the relocs. */
1559 internal_relocs
= _bfd_elf_link_read_relocs (ibfd
, isec
, NULL
, NULL
,
1561 if (internal_relocs
== NULL
)
1562 goto error_ret_free_local
;
1564 /* Now examine each relocation. */
1565 irela
= internal_relocs
;
1566 irelaend
= irela
+ isec
->reloc_count
;
1567 for (; irela
< irelaend
; irela
++)
1569 enum elf_spu_reloc_type r_type
;
1570 unsigned int r_indx
;
1572 Elf_Internal_Sym
*sym
;
1573 struct elf_link_hash_entry
*h
;
1574 enum _stub_type stub_type
;
1576 r_type
= ELF32_R_TYPE (irela
->r_info
);
1577 r_indx
= ELF32_R_SYM (irela
->r_info
);
1579 if (r_type
>= R_SPU_max
)
1581 bfd_set_error (bfd_error_bad_value
);
1582 error_ret_free_internal
:
1583 if (elf_section_data (isec
)->relocs
!= internal_relocs
)
1584 free (internal_relocs
);
1585 error_ret_free_local
:
1586 if (local_syms
!= NULL
1587 && (symtab_hdr
->contents
1588 != (unsigned char *) local_syms
))
1593 /* Determine the reloc target section. */
1594 if (!get_sym_h (&h
, &sym
, &sym_sec
, &local_syms
, r_indx
, ibfd
))
1595 goto error_ret_free_internal
;
1597 stub_type
= needs_ovl_stub (h
, sym
, sym_sec
, isec
, irela
,
1599 if (stub_type
== no_stub
)
1601 else if (stub_type
== stub_error
)
1602 goto error_ret_free_internal
;
1604 if (htab
->stub_count
== NULL
)
1607 amt
= (htab
->num_overlays
+ 1) * sizeof (*htab
->stub_count
);
1608 htab
->stub_count
= bfd_zmalloc (amt
);
1609 if (htab
->stub_count
== NULL
)
1610 goto error_ret_free_internal
;
1615 if (!count_stub (htab
, ibfd
, isec
, stub_type
, h
, irela
))
1616 goto error_ret_free_internal
;
1623 dest
= h
->root
.u
.def
.value
;
1625 dest
= sym
->st_value
;
1626 dest
+= irela
->r_addend
;
1627 if (!build_stub (info
, ibfd
, isec
, stub_type
, h
, irela
,
1629 goto error_ret_free_internal
;
1633 /* We're done with the internal relocs, free them. */
1634 if (elf_section_data (isec
)->relocs
!= internal_relocs
)
1635 free (internal_relocs
);
1638 if (local_syms
!= NULL
1639 && symtab_hdr
->contents
!= (unsigned char *) local_syms
)
1641 if (!info
->keep_memory
)
1644 symtab_hdr
->contents
= (unsigned char *) local_syms
;
1651 /* Allocate space for overlay call and return stubs.
1652 Return 0 on error, 1 if no stubs, 2 otherwise. */
1655 spu_elf_size_stubs (struct bfd_link_info
*info
)
1657 struct spu_link_hash_table
*htab
;
1665 if (!process_stubs (info
, FALSE
))
1668 htab
= spu_hash_table (info
);
1669 elf_link_hash_traverse (&htab
->elf
, allocate_spuear_stubs
, info
);
1673 if (htab
->stub_count
== NULL
)
1676 ibfd
= info
->input_bfds
;
1677 amt
= (htab
->num_overlays
+ 1) * sizeof (*htab
->stub_sec
);
1678 htab
->stub_sec
= bfd_zmalloc (amt
);
1679 if (htab
->stub_sec
== NULL
)
1682 flags
= (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_READONLY
1683 | SEC_HAS_CONTENTS
| SEC_IN_MEMORY
);
1684 stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1685 htab
->stub_sec
[0] = stub
;
1687 || !bfd_set_section_alignment (ibfd
, stub
,
1688 ovl_stub_size_log2 (htab
->params
)))
1690 stub
->size
= htab
->stub_count
[0] * ovl_stub_size (htab
->params
);
1691 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1692 /* Extra space for linked list entries. */
1693 stub
->size
+= htab
->stub_count
[0] * 16;
1694 (*htab
->params
->place_spu_section
) (stub
, NULL
, ".text");
1696 for (i
= 0; i
< htab
->num_overlays
; ++i
)
1698 asection
*osec
= htab
->ovl_sec
[i
];
1699 unsigned int ovl
= spu_elf_section_data (osec
)->u
.o
.ovl_index
;
1700 stub
= bfd_make_section_anyway_with_flags (ibfd
, ".stub", flags
);
1701 htab
->stub_sec
[ovl
] = stub
;
1703 || !bfd_set_section_alignment (ibfd
, stub
,
1704 ovl_stub_size_log2 (htab
->params
)))
1706 stub
->size
= htab
->stub_count
[ovl
] * ovl_stub_size (htab
->params
);
1707 (*htab
->params
->place_spu_section
) (stub
, osec
, NULL
);
1710 flags
= (SEC_ALLOC
| SEC_LOAD
1711 | SEC_HAS_CONTENTS
| SEC_IN_MEMORY
);
1712 htab
->ovtab
= bfd_make_section_anyway_with_flags (ibfd
, ".ovtab", flags
);
1713 if (htab
->ovtab
== NULL
1714 || !bfd_set_section_alignment (ibfd
, htab
->ovtab
, 4))
1717 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1719 /* Space for icache manager tables.
1720 a) Tag array, one quadword per cache line.
1721 b) Linked list elements, max_branch per line quadwords. */
1722 htab
->ovtab
->size
= 16 * ((1 + htab
->params
->max_branch
)
1723 << htab
->num_lines_log2
);
1725 htab
->init
= bfd_make_section_anyway_with_flags (ibfd
, ".ovini", flags
);
1726 if (htab
->init
== NULL
1727 || !bfd_set_section_alignment (ibfd
, htab
->init
, 4))
1730 htab
->init
->size
= 16;
1731 (*htab
->params
->place_spu_section
) (htab
->init
, NULL
, ".ovl.init");
1735 /* htab->ovtab consists of two arrays.
1745 . } _ovly_buf_table[];
1748 htab
->ovtab
->size
= htab
->num_overlays
* 16 + 16 + htab
->num_buf
* 4;
1751 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1752 ovout
= ".data.icache";
1753 (*htab
->params
->place_spu_section
) (htab
->ovtab
, NULL
, ovout
);
1755 htab
->toe
= bfd_make_section_anyway_with_flags (ibfd
, ".toe", SEC_ALLOC
);
1756 if (htab
->toe
== NULL
1757 || !bfd_set_section_alignment (ibfd
, htab
->toe
, 4))
1759 htab
->toe
->size
= htab
->params
->ovly_flavour
== ovly_soft_icache
? 256 : 16;
1760 (*htab
->params
->place_spu_section
) (htab
->toe
, NULL
, ".toe");
1765 /* Functions to handle embedded spu_ovl.o object. */
1768 ovl_mgr_open (struct bfd
*nbfd ATTRIBUTE_UNUSED
, void *stream
)
1774 ovl_mgr_pread (struct bfd
*abfd ATTRIBUTE_UNUSED
,
1780 struct _ovl_stream
*os
;
1784 os
= (struct _ovl_stream
*) stream
;
1785 max
= (const char *) os
->end
- (const char *) os
->start
;
1787 if ((ufile_ptr
) offset
>= max
)
1791 if (count
> max
- offset
)
1792 count
= max
- offset
;
1794 memcpy (buf
, (const char *) os
->start
+ offset
, count
);
1799 spu_elf_open_builtin_lib (bfd
**ovl_bfd
, const struct _ovl_stream
*stream
)
1801 *ovl_bfd
= bfd_openr_iovec ("builtin ovl_mgr",
1808 return *ovl_bfd
!= NULL
;
1812 overlay_index (asection
*sec
)
1815 || sec
->output_section
== bfd_abs_section_ptr
)
1817 return spu_elf_section_data (sec
->output_section
)->u
.o
.ovl_index
;
1820 /* Define an STT_OBJECT symbol. */
1822 static struct elf_link_hash_entry
*
1823 define_ovtab_symbol (struct spu_link_hash_table
*htab
, const char *name
)
1825 struct elf_link_hash_entry
*h
;
1827 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, FALSE
, FALSE
);
1831 if (h
->root
.type
!= bfd_link_hash_defined
1834 h
->root
.type
= bfd_link_hash_defined
;
1835 h
->root
.u
.def
.section
= htab
->ovtab
;
1836 h
->type
= STT_OBJECT
;
1839 h
->ref_regular_nonweak
= 1;
1842 else if (h
->root
.u
.def
.section
->owner
!= NULL
)
1844 (*_bfd_error_handler
) (_("%B is not allowed to define %s"),
1845 h
->root
.u
.def
.section
->owner
,
1846 h
->root
.root
.string
);
1847 bfd_set_error (bfd_error_bad_value
);
1852 (*_bfd_error_handler
) (_("you are not allowed to define %s in a script"),
1853 h
->root
.root
.string
);
1854 bfd_set_error (bfd_error_bad_value
);
1861 /* Fill in all stubs and the overlay tables. */
1864 spu_elf_build_stubs (struct bfd_link_info
*info
)
1866 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
1867 struct elf_link_hash_entry
*h
;
1873 if (htab
->stub_count
== NULL
)
1876 for (i
= 0; i
<= htab
->num_overlays
; i
++)
1877 if (htab
->stub_sec
[i
]->size
!= 0)
1879 htab
->stub_sec
[i
]->contents
= bfd_zalloc (htab
->stub_sec
[i
]->owner
,
1880 htab
->stub_sec
[i
]->size
);
1881 if (htab
->stub_sec
[i
]->contents
== NULL
)
1883 htab
->stub_sec
[i
]->rawsize
= htab
->stub_sec
[i
]->size
;
1884 htab
->stub_sec
[i
]->size
= 0;
1887 for (i
= 0; i
< 2; i
++)
1889 h
= htab
->ovly_entry
[i
];
1890 BFD_ASSERT (h
!= NULL
);
1892 if ((h
->root
.type
== bfd_link_hash_defined
1893 || h
->root
.type
== bfd_link_hash_defweak
)
1896 s
= h
->root
.u
.def
.section
->output_section
;
1897 if (spu_elf_section_data (s
)->u
.o
.ovl_index
)
1899 (*_bfd_error_handler
) (_("%s in overlay section"),
1900 h
->root
.root
.string
);
1901 bfd_set_error (bfd_error_bad_value
);
1909 /* Fill in all the stubs. */
1910 process_stubs (info
, TRUE
);
1911 if (!htab
->stub_err
)
1912 elf_link_hash_traverse (&htab
->elf
, build_spuear_stubs
, info
);
1916 (*_bfd_error_handler
) (_("overlay stub relocation overflow"));
1917 bfd_set_error (bfd_error_bad_value
);
1921 for (i
= 0; i
<= htab
->num_overlays
; i
++)
1923 if (htab
->stub_sec
[i
]->size
!= htab
->stub_sec
[i
]->rawsize
)
1925 (*_bfd_error_handler
) (_("stubs don't match calculated size"));
1926 bfd_set_error (bfd_error_bad_value
);
1929 htab
->stub_sec
[i
]->rawsize
= 0;
1932 if (htab
->ovtab
== NULL
|| htab
->ovtab
->size
== 0)
1935 htab
->ovtab
->contents
= bfd_zalloc (htab
->ovtab
->owner
, htab
->ovtab
->size
);
1936 if (htab
->ovtab
->contents
== NULL
)
1939 p
= htab
->ovtab
->contents
;
1940 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
1942 bfd_vma off
, icache_base
, linklist
;
1944 h
= define_ovtab_symbol (htab
, "__icache_tag_array");
1947 h
->root
.u
.def
.value
= 0;
1948 h
->size
= 16 << htab
->num_lines_log2
;
1951 h
= define_ovtab_symbol (htab
, "__icache_tag_array_size");
1954 h
->root
.u
.def
.value
= 16 << htab
->num_lines_log2
;
1955 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
1957 icache_base
= htab
->ovl_sec
[0]->vma
;
1958 linklist
= (htab
->ovtab
->output_section
->vma
1959 + htab
->ovtab
->output_offset
1961 for (i
= 0; i
< htab
->params
->num_lines
; i
++)
1963 bfd_vma line_end
= icache_base
+ ((i
+ 1) << htab
->line_size_log2
);
1964 bfd_vma stub_base
= line_end
- htab
->params
->max_branch
* 32;
1965 bfd_vma link_elem
= linklist
+ i
* htab
->params
->max_branch
* 16;
1966 bfd_vma locator
= link_elem
- stub_base
/ 2;
1968 bfd_put_32 (htab
->ovtab
->owner
, locator
, p
+ 4);
1969 bfd_put_16 (htab
->ovtab
->owner
, link_elem
, p
+ 8);
1970 bfd_put_16 (htab
->ovtab
->owner
, link_elem
, p
+ 10);
1971 bfd_put_16 (htab
->ovtab
->owner
, link_elem
, p
+ 12);
1972 bfd_put_16 (htab
->ovtab
->owner
, link_elem
, p
+ 14);
1976 h
= define_ovtab_symbol (htab
, "__icache_linked_list");
1979 h
->root
.u
.def
.value
= off
;
1980 h
->size
= htab
->params
->max_branch
<< (htab
->num_lines_log2
+ 4);
1984 h
= define_ovtab_symbol (htab
, "__icache_base");
1987 h
->root
.u
.def
.value
= htab
->ovl_sec
[0]->vma
;
1988 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
1989 h
->size
= htab
->num_buf
<< htab
->line_size_log2
;
1991 h
= define_ovtab_symbol (htab
, "__icache_neg_log2_linesize");
1994 h
->root
.u
.def
.value
= -htab
->line_size_log2
;
1995 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
1997 if (htab
->init
!= NULL
&& htab
->init
->size
!= 0)
1999 htab
->init
->contents
= bfd_zalloc (htab
->init
->owner
,
2001 if (htab
->init
->contents
== NULL
)
2004 h
= define_ovtab_symbol (htab
, "__icache_fileoff");
2007 h
->root
.u
.def
.value
= 0;
2008 h
->root
.u
.def
.section
= htab
->init
;
2014 /* Write out _ovly_table. */
2015 /* set low bit of .size to mark non-overlay area as present. */
2017 obfd
= htab
->ovtab
->output_section
->owner
;
2018 for (s
= obfd
->sections
; s
!= NULL
; s
= s
->next
)
2020 unsigned int ovl_index
= spu_elf_section_data (s
)->u
.o
.ovl_index
;
2024 unsigned long off
= ovl_index
* 16;
2025 unsigned int ovl_buf
= spu_elf_section_data (s
)->u
.o
.ovl_buf
;
2027 bfd_put_32 (htab
->ovtab
->owner
, s
->vma
, p
+ off
);
2028 bfd_put_32 (htab
->ovtab
->owner
, (s
->size
+ 15) & -16,
2030 /* file_off written later in spu_elf_modify_program_headers. */
2031 bfd_put_32 (htab
->ovtab
->owner
, ovl_buf
, p
+ off
+ 12);
2035 h
= define_ovtab_symbol (htab
, "_ovly_table");
2038 h
->root
.u
.def
.value
= 16;
2039 h
->size
= htab
->num_overlays
* 16;
2041 h
= define_ovtab_symbol (htab
, "_ovly_table_end");
2044 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16;
2047 h
= define_ovtab_symbol (htab
, "_ovly_buf_table");
2050 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16;
2051 h
->size
= htab
->num_buf
* 4;
2053 h
= define_ovtab_symbol (htab
, "_ovly_buf_table_end");
2056 h
->root
.u
.def
.value
= htab
->num_overlays
* 16 + 16 + htab
->num_buf
* 4;
2060 h
= define_ovtab_symbol (htab
, "_EAR_");
2063 h
->root
.u
.def
.section
= htab
->toe
;
2064 h
->root
.u
.def
.value
= 0;
2065 h
->size
= htab
->params
->ovly_flavour
== ovly_soft_icache
? 16 * 16 : 16;
2070 /* Check that all loadable section VMAs lie in the range
2071 LO .. HI inclusive, and stash some parameters for --auto-overlay. */
2074 spu_elf_check_vma (struct bfd_link_info
*info
)
2076 struct elf_segment_map
*m
;
2078 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
2079 bfd
*abfd
= info
->output_bfd
;
2080 bfd_vma hi
= htab
->params
->local_store_hi
;
2081 bfd_vma lo
= htab
->params
->local_store_lo
;
2083 htab
->local_store
= hi
+ 1 - lo
;
2085 for (m
= elf_tdata (abfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
2086 if (m
->p_type
== PT_LOAD
)
2087 for (i
= 0; i
< m
->count
; i
++)
2088 if (m
->sections
[i
]->size
!= 0
2089 && (m
->sections
[i
]->vma
< lo
2090 || m
->sections
[i
]->vma
> hi
2091 || m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1 > hi
))
2092 return m
->sections
[i
];
2097 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
2098 Search for stack adjusting insns, and return the sp delta.
2099 If a store of lr is found save the instruction offset to *LR_STORE.
2100 If a stack adjusting instruction is found, save that offset to
2104 find_function_stack_adjust (asection
*sec
,
2111 memset (reg
, 0, sizeof (reg
));
2112 for ( ; offset
+ 4 <= sec
->size
; offset
+= 4)
2114 unsigned char buf
[4];
2118 /* Assume no relocs on stack adjusing insns. */
2119 if (!bfd_get_section_contents (sec
->owner
, sec
, buf
, offset
, 4))
2123 ra
= ((buf
[2] & 0x3f) << 1) | (buf
[3] >> 7);
2125 if (buf
[0] == 0x24 /* stqd */)
2127 if (rt
== 0 /* lr */ && ra
== 1 /* sp */)
2132 /* Partly decoded immediate field. */
2133 imm
= (buf
[1] << 9) | (buf
[2] << 1) | (buf
[3] >> 7);
2135 if (buf
[0] == 0x1c /* ai */)
2138 imm
= (imm
^ 0x200) - 0x200;
2139 reg
[rt
] = reg
[ra
] + imm
;
2141 if (rt
== 1 /* sp */)
2145 *sp_adjust
= offset
;
2149 else if (buf
[0] == 0x18 && (buf
[1] & 0xe0) == 0 /* a */)
2151 int rb
= ((buf
[1] & 0x1f) << 2) | ((buf
[2] & 0xc0) >> 6);
2153 reg
[rt
] = reg
[ra
] + reg
[rb
];
2158 *sp_adjust
= offset
;
2162 else if (buf
[0] == 0x08 && (buf
[1] & 0xe0) == 0 /* sf */)
2164 int rb
= ((buf
[1] & 0x1f) << 2) | ((buf
[2] & 0xc0) >> 6);
2166 reg
[rt
] = reg
[rb
] - reg
[ra
];
2171 *sp_adjust
= offset
;
2175 else if ((buf
[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
2177 if (buf
[0] >= 0x42 /* ila */)
2178 imm
|= (buf
[0] & 1) << 17;
2183 if (buf
[0] == 0x40 /* il */)
2185 if ((buf
[1] & 0x80) == 0)
2187 imm
= (imm
^ 0x8000) - 0x8000;
2189 else if ((buf
[1] & 0x80) == 0 /* ilhu */)
2195 else if (buf
[0] == 0x60 && (buf
[1] & 0x80) != 0 /* iohl */)
2197 reg
[rt
] |= imm
& 0xffff;
2200 else if (buf
[0] == 0x04 /* ori */)
2203 imm
= (imm
^ 0x200) - 0x200;
2204 reg
[rt
] = reg
[ra
] | imm
;
2207 else if (buf
[0] == 0x32 && (buf
[1] & 0x80) != 0 /* fsmbi */)
2209 reg
[rt
] = ( ((imm
& 0x8000) ? 0xff000000 : 0)
2210 | ((imm
& 0x4000) ? 0x00ff0000 : 0)
2211 | ((imm
& 0x2000) ? 0x0000ff00 : 0)
2212 | ((imm
& 0x1000) ? 0x000000ff : 0));
2215 else if (buf
[0] == 0x16 /* andbi */)
2221 reg
[rt
] = reg
[ra
] & imm
;
2224 else if (buf
[0] == 0x33 && imm
== 1 /* brsl .+4 */)
2226 /* Used in pic reg load. Say rt is trashed. Won't be used
2227 in stack adjust, but we need to continue past this branch. */
2231 else if (is_branch (buf
) || is_indirect_branch (buf
))
2232 /* If we hit a branch then we must be out of the prologue. */
2239 /* qsort predicate to sort symbols by section and value. */
2241 static Elf_Internal_Sym
*sort_syms_syms
;
2242 static asection
**sort_syms_psecs
;
2245 sort_syms (const void *a
, const void *b
)
2247 Elf_Internal_Sym
*const *s1
= a
;
2248 Elf_Internal_Sym
*const *s2
= b
;
2249 asection
*sec1
,*sec2
;
2250 bfd_signed_vma delta
;
2252 sec1
= sort_syms_psecs
[*s1
- sort_syms_syms
];
2253 sec2
= sort_syms_psecs
[*s2
- sort_syms_syms
];
2256 return sec1
->index
- sec2
->index
;
2258 delta
= (*s1
)->st_value
- (*s2
)->st_value
;
2260 return delta
< 0 ? -1 : 1;
2262 delta
= (*s2
)->st_size
- (*s1
)->st_size
;
2264 return delta
< 0 ? -1 : 1;
2266 return *s1
< *s2
? -1 : 1;
2269 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
2270 entries for section SEC. */
2272 static struct spu_elf_stack_info
*
2273 alloc_stack_info (asection
*sec
, int max_fun
)
2275 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2278 amt
= sizeof (struct spu_elf_stack_info
);
2279 amt
+= (max_fun
- 1) * sizeof (struct function_info
);
2280 sec_data
->u
.i
.stack_info
= bfd_zmalloc (amt
);
2281 if (sec_data
->u
.i
.stack_info
!= NULL
)
2282 sec_data
->u
.i
.stack_info
->max_fun
= max_fun
;
2283 return sec_data
->u
.i
.stack_info
;
2286 /* Add a new struct function_info describing a (part of a) function
2287 starting at SYM_H. Keep the array sorted by address. */
2289 static struct function_info
*
2290 maybe_insert_function (asection
*sec
,
2293 bfd_boolean is_func
)
2295 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2296 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
2302 sinfo
= alloc_stack_info (sec
, 20);
2309 Elf_Internal_Sym
*sym
= sym_h
;
2310 off
= sym
->st_value
;
2311 size
= sym
->st_size
;
2315 struct elf_link_hash_entry
*h
= sym_h
;
2316 off
= h
->root
.u
.def
.value
;
2320 for (i
= sinfo
->num_fun
; --i
>= 0; )
2321 if (sinfo
->fun
[i
].lo
<= off
)
2326 /* Don't add another entry for an alias, but do update some
2328 if (sinfo
->fun
[i
].lo
== off
)
2330 /* Prefer globals over local syms. */
2331 if (global
&& !sinfo
->fun
[i
].global
)
2333 sinfo
->fun
[i
].global
= TRUE
;
2334 sinfo
->fun
[i
].u
.h
= sym_h
;
2337 sinfo
->fun
[i
].is_func
= TRUE
;
2338 return &sinfo
->fun
[i
];
2340 /* Ignore a zero-size symbol inside an existing function. */
2341 else if (sinfo
->fun
[i
].hi
> off
&& size
== 0)
2342 return &sinfo
->fun
[i
];
2345 if (sinfo
->num_fun
>= sinfo
->max_fun
)
2347 bfd_size_type amt
= sizeof (struct spu_elf_stack_info
);
2348 bfd_size_type old
= amt
;
2350 old
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
2351 sinfo
->max_fun
+= 20 + (sinfo
->max_fun
>> 1);
2352 amt
+= (sinfo
->max_fun
- 1) * sizeof (struct function_info
);
2353 sinfo
= bfd_realloc (sinfo
, amt
);
2356 memset ((char *) sinfo
+ old
, 0, amt
- old
);
2357 sec_data
->u
.i
.stack_info
= sinfo
;
2360 if (++i
< sinfo
->num_fun
)
2361 memmove (&sinfo
->fun
[i
+ 1], &sinfo
->fun
[i
],
2362 (sinfo
->num_fun
- i
) * sizeof (sinfo
->fun
[i
]));
2363 sinfo
->fun
[i
].is_func
= is_func
;
2364 sinfo
->fun
[i
].global
= global
;
2365 sinfo
->fun
[i
].sec
= sec
;
2367 sinfo
->fun
[i
].u
.h
= sym_h
;
2369 sinfo
->fun
[i
].u
.sym
= sym_h
;
2370 sinfo
->fun
[i
].lo
= off
;
2371 sinfo
->fun
[i
].hi
= off
+ size
;
2372 sinfo
->fun
[i
].lr_store
= -1;
2373 sinfo
->fun
[i
].sp_adjust
= -1;
2374 sinfo
->fun
[i
].stack
= -find_function_stack_adjust (sec
, off
,
2375 &sinfo
->fun
[i
].lr_store
,
2376 &sinfo
->fun
[i
].sp_adjust
);
2377 sinfo
->num_fun
+= 1;
2378 return &sinfo
->fun
[i
];
2381 /* Return the name of FUN. */
2384 func_name (struct function_info
*fun
)
2388 Elf_Internal_Shdr
*symtab_hdr
;
2390 while (fun
->start
!= NULL
)
2394 return fun
->u
.h
->root
.root
.string
;
2397 if (fun
->u
.sym
->st_name
== 0)
2399 size_t len
= strlen (sec
->name
);
2400 char *name
= bfd_malloc (len
+ 10);
2403 sprintf (name
, "%s+%lx", sec
->name
,
2404 (unsigned long) fun
->u
.sym
->st_value
& 0xffffffff);
2408 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2409 return bfd_elf_sym_name (ibfd
, symtab_hdr
, fun
->u
.sym
, sec
);
2412 /* Read the instruction at OFF in SEC. Return true iff the instruction
2413 is a nop, lnop, or stop 0 (all zero insn). */
2416 is_nop (asection
*sec
, bfd_vma off
)
2418 unsigned char insn
[4];
2420 if (off
+ 4 > sec
->size
2421 || !bfd_get_section_contents (sec
->owner
, sec
, insn
, off
, 4))
2423 if ((insn
[0] & 0xbf) == 0 && (insn
[1] & 0xe0) == 0x20)
2425 if (insn
[0] == 0 && insn
[1] == 0 && insn
[2] == 0 && insn
[3] == 0)
2430 /* Extend the range of FUN to cover nop padding up to LIMIT.
2431 Return TRUE iff some instruction other than a NOP was found. */
2434 insns_at_end (struct function_info
*fun
, bfd_vma limit
)
2436 bfd_vma off
= (fun
->hi
+ 3) & -4;
2438 while (off
< limit
&& is_nop (fun
->sec
, off
))
2449 /* Check and fix overlapping function ranges. Return TRUE iff there
2450 are gaps in the current info we have about functions in SEC. */
2453 check_function_ranges (asection
*sec
, struct bfd_link_info
*info
)
2455 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2456 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
2458 bfd_boolean gaps
= FALSE
;
2463 for (i
= 1; i
< sinfo
->num_fun
; i
++)
2464 if (sinfo
->fun
[i
- 1].hi
> sinfo
->fun
[i
].lo
)
2466 /* Fix overlapping symbols. */
2467 const char *f1
= func_name (&sinfo
->fun
[i
- 1]);
2468 const char *f2
= func_name (&sinfo
->fun
[i
]);
2470 info
->callbacks
->einfo (_("warning: %s overlaps %s\n"), f1
, f2
);
2471 sinfo
->fun
[i
- 1].hi
= sinfo
->fun
[i
].lo
;
2473 else if (insns_at_end (&sinfo
->fun
[i
- 1], sinfo
->fun
[i
].lo
))
2476 if (sinfo
->num_fun
== 0)
2480 if (sinfo
->fun
[0].lo
!= 0)
2482 if (sinfo
->fun
[sinfo
->num_fun
- 1].hi
> sec
->size
)
2484 const char *f1
= func_name (&sinfo
->fun
[sinfo
->num_fun
- 1]);
2486 info
->callbacks
->einfo (_("warning: %s exceeds section size\n"), f1
);
2487 sinfo
->fun
[sinfo
->num_fun
- 1].hi
= sec
->size
;
2489 else if (insns_at_end (&sinfo
->fun
[sinfo
->num_fun
- 1], sec
->size
))
2495 /* Search current function info for a function that contains address
2496 OFFSET in section SEC. */
2498 static struct function_info
*
2499 find_function (asection
*sec
, bfd_vma offset
, struct bfd_link_info
*info
)
2501 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
2502 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
2506 hi
= sinfo
->num_fun
;
2509 mid
= (lo
+ hi
) / 2;
2510 if (offset
< sinfo
->fun
[mid
].lo
)
2512 else if (offset
>= sinfo
->fun
[mid
].hi
)
2515 return &sinfo
->fun
[mid
];
2517 info
->callbacks
->einfo (_("%A:0x%v not found in function table\n"),
2519 bfd_set_error (bfd_error_bad_value
);
2523 /* Add CALLEE to CALLER call list if not already present. Return TRUE
2524 if CALLEE was new. If this function return FALSE, CALLEE should
2528 insert_callee (struct function_info
*caller
, struct call_info
*callee
)
2530 struct call_info
**pp
, *p
;
2532 for (pp
= &caller
->call_list
; (p
= *pp
) != NULL
; pp
= &p
->next
)
2533 if (p
->fun
== callee
->fun
)
2535 /* Tail calls use less stack than normal calls. Retain entry
2536 for normal call over one for tail call. */
2537 p
->is_tail
&= callee
->is_tail
;
2540 p
->fun
->start
= NULL
;
2541 p
->fun
->is_func
= TRUE
;
2544 /* Reorder list so most recent call is first. */
2546 p
->next
= caller
->call_list
;
2547 caller
->call_list
= p
;
2550 callee
->next
= caller
->call_list
;
2552 caller
->call_list
= callee
;
2556 /* Copy CALL and insert the copy into CALLER. */
2559 copy_callee (struct function_info
*caller
, const struct call_info
*call
)
2561 struct call_info
*callee
;
2562 callee
= bfd_malloc (sizeof (*callee
));
2566 if (!insert_callee (caller
, callee
))
2571 /* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2572 overlay stub sections. */
2575 interesting_section (asection
*s
)
2577 return (s
->output_section
!= bfd_abs_section_ptr
2578 && ((s
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
| SEC_IN_MEMORY
))
2579 == (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2583 /* Rummage through the relocs for SEC, looking for function calls.
2584 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
2585 mark destination symbols on calls as being functions. Also
2586 look at branches, which may be tail calls or go to hot/cold
2587 section part of same function. */
2590 mark_functions_via_relocs (asection
*sec
,
2591 struct bfd_link_info
*info
,
2594 Elf_Internal_Rela
*internal_relocs
, *irelaend
, *irela
;
2595 Elf_Internal_Shdr
*symtab_hdr
;
2597 unsigned int priority
= 0;
2598 static bfd_boolean warned
;
2600 if (!interesting_section (sec
)
2601 || sec
->reloc_count
== 0)
2604 internal_relocs
= _bfd_elf_link_read_relocs (sec
->owner
, sec
, NULL
, NULL
,
2606 if (internal_relocs
== NULL
)
2609 symtab_hdr
= &elf_tdata (sec
->owner
)->symtab_hdr
;
2610 psyms
= &symtab_hdr
->contents
;
2611 irela
= internal_relocs
;
2612 irelaend
= irela
+ sec
->reloc_count
;
2613 for (; irela
< irelaend
; irela
++)
2615 enum elf_spu_reloc_type r_type
;
2616 unsigned int r_indx
;
2618 Elf_Internal_Sym
*sym
;
2619 struct elf_link_hash_entry
*h
;
2621 bfd_boolean reject
, is_call
;
2622 struct function_info
*caller
;
2623 struct call_info
*callee
;
2626 r_type
= ELF32_R_TYPE (irela
->r_info
);
2627 if (r_type
!= R_SPU_REL16
2628 && r_type
!= R_SPU_ADDR16
)
2631 if (!(call_tree
&& spu_hash_table (info
)->params
->auto_overlay
))
2635 r_indx
= ELF32_R_SYM (irela
->r_info
);
2636 if (!get_sym_h (&h
, &sym
, &sym_sec
, psyms
, r_indx
, sec
->owner
))
2640 || sym_sec
->output_section
== bfd_abs_section_ptr
)
2646 unsigned char insn
[4];
2648 if (!bfd_get_section_contents (sec
->owner
, sec
, insn
,
2649 irela
->r_offset
, 4))
2651 if (is_branch (insn
))
2653 is_call
= (insn
[0] & 0xfd) == 0x31;
2654 priority
= insn
[1] & 0x0f;
2656 priority
|= insn
[2];
2658 priority
|= insn
[3];
2660 if ((sym_sec
->flags
& (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2661 != (SEC_ALLOC
| SEC_LOAD
| SEC_CODE
))
2664 info
->callbacks
->einfo
2665 (_("%B(%A+0x%v): call to non-code section"
2666 " %B(%A), analysis incomplete\n"),
2667 sec
->owner
, sec
, irela
->r_offset
,
2668 sym_sec
->owner
, sym_sec
);
2676 if (!(call_tree
&& spu_hash_table (info
)->params
->auto_overlay
)
2684 /* For --auto-overlay, count possible stubs we need for
2685 function pointer references. */
2686 unsigned int sym_type
;
2690 sym_type
= ELF_ST_TYPE (sym
->st_info
);
2691 if (sym_type
== STT_FUNC
)
2692 spu_hash_table (info
)->non_ovly_stub
+= 1;
2697 val
= h
->root
.u
.def
.value
;
2699 val
= sym
->st_value
;
2700 val
+= irela
->r_addend
;
2704 struct function_info
*fun
;
2706 if (irela
->r_addend
!= 0)
2708 Elf_Internal_Sym
*fake
= bfd_zmalloc (sizeof (*fake
));
2711 fake
->st_value
= val
;
2713 = _bfd_elf_section_from_bfd_section (sym_sec
->owner
, sym_sec
);
2717 fun
= maybe_insert_function (sym_sec
, sym
, FALSE
, is_call
);
2719 fun
= maybe_insert_function (sym_sec
, h
, TRUE
, is_call
);
2722 if (irela
->r_addend
!= 0
2723 && fun
->u
.sym
!= sym
)
2728 caller
= find_function (sec
, irela
->r_offset
, info
);
2731 callee
= bfd_malloc (sizeof *callee
);
2735 callee
->fun
= find_function (sym_sec
, val
, info
);
2736 if (callee
->fun
== NULL
)
2738 callee
->is_tail
= !is_call
;
2739 callee
->is_pasted
= FALSE
;
2740 callee
->priority
= priority
;
2742 if (callee
->fun
->last_caller
!= sec
)
2744 callee
->fun
->last_caller
= sec
;
2745 callee
->fun
->call_count
+= 1;
2747 if (!insert_callee (caller
, callee
))
2750 && !callee
->fun
->is_func
2751 && callee
->fun
->stack
== 0)
2753 /* This is either a tail call or a branch from one part of
2754 the function to another, ie. hot/cold section. If the
2755 destination has been called by some other function then
2756 it is a separate function. We also assume that functions
2757 are not split across input files. */
2758 if (sec
->owner
!= sym_sec
->owner
)
2760 callee
->fun
->start
= NULL
;
2761 callee
->fun
->is_func
= TRUE
;
2763 else if (callee
->fun
->start
== NULL
)
2764 callee
->fun
->start
= caller
;
2767 struct function_info
*callee_start
;
2768 struct function_info
*caller_start
;
2769 callee_start
= callee
->fun
;
2770 while (callee_start
->start
)
2771 callee_start
= callee_start
->start
;
2772 caller_start
= caller
;
2773 while (caller_start
->start
)
2774 caller_start
= caller_start
->start
;
2775 if (caller_start
!= callee_start
)
2777 callee
->fun
->start
= NULL
;
2778 callee
->fun
->is_func
= TRUE
;
2787 /* Handle something like .init or .fini, which has a piece of a function.
2788 These sections are pasted together to form a single function. */
2791 pasted_function (asection
*sec
)
2793 struct bfd_link_order
*l
;
2794 struct _spu_elf_section_data
*sec_data
;
2795 struct spu_elf_stack_info
*sinfo
;
2796 Elf_Internal_Sym
*fake
;
2797 struct function_info
*fun
, *fun_start
;
2799 fake
= bfd_zmalloc (sizeof (*fake
));
2803 fake
->st_size
= sec
->size
;
2805 = _bfd_elf_section_from_bfd_section (sec
->owner
, sec
);
2806 fun
= maybe_insert_function (sec
, fake
, FALSE
, FALSE
);
2810 /* Find a function immediately preceding this section. */
2812 for (l
= sec
->output_section
->map_head
.link_order
; l
!= NULL
; l
= l
->next
)
2814 if (l
->u
.indirect
.section
== sec
)
2816 if (fun_start
!= NULL
)
2818 struct call_info
*callee
= bfd_malloc (sizeof *callee
);
2822 fun
->start
= fun_start
;
2824 callee
->is_tail
= TRUE
;
2825 callee
->is_pasted
= TRUE
;
2827 if (!insert_callee (fun_start
, callee
))
2833 if (l
->type
== bfd_indirect_link_order
2834 && (sec_data
= spu_elf_section_data (l
->u
.indirect
.section
)) != NULL
2835 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
2836 && sinfo
->num_fun
!= 0)
2837 fun_start
= &sinfo
->fun
[sinfo
->num_fun
- 1];
2840 /* Don't return an error if we did not find a function preceding this
2841 section. The section may have incorrect flags. */
2845 /* Map address ranges in code sections to functions. */
2848 discover_functions (struct bfd_link_info
*info
)
2852 Elf_Internal_Sym
***psym_arr
;
2853 asection
***sec_arr
;
2854 bfd_boolean gaps
= FALSE
;
2857 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
2860 psym_arr
= bfd_zmalloc (bfd_idx
* sizeof (*psym_arr
));
2861 if (psym_arr
== NULL
)
2863 sec_arr
= bfd_zmalloc (bfd_idx
* sizeof (*sec_arr
));
2864 if (sec_arr
== NULL
)
2867 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2869 ibfd
= ibfd
->link_next
, bfd_idx
++)
2871 extern const bfd_target bfd_elf32_spu_vec
;
2872 Elf_Internal_Shdr
*symtab_hdr
;
2875 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
2876 asection
**psecs
, **p
;
2878 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
2881 /* Read all the symbols. */
2882 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
2883 symcount
= symtab_hdr
->sh_size
/ symtab_hdr
->sh_entsize
;
2887 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2888 if (interesting_section (sec
))
2896 if (symtab_hdr
->contents
!= NULL
)
2898 /* Don't use cached symbols since the generic ELF linker
2899 code only reads local symbols, and we need globals too. */
2900 free (symtab_hdr
->contents
);
2901 symtab_hdr
->contents
= NULL
;
2903 syms
= bfd_elf_get_elf_syms (ibfd
, symtab_hdr
, symcount
, 0,
2905 symtab_hdr
->contents
= (void *) syms
;
2909 /* Select defined function symbols that are going to be output. */
2910 psyms
= bfd_malloc ((symcount
+ 1) * sizeof (*psyms
));
2913 psym_arr
[bfd_idx
] = psyms
;
2914 psecs
= bfd_malloc (symcount
* sizeof (*psecs
));
2917 sec_arr
[bfd_idx
] = psecs
;
2918 for (psy
= psyms
, p
= psecs
, sy
= syms
; sy
< syms
+ symcount
; ++p
, ++sy
)
2919 if (ELF_ST_TYPE (sy
->st_info
) == STT_NOTYPE
2920 || ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
2924 *p
= s
= bfd_section_from_elf_index (ibfd
, sy
->st_shndx
);
2925 if (s
!= NULL
&& interesting_section (s
))
2928 symcount
= psy
- psyms
;
2931 /* Sort them by section and offset within section. */
2932 sort_syms_syms
= syms
;
2933 sort_syms_psecs
= psecs
;
2934 qsort (psyms
, symcount
, sizeof (*psyms
), sort_syms
);
2936 /* Now inspect the function symbols. */
2937 for (psy
= psyms
; psy
< psyms
+ symcount
; )
2939 asection
*s
= psecs
[*psy
- syms
];
2940 Elf_Internal_Sym
**psy2
;
2942 for (psy2
= psy
; ++psy2
< psyms
+ symcount
; )
2943 if (psecs
[*psy2
- syms
] != s
)
2946 if (!alloc_stack_info (s
, psy2
- psy
))
2951 /* First install info about properly typed and sized functions.
2952 In an ideal world this will cover all code sections, except
2953 when partitioning functions into hot and cold sections,
2954 and the horrible pasted together .init and .fini functions. */
2955 for (psy
= psyms
; psy
< psyms
+ symcount
; ++psy
)
2958 if (ELF_ST_TYPE (sy
->st_info
) == STT_FUNC
)
2960 asection
*s
= psecs
[sy
- syms
];
2961 if (!maybe_insert_function (s
, sy
, FALSE
, TRUE
))
2966 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
2967 if (interesting_section (sec
))
2968 gaps
|= check_function_ranges (sec
, info
);
2973 /* See if we can discover more function symbols by looking at
2975 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2977 ibfd
= ibfd
->link_next
, bfd_idx
++)
2981 if (psym_arr
[bfd_idx
] == NULL
)
2984 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
2985 if (!mark_functions_via_relocs (sec
, info
, FALSE
))
2989 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
2991 ibfd
= ibfd
->link_next
, bfd_idx
++)
2993 Elf_Internal_Shdr
*symtab_hdr
;
2995 Elf_Internal_Sym
*syms
, *sy
, **psyms
, **psy
;
2998 if ((psyms
= psym_arr
[bfd_idx
]) == NULL
)
3001 psecs
= sec_arr
[bfd_idx
];
3003 symtab_hdr
= &elf_tdata (ibfd
)->symtab_hdr
;
3004 syms
= (Elf_Internal_Sym
*) symtab_hdr
->contents
;
3007 for (sec
= ibfd
->sections
; sec
!= NULL
&& !gaps
; sec
= sec
->next
)
3008 if (interesting_section (sec
))
3009 gaps
|= check_function_ranges (sec
, info
);
3013 /* Finally, install all globals. */
3014 for (psy
= psyms
; (sy
= *psy
) != NULL
; ++psy
)
3018 s
= psecs
[sy
- syms
];
3020 /* Global syms might be improperly typed functions. */
3021 if (ELF_ST_TYPE (sy
->st_info
) != STT_FUNC
3022 && ELF_ST_BIND (sy
->st_info
) == STB_GLOBAL
)
3024 if (!maybe_insert_function (s
, sy
, FALSE
, FALSE
))
3030 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3032 extern const bfd_target bfd_elf32_spu_vec
;
3035 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3038 /* Some of the symbols we've installed as marking the
3039 beginning of functions may have a size of zero. Extend
3040 the range of such functions to the beginning of the
3041 next symbol of interest. */
3042 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3043 if (interesting_section (sec
))
3045 struct _spu_elf_section_data
*sec_data
;
3046 struct spu_elf_stack_info
*sinfo
;
3048 sec_data
= spu_elf_section_data (sec
);
3049 sinfo
= sec_data
->u
.i
.stack_info
;
3050 if (sinfo
!= NULL
&& sinfo
->num_fun
!= 0)
3053 bfd_vma hi
= sec
->size
;
3055 for (fun_idx
= sinfo
->num_fun
; --fun_idx
>= 0; )
3057 sinfo
->fun
[fun_idx
].hi
= hi
;
3058 hi
= sinfo
->fun
[fun_idx
].lo
;
3061 sinfo
->fun
[0].lo
= 0;
3063 /* No symbols in this section. Must be .init or .fini
3064 or something similar. */
3065 else if (!pasted_function (sec
))
3071 for (ibfd
= info
->input_bfds
, bfd_idx
= 0;
3073 ibfd
= ibfd
->link_next
, bfd_idx
++)
3075 if (psym_arr
[bfd_idx
] == NULL
)
3078 free (psym_arr
[bfd_idx
]);
3079 free (sec_arr
[bfd_idx
]);
3088 /* Iterate over all function_info we have collected, calling DOIT on
3089 each node if ROOT_ONLY is false. Only call DOIT on root nodes
3093 for_each_node (bfd_boolean (*doit
) (struct function_info
*,
3094 struct bfd_link_info
*,
3096 struct bfd_link_info
*info
,
3102 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3104 extern const bfd_target bfd_elf32_spu_vec
;
3107 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3110 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3112 struct _spu_elf_section_data
*sec_data
;
3113 struct spu_elf_stack_info
*sinfo
;
3115 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
3116 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3119 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3120 if (!root_only
|| !sinfo
->fun
[i
].non_root
)
3121 if (!doit (&sinfo
->fun
[i
], info
, param
))
3129 /* Transfer call info attached to struct function_info entries for
3130 all of a given function's sections to the first entry. */
3133 transfer_calls (struct function_info
*fun
,
3134 struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
3135 void *param ATTRIBUTE_UNUSED
)
3137 struct function_info
*start
= fun
->start
;
3141 struct call_info
*call
, *call_next
;
3143 while (start
->start
!= NULL
)
3144 start
= start
->start
;
3145 for (call
= fun
->call_list
; call
!= NULL
; call
= call_next
)
3147 call_next
= call
->next
;
3148 if (!insert_callee (start
, call
))
3151 fun
->call_list
= NULL
;
3156 /* Mark nodes in the call graph that are called by some other node. */
3159 mark_non_root (struct function_info
*fun
,
3160 struct bfd_link_info
*info ATTRIBUTE_UNUSED
,
3161 void *param ATTRIBUTE_UNUSED
)
3163 struct call_info
*call
;
3168 for (call
= fun
->call_list
; call
; call
= call
->next
)
3170 call
->fun
->non_root
= TRUE
;
3171 mark_non_root (call
->fun
, 0, 0);
3176 /* Remove cycles from the call graph. Set depth of nodes. */
3179 remove_cycles (struct function_info
*fun
,
3180 struct bfd_link_info
*info
,
3183 struct call_info
**callp
, *call
;
3184 unsigned int depth
= *(unsigned int *) param
;
3185 unsigned int max_depth
= depth
;
3189 fun
->marking
= TRUE
;
3191 callp
= &fun
->call_list
;
3192 while ((call
= *callp
) != NULL
)
3194 call
->max_depth
= depth
+ !call
->is_pasted
;
3195 if (!call
->fun
->visit2
)
3197 if (!remove_cycles (call
->fun
, info
, &call
->max_depth
))
3199 if (max_depth
< call
->max_depth
)
3200 max_depth
= call
->max_depth
;
3202 else if (call
->fun
->marking
)
3204 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
3206 if (!htab
->params
->auto_overlay
3207 && htab
->params
->stack_analysis
)
3209 const char *f1
= func_name (fun
);
3210 const char *f2
= func_name (call
->fun
);
3212 info
->callbacks
->info (_("Stack analysis will ignore the call "
3216 *callp
= call
->next
;
3220 callp
= &call
->next
;
3222 fun
->marking
= FALSE
;
3223 *(unsigned int *) param
= max_depth
;
3227 /* Check that we actually visited all nodes in remove_cycles. If we
3228 didn't, then there is some cycle in the call graph not attached to
3229 any root node. Arbitrarily choose a node in the cycle as a new
3230 root and break the cycle. */
3233 mark_detached_root (struct function_info
*fun
,
3234 struct bfd_link_info
*info
,
3239 fun
->non_root
= FALSE
;
3240 *(unsigned int *) param
= 0;
3241 return remove_cycles (fun
, info
, param
);
3244 /* Populate call_list for each function. */
3247 build_call_tree (struct bfd_link_info
*info
)
3252 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3254 extern const bfd_target bfd_elf32_spu_vec
;
3257 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3260 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3261 if (!mark_functions_via_relocs (sec
, info
, TRUE
))
3265 /* Transfer call info from hot/cold section part of function
3267 if (!spu_hash_table (info
)->params
->auto_overlay
3268 && !for_each_node (transfer_calls
, info
, 0, FALSE
))
3271 /* Find the call graph root(s). */
3272 if (!for_each_node (mark_non_root
, info
, 0, FALSE
))
3275 /* Remove cycles from the call graph. We start from the root node(s)
3276 so that we break cycles in a reasonable place. */
3278 if (!for_each_node (remove_cycles
, info
, &depth
, TRUE
))
3281 return for_each_node (mark_detached_root
, info
, &depth
, FALSE
);
3284 /* qsort predicate to sort calls by priority, max_depth then count. */
3287 sort_calls (const void *a
, const void *b
)
3289 struct call_info
*const *c1
= a
;
3290 struct call_info
*const *c2
= b
;
3293 delta
= (*c2
)->priority
- (*c1
)->priority
;
3297 delta
= (*c2
)->max_depth
- (*c1
)->max_depth
;
3301 delta
= (*c2
)->count
- (*c1
)->count
;
3305 return (char *) c1
- (char *) c2
;
3309 unsigned int max_overlay_size
;
3312 /* Set linker_mark and gc_mark on any sections that we will put in
3313 overlays. These flags are used by the generic ELF linker, but we
3314 won't be continuing on to bfd_elf_final_link so it is OK to use
3315 them. linker_mark is clear before we get here. Set segment_mark
3316 on sections that are part of a pasted function (excluding the last
3319 Set up function rodata section if --overlay-rodata. We don't
3320 currently include merged string constant rodata sections since
3322 Sort the call graph so that the deepest nodes will be visited
3326 mark_overlay_section (struct function_info
*fun
,
3327 struct bfd_link_info
*info
,
3330 struct call_info
*call
;
3332 struct _mos_param
*mos_param
= param
;
3333 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
3339 if (!fun
->sec
->linker_mark
3340 && (htab
->params
->ovly_flavour
!= ovly_soft_icache
3341 || htab
->params
->non_ia_text
3342 || strncmp (fun
->sec
->name
, ".text.ia.", 9) == 0))
3346 fun
->sec
->linker_mark
= 1;
3347 fun
->sec
->gc_mark
= 1;
3348 fun
->sec
->segment_mark
= 0;
3349 /* Ensure SEC_CODE is set on this text section (it ought to
3350 be!), and SEC_CODE is clear on rodata sections. We use
3351 this flag to differentiate the two overlay section types. */
3352 fun
->sec
->flags
|= SEC_CODE
;
3354 size
= fun
->sec
->size
;
3355 if (htab
->params
->auto_overlay
& OVERLAY_RODATA
)
3359 /* Find the rodata section corresponding to this function's
3361 if (strcmp (fun
->sec
->name
, ".text") == 0)
3363 name
= bfd_malloc (sizeof (".rodata"));
3366 memcpy (name
, ".rodata", sizeof (".rodata"));
3368 else if (strncmp (fun
->sec
->name
, ".text.", 6) == 0)
3370 size_t len
= strlen (fun
->sec
->name
);
3371 name
= bfd_malloc (len
+ 3);
3374 memcpy (name
, ".rodata", sizeof (".rodata"));
3375 memcpy (name
+ 7, fun
->sec
->name
+ 5, len
- 4);
3377 else if (strncmp (fun
->sec
->name
, ".gnu.linkonce.t.", 16) == 0)
3379 size_t len
= strlen (fun
->sec
->name
) + 1;
3380 name
= bfd_malloc (len
);
3383 memcpy (name
, fun
->sec
->name
, len
);
3389 asection
*rodata
= NULL
;
3390 asection
*group_sec
= elf_section_data (fun
->sec
)->next_in_group
;
3391 if (group_sec
== NULL
)
3392 rodata
= bfd_get_section_by_name (fun
->sec
->owner
, name
);
3394 while (group_sec
!= NULL
&& group_sec
!= fun
->sec
)
3396 if (strcmp (group_sec
->name
, name
) == 0)
3401 group_sec
= elf_section_data (group_sec
)->next_in_group
;
3403 fun
->rodata
= rodata
;
3406 size
+= fun
->rodata
->size
;
3407 if (htab
->params
->line_size
!= 0
3408 && size
> htab
->params
->line_size
)
3410 size
-= fun
->rodata
->size
;
3415 fun
->rodata
->linker_mark
= 1;
3416 fun
->rodata
->gc_mark
= 1;
3417 fun
->rodata
->flags
&= ~SEC_CODE
;
3423 if (mos_param
->max_overlay_size
< size
)
3424 mos_param
->max_overlay_size
= size
;
3427 for (count
= 0, call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3432 struct call_info
**calls
= bfd_malloc (count
* sizeof (*calls
));
3436 for (count
= 0, call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3437 calls
[count
++] = call
;
3439 qsort (calls
, count
, sizeof (*calls
), sort_calls
);
3441 fun
->call_list
= NULL
;
3445 calls
[count
]->next
= fun
->call_list
;
3446 fun
->call_list
= calls
[count
];
3451 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3453 if (call
->is_pasted
)
3455 /* There can only be one is_pasted call per function_info. */
3456 BFD_ASSERT (!fun
->sec
->segment_mark
);
3457 fun
->sec
->segment_mark
= 1;
3459 if (!mark_overlay_section (call
->fun
, info
, param
))
3463 /* Don't put entry code into an overlay. The overlay manager needs
3464 a stack! Also, don't mark .ovl.init as an overlay. */
3465 if (fun
->lo
+ fun
->sec
->output_offset
+ fun
->sec
->output_section
->vma
3466 == info
->output_bfd
->start_address
3467 || strncmp (fun
->sec
->output_section
->name
, ".ovl.init", 9) == 0)
3469 fun
->sec
->linker_mark
= 0;
3470 if (fun
->rodata
!= NULL
)
3471 fun
->rodata
->linker_mark
= 0;
3476 /* If non-zero then unmark functions called from those within sections
3477 that we need to unmark. Unfortunately this isn't reliable since the
3478 call graph cannot know the destination of function pointer calls. */
3479 #define RECURSE_UNMARK 0
3482 asection
*exclude_input_section
;
3483 asection
*exclude_output_section
;
3484 unsigned long clearing
;
3487 /* Undo some of mark_overlay_section's work. */
3490 unmark_overlay_section (struct function_info
*fun
,
3491 struct bfd_link_info
*info
,
3494 struct call_info
*call
;
3495 struct _uos_param
*uos_param
= param
;
3496 unsigned int excluded
= 0;
3504 if (fun
->sec
== uos_param
->exclude_input_section
3505 || fun
->sec
->output_section
== uos_param
->exclude_output_section
)
3509 uos_param
->clearing
+= excluded
;
3511 if (RECURSE_UNMARK
? uos_param
->clearing
: excluded
)
3513 fun
->sec
->linker_mark
= 0;
3515 fun
->rodata
->linker_mark
= 0;
3518 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3519 if (!unmark_overlay_section (call
->fun
, info
, param
))
3523 uos_param
->clearing
-= excluded
;
3528 unsigned int lib_size
;
3529 asection
**lib_sections
;
3532 /* Add sections we have marked as belonging to overlays to an array
3533 for consideration as non-overlay sections. The array consist of
3534 pairs of sections, (text,rodata), for functions in the call graph. */
3537 collect_lib_sections (struct function_info
*fun
,
3538 struct bfd_link_info
*info
,
3541 struct _cl_param
*lib_param
= param
;
3542 struct call_info
*call
;
3549 if (!fun
->sec
->linker_mark
|| !fun
->sec
->gc_mark
|| fun
->sec
->segment_mark
)
3552 size
= fun
->sec
->size
;
3554 size
+= fun
->rodata
->size
;
3556 if (size
<= lib_param
->lib_size
)
3558 *lib_param
->lib_sections
++ = fun
->sec
;
3559 fun
->sec
->gc_mark
= 0;
3560 if (fun
->rodata
&& fun
->rodata
->linker_mark
&& fun
->rodata
->gc_mark
)
3562 *lib_param
->lib_sections
++ = fun
->rodata
;
3563 fun
->rodata
->gc_mark
= 0;
3566 *lib_param
->lib_sections
++ = NULL
;
3569 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3570 collect_lib_sections (call
->fun
, info
, param
);
3575 /* qsort predicate to sort sections by call count. */
3578 sort_lib (const void *a
, const void *b
)
3580 asection
*const *s1
= a
;
3581 asection
*const *s2
= b
;
3582 struct _spu_elf_section_data
*sec_data
;
3583 struct spu_elf_stack_info
*sinfo
;
3587 if ((sec_data
= spu_elf_section_data (*s1
)) != NULL
3588 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3591 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3592 delta
-= sinfo
->fun
[i
].call_count
;
3595 if ((sec_data
= spu_elf_section_data (*s2
)) != NULL
3596 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3599 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3600 delta
+= sinfo
->fun
[i
].call_count
;
3609 /* Remove some sections from those marked to be in overlays. Choose
3610 those that are called from many places, likely library functions. */
3613 auto_ovl_lib_functions (struct bfd_link_info
*info
, unsigned int lib_size
)
3616 asection
**lib_sections
;
3617 unsigned int i
, lib_count
;
3618 struct _cl_param collect_lib_param
;
3619 struct function_info dummy_caller
;
3620 struct spu_link_hash_table
*htab
;
3622 memset (&dummy_caller
, 0, sizeof (dummy_caller
));
3624 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
3626 extern const bfd_target bfd_elf32_spu_vec
;
3629 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
3632 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
3633 if (sec
->linker_mark
3634 && sec
->size
< lib_size
3635 && (sec
->flags
& SEC_CODE
) != 0)
3638 lib_sections
= bfd_malloc (lib_count
* 2 * sizeof (*lib_sections
));
3639 if (lib_sections
== NULL
)
3640 return (unsigned int) -1;
3641 collect_lib_param
.lib_size
= lib_size
;
3642 collect_lib_param
.lib_sections
= lib_sections
;
3643 if (!for_each_node (collect_lib_sections
, info
, &collect_lib_param
,
3645 return (unsigned int) -1;
3646 lib_count
= (collect_lib_param
.lib_sections
- lib_sections
) / 2;
3648 /* Sort sections so that those with the most calls are first. */
3650 qsort (lib_sections
, lib_count
, 2 * sizeof (*lib_sections
), sort_lib
);
3652 htab
= spu_hash_table (info
);
3653 for (i
= 0; i
< lib_count
; i
++)
3655 unsigned int tmp
, stub_size
;
3657 struct _spu_elf_section_data
*sec_data
;
3658 struct spu_elf_stack_info
*sinfo
;
3660 sec
= lib_sections
[2 * i
];
3661 /* If this section is OK, its size must be less than lib_size. */
3663 /* If it has a rodata section, then add that too. */
3664 if (lib_sections
[2 * i
+ 1])
3665 tmp
+= lib_sections
[2 * i
+ 1]->size
;
3666 /* Add any new overlay call stubs needed by the section. */
3669 && (sec_data
= spu_elf_section_data (sec
)) != NULL
3670 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3673 struct call_info
*call
;
3675 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3676 for (call
= sinfo
->fun
[k
].call_list
; call
; call
= call
->next
)
3677 if (call
->fun
->sec
->linker_mark
)
3679 struct call_info
*p
;
3680 for (p
= dummy_caller
.call_list
; p
; p
= p
->next
)
3681 if (p
->fun
== call
->fun
)
3684 stub_size
+= ovl_stub_size (htab
->params
);
3687 if (tmp
+ stub_size
< lib_size
)
3689 struct call_info
**pp
, *p
;
3691 /* This section fits. Mark it as non-overlay. */
3692 lib_sections
[2 * i
]->linker_mark
= 0;
3693 if (lib_sections
[2 * i
+ 1])
3694 lib_sections
[2 * i
+ 1]->linker_mark
= 0;
3695 lib_size
-= tmp
+ stub_size
;
3696 /* Call stubs to the section we just added are no longer
3698 pp
= &dummy_caller
.call_list
;
3699 while ((p
= *pp
) != NULL
)
3700 if (!p
->fun
->sec
->linker_mark
)
3702 lib_size
+= ovl_stub_size (htab
->params
);
3708 /* Add new call stubs to dummy_caller. */
3709 if ((sec_data
= spu_elf_section_data (sec
)) != NULL
3710 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3713 struct call_info
*call
;
3715 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3716 for (call
= sinfo
->fun
[k
].call_list
;
3719 if (call
->fun
->sec
->linker_mark
)
3721 struct call_info
*callee
;
3722 callee
= bfd_malloc (sizeof (*callee
));
3724 return (unsigned int) -1;
3726 if (!insert_callee (&dummy_caller
, callee
))
3732 while (dummy_caller
.call_list
!= NULL
)
3734 struct call_info
*call
= dummy_caller
.call_list
;
3735 dummy_caller
.call_list
= call
->next
;
3738 for (i
= 0; i
< 2 * lib_count
; i
++)
3739 if (lib_sections
[i
])
3740 lib_sections
[i
]->gc_mark
= 1;
3741 free (lib_sections
);
3745 /* Build an array of overlay sections. The deepest node's section is
3746 added first, then its parent node's section, then everything called
3747 from the parent section. The idea being to group sections to
3748 minimise calls between different overlays. */
3751 collect_overlays (struct function_info
*fun
,
3752 struct bfd_link_info
*info
,
3755 struct call_info
*call
;
3756 bfd_boolean added_fun
;
3757 asection
***ovly_sections
= param
;
3763 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3764 if (!call
->is_pasted
)
3766 if (!collect_overlays (call
->fun
, info
, ovly_sections
))
3772 if (fun
->sec
->linker_mark
&& fun
->sec
->gc_mark
)
3774 fun
->sec
->gc_mark
= 0;
3775 *(*ovly_sections
)++ = fun
->sec
;
3776 if (fun
->rodata
&& fun
->rodata
->linker_mark
&& fun
->rodata
->gc_mark
)
3778 fun
->rodata
->gc_mark
= 0;
3779 *(*ovly_sections
)++ = fun
->rodata
;
3782 *(*ovly_sections
)++ = NULL
;
3785 /* Pasted sections must stay with the first section. We don't
3786 put pasted sections in the array, just the first section.
3787 Mark subsequent sections as already considered. */
3788 if (fun
->sec
->segment_mark
)
3790 struct function_info
*call_fun
= fun
;
3793 for (call
= call_fun
->call_list
; call
!= NULL
; call
= call
->next
)
3794 if (call
->is_pasted
)
3796 call_fun
= call
->fun
;
3797 call_fun
->sec
->gc_mark
= 0;
3798 if (call_fun
->rodata
)
3799 call_fun
->rodata
->gc_mark
= 0;
3805 while (call_fun
->sec
->segment_mark
);
3809 for (call
= fun
->call_list
; call
!= NULL
; call
= call
->next
)
3810 if (!collect_overlays (call
->fun
, info
, ovly_sections
))
3815 struct _spu_elf_section_data
*sec_data
;
3816 struct spu_elf_stack_info
*sinfo
;
3818 if ((sec_data
= spu_elf_section_data (fun
->sec
)) != NULL
3819 && (sinfo
= sec_data
->u
.i
.stack_info
) != NULL
)
3822 for (i
= 0; i
< sinfo
->num_fun
; ++i
)
3823 if (!collect_overlays (&sinfo
->fun
[i
], info
, ovly_sections
))
3831 struct _sum_stack_param
{
3833 size_t overall_stack
;
3834 bfd_boolean emit_stack_syms
;
3837 /* Descend the call graph for FUN, accumulating total stack required. */
3840 sum_stack (struct function_info
*fun
,
3841 struct bfd_link_info
*info
,
3844 struct call_info
*call
;
3845 struct function_info
*max
;
3846 size_t stack
, cum_stack
;
3848 bfd_boolean has_call
;
3849 struct _sum_stack_param
*sum_stack_param
= param
;
3850 struct spu_link_hash_table
*htab
;
3852 cum_stack
= fun
->stack
;
3853 sum_stack_param
->cum_stack
= cum_stack
;
3859 for (call
= fun
->call_list
; call
; call
= call
->next
)
3861 if (!call
->is_pasted
)
3863 if (!sum_stack (call
->fun
, info
, sum_stack_param
))
3865 stack
= sum_stack_param
->cum_stack
;
3866 /* Include caller stack for normal calls, don't do so for
3867 tail calls. fun->stack here is local stack usage for
3869 if (!call
->is_tail
|| call
->is_pasted
|| call
->fun
->start
!= NULL
)
3870 stack
+= fun
->stack
;
3871 if (cum_stack
< stack
)
3878 sum_stack_param
->cum_stack
= cum_stack
;
3880 /* Now fun->stack holds cumulative stack. */
3881 fun
->stack
= cum_stack
;
3885 && sum_stack_param
->overall_stack
< cum_stack
)
3886 sum_stack_param
->overall_stack
= cum_stack
;
3888 htab
= spu_hash_table (info
);
3889 if (htab
->params
->auto_overlay
)
3892 f1
= func_name (fun
);
3893 if (htab
->params
->stack_analysis
)
3896 info
->callbacks
->info (_(" %s: 0x%v\n"), f1
, (bfd_vma
) cum_stack
);
3897 info
->callbacks
->minfo (_("%s: 0x%v 0x%v\n"),
3898 f1
, (bfd_vma
) stack
, (bfd_vma
) cum_stack
);
3902 info
->callbacks
->minfo (_(" calls:\n"));
3903 for (call
= fun
->call_list
; call
; call
= call
->next
)
3904 if (!call
->is_pasted
)
3906 const char *f2
= func_name (call
->fun
);
3907 const char *ann1
= call
->fun
== max
? "*" : " ";
3908 const char *ann2
= call
->is_tail
? "t" : " ";
3910 info
->callbacks
->minfo (_(" %s%s %s\n"), ann1
, ann2
, f2
);
3915 if (sum_stack_param
->emit_stack_syms
)
3917 char *name
= bfd_malloc (18 + strlen (f1
));
3918 struct elf_link_hash_entry
*h
;
3923 if (fun
->global
|| ELF_ST_BIND (fun
->u
.sym
->st_info
) == STB_GLOBAL
)
3924 sprintf (name
, "__stack_%s", f1
);
3926 sprintf (name
, "__stack_%x_%s", fun
->sec
->id
& 0xffffffff, f1
);
3928 h
= elf_link_hash_lookup (&htab
->elf
, name
, TRUE
, TRUE
, FALSE
);
3931 && (h
->root
.type
== bfd_link_hash_new
3932 || h
->root
.type
== bfd_link_hash_undefined
3933 || h
->root
.type
== bfd_link_hash_undefweak
))
3935 h
->root
.type
= bfd_link_hash_defined
;
3936 h
->root
.u
.def
.section
= bfd_abs_section_ptr
;
3937 h
->root
.u
.def
.value
= cum_stack
;
3942 h
->ref_regular_nonweak
= 1;
3943 h
->forced_local
= 1;
3951 /* SEC is part of a pasted function. Return the call_info for the
3952 next section of this function. */
3954 static struct call_info
*
3955 find_pasted_call (asection
*sec
)
3957 struct _spu_elf_section_data
*sec_data
= spu_elf_section_data (sec
);
3958 struct spu_elf_stack_info
*sinfo
= sec_data
->u
.i
.stack_info
;
3959 struct call_info
*call
;
3962 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
3963 for (call
= sinfo
->fun
[k
].call_list
; call
!= NULL
; call
= call
->next
)
3964 if (call
->is_pasted
)
3970 /* qsort predicate to sort bfds by file name. */
3973 sort_bfds (const void *a
, const void *b
)
3975 bfd
*const *abfd1
= a
;
3976 bfd
*const *abfd2
= b
;
3978 return strcmp ((*abfd1
)->filename
, (*abfd2
)->filename
);
3982 print_one_overlay_section (FILE *script
,
3985 unsigned int ovlynum
,
3986 unsigned int *ovly_map
,
3987 asection
**ovly_sections
,
3988 struct bfd_link_info
*info
)
3992 for (j
= base
; j
< count
&& ovly_map
[j
] == ovlynum
; j
++)
3994 asection
*sec
= ovly_sections
[2 * j
];
3996 if (fprintf (script
, " %s%c%s (%s)\n",
3997 (sec
->owner
->my_archive
!= NULL
3998 ? sec
->owner
->my_archive
->filename
: ""),
3999 info
->path_separator
,
4000 sec
->owner
->filename
,
4003 if (sec
->segment_mark
)
4005 struct call_info
*call
= find_pasted_call (sec
);
4006 while (call
!= NULL
)
4008 struct function_info
*call_fun
= call
->fun
;
4009 sec
= call_fun
->sec
;
4010 if (fprintf (script
, " %s%c%s (%s)\n",
4011 (sec
->owner
->my_archive
!= NULL
4012 ? sec
->owner
->my_archive
->filename
: ""),
4013 info
->path_separator
,
4014 sec
->owner
->filename
,
4017 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
4018 if (call
->is_pasted
)
4024 for (j
= base
; j
< count
&& ovly_map
[j
] == ovlynum
; j
++)
4026 asection
*sec
= ovly_sections
[2 * j
+ 1];
4028 && fprintf (script
, " %s%c%s (%s)\n",
4029 (sec
->owner
->my_archive
!= NULL
4030 ? sec
->owner
->my_archive
->filename
: ""),
4031 info
->path_separator
,
4032 sec
->owner
->filename
,
4036 sec
= ovly_sections
[2 * j
];
4037 if (sec
->segment_mark
)
4039 struct call_info
*call
= find_pasted_call (sec
);
4040 while (call
!= NULL
)
4042 struct function_info
*call_fun
= call
->fun
;
4043 sec
= call_fun
->rodata
;
4045 && fprintf (script
, " %s%c%s (%s)\n",
4046 (sec
->owner
->my_archive
!= NULL
4047 ? sec
->owner
->my_archive
->filename
: ""),
4048 info
->path_separator
,
4049 sec
->owner
->filename
,
4052 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
4053 if (call
->is_pasted
)
4062 /* Handle --auto-overlay. */
4065 spu_elf_auto_overlay (struct bfd_link_info
*info
)
4069 struct elf_segment_map
*m
;
4070 unsigned int fixed_size
, lo
, hi
;
4071 struct spu_link_hash_table
*htab
;
4072 unsigned int base
, i
, count
, bfd_count
;
4073 unsigned int region
, ovlynum
;
4074 asection
**ovly_sections
, **ovly_p
;
4075 unsigned int *ovly_map
;
4077 unsigned int total_overlay_size
, overlay_size
;
4078 const char *ovly_mgr_entry
;
4079 struct elf_link_hash_entry
*h
;
4080 struct _mos_param mos_param
;
4081 struct _uos_param uos_param
;
4082 struct function_info dummy_caller
;
4084 /* Find the extents of our loadable image. */
4085 lo
= (unsigned int) -1;
4087 for (m
= elf_tdata (info
->output_bfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
4088 if (m
->p_type
== PT_LOAD
)
4089 for (i
= 0; i
< m
->count
; i
++)
4090 if (m
->sections
[i
]->size
!= 0)
4092 if (m
->sections
[i
]->vma
< lo
)
4093 lo
= m
->sections
[i
]->vma
;
4094 if (m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1 > hi
)
4095 hi
= m
->sections
[i
]->vma
+ m
->sections
[i
]->size
- 1;
4097 fixed_size
= hi
+ 1 - lo
;
4099 if (!discover_functions (info
))
4102 if (!build_call_tree (info
))
4105 htab
= spu_hash_table (info
);
4106 if (htab
->reserved
== 0)
4108 struct _sum_stack_param sum_stack_param
;
4110 sum_stack_param
.emit_stack_syms
= 0;
4111 sum_stack_param
.overall_stack
= 0;
4112 if (!for_each_node (sum_stack
, info
, &sum_stack_param
, TRUE
))
4114 htab
->reserved
= sum_stack_param
.overall_stack
+ htab
->extra_stack_space
;
4117 /* No need for overlays if everything already fits. */
4118 if (fixed_size
+ htab
->reserved
<= htab
->local_store
4119 && htab
->params
->ovly_flavour
!= ovly_soft_icache
)
4121 htab
->params
->auto_overlay
= 0;
4125 uos_param
.exclude_input_section
= 0;
4126 uos_param
.exclude_output_section
4127 = bfd_get_section_by_name (info
->output_bfd
, ".interrupt");
4129 ovly_mgr_entry
= "__ovly_load";
4130 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4131 ovly_mgr_entry
= "__icache_br_handler";
4132 h
= elf_link_hash_lookup (&htab
->elf
, ovly_mgr_entry
,
4133 FALSE
, FALSE
, FALSE
);
4135 && (h
->root
.type
== bfd_link_hash_defined
4136 || h
->root
.type
== bfd_link_hash_defweak
)
4139 /* We have a user supplied overlay manager. */
4140 uos_param
.exclude_input_section
= h
->root
.u
.def
.section
;
4144 /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
4145 builtin version to .text, and will adjust .text size. */
4146 fixed_size
+= (*htab
->params
->spu_elf_load_ovl_mgr
) ();
4149 /* Mark overlay sections, and find max overlay section size. */
4150 mos_param
.max_overlay_size
= 0;
4151 if (!for_each_node (mark_overlay_section
, info
, &mos_param
, TRUE
))
4154 /* We can't put the overlay manager or interrupt routines in
4156 uos_param
.clearing
= 0;
4157 if ((uos_param
.exclude_input_section
4158 || uos_param
.exclude_output_section
)
4159 && !for_each_node (unmark_overlay_section
, info
, &uos_param
, TRUE
))
4163 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
4165 bfd_arr
= bfd_malloc (bfd_count
* sizeof (*bfd_arr
));
4166 if (bfd_arr
== NULL
)
4169 /* Count overlay sections, and subtract their sizes from "fixed_size". */
4172 total_overlay_size
= 0;
4173 for (ibfd
= info
->input_bfds
; ibfd
!= NULL
; ibfd
= ibfd
->link_next
)
4175 extern const bfd_target bfd_elf32_spu_vec
;
4177 unsigned int old_count
;
4179 if (ibfd
->xvec
!= &bfd_elf32_spu_vec
)
4183 for (sec
= ibfd
->sections
; sec
!= NULL
; sec
= sec
->next
)
4184 if (sec
->linker_mark
)
4186 if ((sec
->flags
& SEC_CODE
) != 0)
4188 fixed_size
-= sec
->size
;
4189 total_overlay_size
+= sec
->size
;
4191 else if ((sec
->flags
& (SEC_ALLOC
| SEC_LOAD
)) == (SEC_ALLOC
| SEC_LOAD
)
4192 && sec
->output_section
->owner
== info
->output_bfd
4193 && strncmp (sec
->output_section
->name
, ".ovl.init", 9) == 0)
4194 fixed_size
-= sec
->size
;
4195 if (count
!= old_count
)
4196 bfd_arr
[bfd_count
++] = ibfd
;
4199 /* Since the overlay link script selects sections by file name and
4200 section name, ensure that file names are unique. */
4203 bfd_boolean ok
= TRUE
;
4205 qsort (bfd_arr
, bfd_count
, sizeof (*bfd_arr
), sort_bfds
);
4206 for (i
= 1; i
< bfd_count
; ++i
)
4207 if (strcmp (bfd_arr
[i
- 1]->filename
, bfd_arr
[i
]->filename
) == 0)
4209 if (bfd_arr
[i
- 1]->my_archive
== bfd_arr
[i
]->my_archive
)
4211 if (bfd_arr
[i
- 1]->my_archive
&& bfd_arr
[i
]->my_archive
)
4212 info
->callbacks
->einfo (_("%s duplicated in %s\n"),
4213 bfd_arr
[i
]->filename
,
4214 bfd_arr
[i
]->my_archive
->filename
);
4216 info
->callbacks
->einfo (_("%s duplicated\n"),
4217 bfd_arr
[i
]->filename
);
4223 info
->callbacks
->einfo (_("sorry, no support for duplicate "
4224 "object files in auto-overlay script\n"));
4225 bfd_set_error (bfd_error_bad_value
);
4231 fixed_size
+= htab
->reserved
;
4232 fixed_size
+= htab
->non_ovly_stub
* ovl_stub_size (htab
->params
);
4233 if (fixed_size
+ mos_param
.max_overlay_size
<= htab
->local_store
)
4235 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4237 /* Stubs in the non-icache area are bigger. */
4238 fixed_size
+= htab
->non_ovly_stub
* 16;
4239 /* Space for icache manager tables.
4240 a) Tag array, one quadword per cache line.
4241 - word 0: ia address of present line, init to zero.
4242 - word 1: link locator. link_elem=stub_addr/2+locator
4243 - halfwords 4-7: head/tail pointers for linked lists. */
4244 fixed_size
+= 16 << htab
->num_lines_log2
;
4245 /* b) Linked list elements, max_branch per line. */
4246 fixed_size
+= htab
->params
->max_branch
<< (htab
->num_lines_log2
+ 4);
4247 /* c) Indirect branch descriptors, 8 quadwords. */
4248 fixed_size
+= 8 * 16;
4249 /* d) Pointers to __ea backing store, 16 quadwords. */
4250 fixed_size
+= 16 * 16;
4254 /* Guess number of overlays. Assuming overlay buffer is on
4255 average only half full should be conservative. */
4256 ovlynum
= (total_overlay_size
* 2 * htab
->params
->num_lines
4257 / (htab
->local_store
- fixed_size
));
4258 /* Space for _ovly_table[], _ovly_buf_table[] and toe. */
4259 fixed_size
+= ovlynum
* 16 + 16 + 4 + 16;
4263 if (fixed_size
+ mos_param
.max_overlay_size
> htab
->local_store
)
4264 info
->callbacks
->einfo (_("non-overlay size of 0x%v plus maximum overlay "
4265 "size of 0x%v exceeds local store\n"),
4266 (bfd_vma
) fixed_size
,
4267 (bfd_vma
) mos_param
.max_overlay_size
);
4269 /* Now see if we should put some functions in the non-overlay area. */
4270 else if (fixed_size
< htab
->overlay_fixed
)
4272 unsigned int max_fixed
, lib_size
;
4274 max_fixed
= htab
->local_store
- mos_param
.max_overlay_size
;
4275 if (max_fixed
> htab
->overlay_fixed
)
4276 max_fixed
= htab
->overlay_fixed
;
4277 lib_size
= max_fixed
- fixed_size
;
4278 lib_size
= auto_ovl_lib_functions (info
, lib_size
);
4279 if (lib_size
== (unsigned int) -1)
4281 fixed_size
= max_fixed
- lib_size
;
4284 /* Build an array of sections, suitably sorted to place into
4286 ovly_sections
= bfd_malloc (2 * count
* sizeof (*ovly_sections
));
4287 if (ovly_sections
== NULL
)
4289 ovly_p
= ovly_sections
;
4290 if (!for_each_node (collect_overlays
, info
, &ovly_p
, TRUE
))
4292 count
= (size_t) (ovly_p
- ovly_sections
) / 2;
4293 ovly_map
= bfd_malloc (count
* sizeof (*ovly_map
));
4294 if (ovly_map
== NULL
)
4297 memset (&dummy_caller
, 0, sizeof (dummy_caller
));
4298 overlay_size
= (htab
->local_store
- fixed_size
) / htab
->params
->num_lines
;
4299 if (htab
->params
->line_size
!= 0)
4300 overlay_size
= htab
->params
->line_size
;
4303 while (base
< count
)
4305 unsigned int size
= 0;
4307 for (i
= base
; i
< count
; i
++)
4311 unsigned int num_stubs
;
4312 struct call_info
*call
, *pasty
;
4313 struct _spu_elf_section_data
*sec_data
;
4314 struct spu_elf_stack_info
*sinfo
;
4317 /* See whether we can add this section to the current
4318 overlay without overflowing our overlay buffer. */
4319 sec
= ovly_sections
[2 * i
];
4320 tmp
= size
+ sec
->size
;
4321 if (ovly_sections
[2 * i
+ 1])
4322 tmp
+= ovly_sections
[2 * i
+ 1]->size
;
4323 if (tmp
> overlay_size
)
4325 if (sec
->segment_mark
)
4327 /* Pasted sections must stay together, so add their
4329 struct call_info
*pasty
= find_pasted_call (sec
);
4330 while (pasty
!= NULL
)
4332 struct function_info
*call_fun
= pasty
->fun
;
4333 tmp
+= call_fun
->sec
->size
;
4334 if (call_fun
->rodata
)
4335 tmp
+= call_fun
->rodata
->size
;
4336 for (pasty
= call_fun
->call_list
; pasty
; pasty
= pasty
->next
)
4337 if (pasty
->is_pasted
)
4341 if (tmp
> overlay_size
)
4344 /* If we add this section, we might need new overlay call
4345 stubs. Add any overlay section calls to dummy_call. */
4347 sec_data
= spu_elf_section_data (sec
);
4348 sinfo
= sec_data
->u
.i
.stack_info
;
4349 for (k
= 0; k
< sinfo
->num_fun
; ++k
)
4350 for (call
= sinfo
->fun
[k
].call_list
; call
; call
= call
->next
)
4351 if (call
->is_pasted
)
4353 BFD_ASSERT (pasty
== NULL
);
4356 else if (call
->fun
->sec
->linker_mark
)
4358 if (!copy_callee (&dummy_caller
, call
))
4361 while (pasty
!= NULL
)
4363 struct function_info
*call_fun
= pasty
->fun
;
4365 for (call
= call_fun
->call_list
; call
; call
= call
->next
)
4366 if (call
->is_pasted
)
4368 BFD_ASSERT (pasty
== NULL
);
4371 else if (!copy_callee (&dummy_caller
, call
))
4375 /* Calculate call stub size. */
4377 for (call
= dummy_caller
.call_list
; call
; call
= call
->next
)
4382 /* If the call is within this overlay, we won't need a
4384 for (k
= base
; k
< i
+ 1; k
++)
4385 if (call
->fun
->sec
== ovly_sections
[2 * k
])
4391 if (htab
->params
->ovly_flavour
== ovly_soft_icache
4392 && num_stubs
> htab
->params
->max_branch
)
4394 if (tmp
+ num_stubs
* ovl_stub_size (htab
->params
)
4402 info
->callbacks
->einfo (_("%B:%A%s exceeds overlay size\n"),
4403 ovly_sections
[2 * i
]->owner
,
4404 ovly_sections
[2 * i
],
4405 ovly_sections
[2 * i
+ 1] ? " + rodata" : "");
4406 bfd_set_error (bfd_error_bad_value
);
4410 while (dummy_caller
.call_list
!= NULL
)
4412 struct call_info
*call
= dummy_caller
.call_list
;
4413 dummy_caller
.call_list
= call
->next
;
4419 ovly_map
[base
++] = ovlynum
;
4422 script
= htab
->params
->spu_elf_open_overlay_script ();
4424 if (fprintf (script
, "SECTIONS\n{\n") <= 0)
4427 if (htab
->params
->ovly_flavour
== ovly_soft_icache
)
4429 if (fprintf (script
,
4430 " .data.icache ALIGN (16) : { *(.ovtab) *(.data.icache) }\n"
4431 " . = ALIGN (%u);\n"
4432 " .ovl.init : { *(.ovl.init) }\n"
4433 " . = ABSOLUTE (ADDR (.ovl.init));\n",
4434 htab
->params
->line_size
) <= 0)
4439 while (base
< count
)
4441 unsigned int indx
= ovlynum
- 1;
4442 unsigned int vma
, lma
;
4444 vma
= (indx
& (htab
->params
->num_lines
- 1)) << htab
->line_size_log2
;
4445 lma
= indx
<< htab
->line_size_log2
;
4447 if (fprintf (script
, " .ovly%u ABSOLUTE (ADDR (.ovl.init)) + %u "
4448 ": AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16) + %u) {\n",
4449 ovlynum
, vma
, lma
) <= 0)
4452 base
= print_one_overlay_section (script
, base
, count
, ovlynum
,
4453 ovly_map
, ovly_sections
, info
);
4454 if (base
== (unsigned) -1)
4457 if (fprintf (script
, " }\n") <= 0)
4463 if (fprintf (script
, " . = ABSOLUTE (ADDR (.ovl.init)) + %u;\n",
4464 1 << (htab
->num_lines_log2
+ htab
->line_size_log2
)) <= 0)
4469 if (fprintf (script
,
4470 " . = ALIGN (16);\n"
4471 " .ovl.init : { *(.ovl.init) }\n"
4472 " . = ABSOLUTE (ADDR (.ovl.init));\n") <= 0)
4475 for (region
= 1; region
<= htab
->params
->num_lines
; region
++)
4479 while (base
< count
&& ovly_map
[base
] < ovlynum
)
4487 /* We need to set lma since we are overlaying .ovl.init. */
4488 if (fprintf (script
,
4489 " OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))\n {\n") <= 0)
4494 if (fprintf (script
, " OVERLAY :\n {\n") <= 0)
4498 while (base
< count
)
4500 if (fprintf (script
, " .ovly%u {\n", ovlynum
) <= 0)
4503 base
= print_one_overlay_section (script
, base
, count
, ovlynum
,
4504 ovly_map
, ovly_sections
, info
);
4505 if (base
== (unsigned) -1)
4508 if (fprintf (script
, " }\n") <= 0)
4511 ovlynum
+= htab
->params
->num_lines
;
4512 while (base
< count
&& ovly_map
[base
] < ovlynum
)
4516 if (fprintf (script
, " }\n") <= 0)
4523 free (ovly_sections
);
4525 if (fprintf (script
, "}\nINSERT BEFORE .text;\n") <= 0)
4527 if (fclose (script
) != 0)
4530 if (htab
->params
->auto_overlay
& AUTO_RELINK
)
4531 (*htab
->params
->spu_elf_relink
) ();
4536 bfd_set_error (bfd_error_system_call
);
4538 info
->callbacks
->einfo ("%F%P: auto overlay error: %E\n");
4542 /* Provide an estimate of total stack required. */
4545 spu_elf_stack_analysis (struct bfd_link_info
*info
)
4547 struct spu_link_hash_table
*htab
;
4548 struct _sum_stack_param sum_stack_param
;
4550 if (!discover_functions (info
))
4553 if (!build_call_tree (info
))
4556 htab
= spu_hash_table (info
);
4557 if (htab
->params
->stack_analysis
)
4559 info
->callbacks
->info (_("Stack size for call graph root nodes.\n"));
4560 info
->callbacks
->minfo (_("\nStack size for functions. "
4561 "Annotations: '*' max stack, 't' tail call\n"));
4564 sum_stack_param
.emit_stack_syms
= htab
->params
->emit_stack_syms
;
4565 sum_stack_param
.overall_stack
= 0;
4566 if (!for_each_node (sum_stack
, info
, &sum_stack_param
, TRUE
))
4569 if (htab
->params
->stack_analysis
)
4570 info
->callbacks
->info (_("Maximum stack required is 0x%v\n"),
4571 (bfd_vma
) sum_stack_param
.overall_stack
);
4575 /* Perform a final link. */
4578 spu_elf_final_link (bfd
*output_bfd
, struct bfd_link_info
*info
)
4580 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
4582 if (htab
->params
->auto_overlay
)
4583 spu_elf_auto_overlay (info
);
4585 if ((htab
->params
->stack_analysis
4586 || (htab
->params
->ovly_flavour
== ovly_soft_icache
4587 && htab
->params
->lrlive_analysis
))
4588 && !spu_elf_stack_analysis (info
))
4589 info
->callbacks
->einfo ("%X%P: stack/lrlive analysis error: %E\n");
4591 if (!spu_elf_build_stubs (info
))
4592 info
->callbacks
->einfo ("%F%P: can not build overlay stubs: %E\n");
4594 return bfd_elf_final_link (output_bfd
, info
);
4597 /* Called when not normally emitting relocs, ie. !info->relocatable
4598 and !info->emitrelocations. Returns a count of special relocs
4599 that need to be emitted. */
4602 spu_elf_count_relocs (struct bfd_link_info
*info
, asection
*sec
)
4604 Elf_Internal_Rela
*relocs
;
4605 unsigned int count
= 0;
4607 relocs
= _bfd_elf_link_read_relocs (sec
->owner
, sec
, NULL
, NULL
,
4611 Elf_Internal_Rela
*rel
;
4612 Elf_Internal_Rela
*relend
= relocs
+ sec
->reloc_count
;
4614 for (rel
= relocs
; rel
< relend
; rel
++)
4616 int r_type
= ELF32_R_TYPE (rel
->r_info
);
4617 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
4621 if (elf_section_data (sec
)->relocs
!= relocs
)
4628 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
4631 spu_elf_relocate_section (bfd
*output_bfd
,
4632 struct bfd_link_info
*info
,
4634 asection
*input_section
,
4636 Elf_Internal_Rela
*relocs
,
4637 Elf_Internal_Sym
*local_syms
,
4638 asection
**local_sections
)
4640 Elf_Internal_Shdr
*symtab_hdr
;
4641 struct elf_link_hash_entry
**sym_hashes
;
4642 Elf_Internal_Rela
*rel
, *relend
;
4643 struct spu_link_hash_table
*htab
;
4646 bfd_boolean emit_these_relocs
= FALSE
;
4647 bfd_boolean is_ea_sym
;
4649 unsigned int iovl
= 0;
4651 htab
= spu_hash_table (info
);
4652 stubs
= (htab
->stub_sec
!= NULL
4653 && maybe_needs_stubs (input_section
));
4654 iovl
= overlay_index (input_section
);
4655 ea
= bfd_get_section_by_name (output_bfd
, "._ea");
4656 symtab_hdr
= &elf_tdata (input_bfd
)->symtab_hdr
;
4657 sym_hashes
= (struct elf_link_hash_entry
**) (elf_sym_hashes (input_bfd
));
4660 relend
= relocs
+ input_section
->reloc_count
;
4661 for (; rel
< relend
; rel
++)
4664 reloc_howto_type
*howto
;
4665 unsigned int r_symndx
;
4666 Elf_Internal_Sym
*sym
;
4668 struct elf_link_hash_entry
*h
;
4669 const char *sym_name
;
4672 bfd_reloc_status_type r
;
4673 bfd_boolean unresolved_reloc
;
4675 bfd_boolean overlay_encoded
;
4676 enum _stub_type stub_type
;
4678 r_symndx
= ELF32_R_SYM (rel
->r_info
);
4679 r_type
= ELF32_R_TYPE (rel
->r_info
);
4680 howto
= elf_howto_table
+ r_type
;
4681 unresolved_reloc
= FALSE
;
4686 if (r_symndx
< symtab_hdr
->sh_info
)
4688 sym
= local_syms
+ r_symndx
;
4689 sec
= local_sections
[r_symndx
];
4690 sym_name
= bfd_elf_sym_name (input_bfd
, symtab_hdr
, sym
, sec
);
4691 relocation
= _bfd_elf_rela_local_sym (output_bfd
, sym
, &sec
, rel
);
4695 if (sym_hashes
== NULL
)
4698 h
= sym_hashes
[r_symndx
- symtab_hdr
->sh_info
];
4700 while (h
->root
.type
== bfd_link_hash_indirect
4701 || h
->root
.type
== bfd_link_hash_warning
)
4702 h
= (struct elf_link_hash_entry
*) h
->root
.u
.i
.link
;
4705 if (h
->root
.type
== bfd_link_hash_defined
4706 || h
->root
.type
== bfd_link_hash_defweak
)
4708 sec
= h
->root
.u
.def
.section
;
4710 || sec
->output_section
== NULL
)
4711 /* Set a flag that will be cleared later if we find a
4712 relocation value for this symbol. output_section
4713 is typically NULL for symbols satisfied by a shared
4715 unresolved_reloc
= TRUE
;
4717 relocation
= (h
->root
.u
.def
.value
4718 + sec
->output_section
->vma
4719 + sec
->output_offset
);
4721 else if (h
->root
.type
== bfd_link_hash_undefweak
)
4723 else if (info
->unresolved_syms_in_objects
== RM_IGNORE
4724 && ELF_ST_VISIBILITY (h
->other
) == STV_DEFAULT
)
4726 else if (!info
->relocatable
4727 && !(r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
))
4730 err
= (info
->unresolved_syms_in_objects
== RM_GENERATE_ERROR
4731 || ELF_ST_VISIBILITY (h
->other
) != STV_DEFAULT
);
4732 if (!info
->callbacks
->undefined_symbol (info
,
4733 h
->root
.root
.string
,
4736 rel
->r_offset
, err
))
4740 sym_name
= h
->root
.root
.string
;
4743 if (sec
!= NULL
&& elf_discarded_section (sec
))
4745 /* For relocs against symbols from removed linkonce sections,
4746 or sections discarded by a linker script, we just want the
4747 section contents zeroed. Avoid any special processing. */
4748 _bfd_clear_contents (howto
, input_bfd
, contents
+ rel
->r_offset
);
4754 if (info
->relocatable
)
4757 is_ea_sym
= (ea
!= NULL
4759 && sec
->output_section
== ea
);
4760 overlay_encoded
= FALSE
;
4762 /* If this symbol is in an overlay area, we may need to relocate
4763 to the overlay stub. */
4764 addend
= rel
->r_addend
;
4767 && (stub_type
= needs_ovl_stub (h
, sym
, sec
, input_section
, rel
,
4768 contents
, info
)) != no_stub
)
4770 unsigned int ovl
= 0;
4771 struct got_entry
*g
, **head
;
4773 if (stub_type
!= nonovl_stub
)
4777 head
= &h
->got
.glist
;
4779 head
= elf_local_got_ents (input_bfd
) + r_symndx
;
4781 for (g
= *head
; g
!= NULL
; g
= g
->next
)
4782 if (htab
->params
->ovly_flavour
== ovly_soft_icache
4783 ? g
->br_addr
== (rel
->r_offset
4784 + input_section
->output_offset
4785 + input_section
->output_section
->vma
)
4786 : g
->addend
== addend
&& (g
->ovl
== ovl
|| g
->ovl
== 0))
4791 relocation
= g
->stub_addr
;
4796 /* For soft icache, encode the overlay index into addresses. */
4797 if (htab
->params
->ovly_flavour
== ovly_soft_icache
4800 unsigned int ovl
= overlay_index (sec
);
4803 unsigned int set_id
= ((ovl
- 1) >> htab
->num_lines_log2
) + 1;
4804 relocation
+= set_id
<< 18;
4805 overlay_encoded
= TRUE
;
4810 if (unresolved_reloc
)
4812 else if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
4816 /* ._ea is a special section that isn't allocated in SPU
4817 memory, but rather occupies space in PPU memory as
4818 part of an embedded ELF image. If this reloc is
4819 against a symbol defined in ._ea, then transform the
4820 reloc into an equivalent one without a symbol
4821 relative to the start of the ELF image. */
4822 rel
->r_addend
+= (relocation
4824 + elf_section_data (ea
)->this_hdr
.sh_offset
);
4825 rel
->r_info
= ELF32_R_INFO (0, r_type
);
4827 emit_these_relocs
= TRUE
;
4831 unresolved_reloc
= TRUE
;
4833 if (unresolved_reloc
)
4835 (*_bfd_error_handler
)
4836 (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
4838 bfd_get_section_name (input_bfd
, input_section
),
4839 (long) rel
->r_offset
,
4845 r
= _bfd_final_link_relocate (howto
,
4849 rel
->r_offset
, relocation
, addend
);
4851 if (r
!= bfd_reloc_ok
)
4853 const char *msg
= (const char *) 0;
4857 case bfd_reloc_overflow
:
4858 /* FIXME: We don't want to warn on most references
4859 within an overlay to itself, but this may silence a
4860 warning that should be reported. */
4861 if (overlay_encoded
&& sec
== input_section
)
4863 if (!((*info
->callbacks
->reloc_overflow
)
4864 (info
, (h
? &h
->root
: NULL
), sym_name
, howto
->name
,
4865 (bfd_vma
) 0, input_bfd
, input_section
, rel
->r_offset
)))
4869 case bfd_reloc_undefined
:
4870 if (!((*info
->callbacks
->undefined_symbol
)
4871 (info
, sym_name
, input_bfd
, input_section
,
4872 rel
->r_offset
, TRUE
)))
4876 case bfd_reloc_outofrange
:
4877 msg
= _("internal error: out of range error");
4880 case bfd_reloc_notsupported
:
4881 msg
= _("internal error: unsupported relocation error");
4884 case bfd_reloc_dangerous
:
4885 msg
= _("internal error: dangerous error");
4889 msg
= _("internal error: unknown error");
4894 if (!((*info
->callbacks
->warning
)
4895 (info
, msg
, sym_name
, input_bfd
, input_section
,
4904 && emit_these_relocs
4905 && !info
->emitrelocations
)
4907 Elf_Internal_Rela
*wrel
;
4908 Elf_Internal_Shdr
*rel_hdr
;
4910 wrel
= rel
= relocs
;
4911 relend
= relocs
+ input_section
->reloc_count
;
4912 for (; rel
< relend
; rel
++)
4916 r_type
= ELF32_R_TYPE (rel
->r_info
);
4917 if (r_type
== R_SPU_PPU32
|| r_type
== R_SPU_PPU64
)
4920 input_section
->reloc_count
= wrel
- relocs
;
4921 /* Backflips for _bfd_elf_link_output_relocs. */
4922 rel_hdr
= &elf_section_data (input_section
)->rel_hdr
;
4923 rel_hdr
->sh_size
= input_section
->reloc_count
* rel_hdr
->sh_entsize
;
4930 /* Adjust _SPUEAR_ syms to point at their overlay stubs. */
4933 spu_elf_output_symbol_hook (struct bfd_link_info
*info
,
4934 const char *sym_name ATTRIBUTE_UNUSED
,
4935 Elf_Internal_Sym
*sym
,
4936 asection
*sym_sec ATTRIBUTE_UNUSED
,
4937 struct elf_link_hash_entry
*h
)
4939 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
4941 if (!info
->relocatable
4942 && htab
->stub_sec
!= NULL
4944 && (h
->root
.type
== bfd_link_hash_defined
4945 || h
->root
.type
== bfd_link_hash_defweak
)
4947 && strncmp (h
->root
.root
.string
, "_SPUEAR_", 8) == 0)
4949 struct got_entry
*g
;
4951 for (g
= h
->got
.glist
; g
!= NULL
; g
= g
->next
)
4952 if (htab
->params
->ovly_flavour
== ovly_soft_icache
4953 ? g
->br_addr
== g
->stub_addr
4954 : g
->addend
== 0 && g
->ovl
== 0)
4956 sym
->st_shndx
= (_bfd_elf_section_from_bfd_section
4957 (htab
->stub_sec
[0]->output_section
->owner
,
4958 htab
->stub_sec
[0]->output_section
));
4959 sym
->st_value
= g
->stub_addr
;
4967 static int spu_plugin
= 0;
4970 spu_elf_plugin (int val
)
4975 /* Set ELF header e_type for plugins. */
4978 spu_elf_post_process_headers (bfd
*abfd
,
4979 struct bfd_link_info
*info ATTRIBUTE_UNUSED
)
4983 Elf_Internal_Ehdr
*i_ehdrp
= elf_elfheader (abfd
);
4985 i_ehdrp
->e_type
= ET_DYN
;
4989 /* We may add an extra PT_LOAD segment for .toe. We also need extra
4990 segments for overlays. */
4993 spu_elf_additional_program_headers (bfd
*abfd
, struct bfd_link_info
*info
)
5000 struct spu_link_hash_table
*htab
= spu_hash_table (info
);
5001 extra
= htab
->num_overlays
;
5007 sec
= bfd_get_section_by_name (abfd
, ".toe");
5008 if (sec
!= NULL
&& (sec
->flags
& SEC_LOAD
) != 0)
5014 /* Remove .toe section from other PT_LOAD segments and put it in
5015 a segment of its own. Put overlays in separate segments too. */
5018 spu_elf_modify_segment_map (bfd
*abfd
, struct bfd_link_info
*info
)
5021 struct elf_segment_map
*m
;
5027 toe
= bfd_get_section_by_name (abfd
, ".toe");
5028 for (m
= elf_tdata (abfd
)->segment_map
; m
!= NULL
; m
= m
->next
)
5029 if (m
->p_type
== PT_LOAD
&& m
->count
> 1)
5030 for (i
= 0; i
< m
->count
; i
++)
5031 if ((s
= m
->sections
[i
]) == toe
5032 || spu_elf_section_data (s
)->u
.o
.ovl_index
!= 0)
5034 struct elf_segment_map
*m2
;
5037 if (i
+ 1 < m
->count
)
5039 amt
= sizeof (struct elf_segment_map
);
5040 amt
+= (m
->count
- (i
+ 2)) * sizeof (m
->sections
[0]);
5041 m2
= bfd_zalloc (abfd
, amt
);
5044 m2
->count
= m
->count
- (i
+ 1);
5045 memcpy (m2
->sections
, m
->sections
+ i
+ 1,
5046 m2
->count
* sizeof (m
->sections
[0]));
5047 m2
->p_type
= PT_LOAD
;
5055 amt
= sizeof (struct elf_segment_map
);
5056 m2
= bfd_zalloc (abfd
, amt
);
5059 m2
->p_type
= PT_LOAD
;
5061 m2
->sections
[0] = s
;
5071 /* Tweak the section type of .note.spu_name. */
5074 spu_elf_fake_sections (bfd
*obfd ATTRIBUTE_UNUSED
,
5075 Elf_Internal_Shdr
*hdr
,
5078 if (strcmp (sec
->name
, SPU_PTNOTE_SPUNAME
) == 0)
5079 hdr
->sh_type
= SHT_NOTE
;
5083 /* Tweak phdrs before writing them out. */
5086 spu_elf_modify_program_headers (bfd
*abfd
, struct bfd_link_info
*info
)
5088 const struct elf_backend_data
*bed
;
5089 struct elf_obj_tdata
*tdata
;
5090 Elf_Internal_Phdr
*phdr
, *last
;
5091 struct spu_link_hash_table
*htab
;
5098 bed
= get_elf_backend_data (abfd
);
5099 tdata
= elf_tdata (abfd
);
5101 count
= tdata
->program_header_size
/ bed
->s
->sizeof_phdr
;
5102 htab
= spu_hash_table (info
);
5103 if (htab
->num_overlays
!= 0)
5105 struct elf_segment_map
*m
;
5108 for (i
= 0, m
= elf_tdata (abfd
)->segment_map
; m
; ++i
, m
= m
->next
)
5110 && (o
= spu_elf_section_data (m
->sections
[0])->u
.o
.ovl_index
) != 0)
5112 /* Mark this as an overlay header. */
5113 phdr
[i
].p_flags
|= PF_OVERLAY
;
5115 if (htab
->ovtab
!= NULL
&& htab
->ovtab
->size
!= 0
5116 && htab
->params
->ovly_flavour
!= ovly_soft_icache
)
5118 bfd_byte
*p
= htab
->ovtab
->contents
;
5119 unsigned int off
= o
* 16 + 8;
5121 /* Write file_off into _ovly_table. */
5122 bfd_put_32 (htab
->ovtab
->owner
, phdr
[i
].p_offset
, p
+ off
);
5125 /* Soft-icache has its file offset put in .ovl.init. */
5126 if (htab
->init
!= NULL
&& htab
->init
->size
!= 0)
5128 bfd_vma val
= elf_section_data (htab
->ovl_sec
[0])->this_hdr
.sh_offset
;
5130 bfd_put_32 (htab
->init
->owner
, val
, htab
->init
->contents
+ 4);
5134 /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
5135 of 16. This should always be possible when using the standard
5136 linker scripts, but don't create overlapping segments if
5137 someone is playing games with linker scripts. */
5139 for (i
= count
; i
-- != 0; )
5140 if (phdr
[i
].p_type
== PT_LOAD
)
5144 adjust
= -phdr
[i
].p_filesz
& 15;
5147 && phdr
[i
].p_offset
+ phdr
[i
].p_filesz
> last
->p_offset
- adjust
)
5150 adjust
= -phdr
[i
].p_memsz
& 15;
5153 && phdr
[i
].p_filesz
!= 0
5154 && phdr
[i
].p_vaddr
+ phdr
[i
].p_memsz
> last
->p_vaddr
- adjust
5155 && phdr
[i
].p_vaddr
+ phdr
[i
].p_memsz
<= last
->p_vaddr
)
5158 if (phdr
[i
].p_filesz
!= 0)
5162 if (i
== (unsigned int) -1)
5163 for (i
= count
; i
-- != 0; )
5164 if (phdr
[i
].p_type
== PT_LOAD
)
5168 adjust
= -phdr
[i
].p_filesz
& 15;
5169 phdr
[i
].p_filesz
+= adjust
;
5171 adjust
= -phdr
[i
].p_memsz
& 15;
5172 phdr
[i
].p_memsz
+= adjust
;
5178 #define TARGET_BIG_SYM bfd_elf32_spu_vec
5179 #define TARGET_BIG_NAME "elf32-spu"
5180 #define ELF_ARCH bfd_arch_spu
5181 #define ELF_MACHINE_CODE EM_SPU
5182 /* This matches the alignment need for DMA. */
5183 #define ELF_MAXPAGESIZE 0x80
5184 #define elf_backend_rela_normal 1
5185 #define elf_backend_can_gc_sections 1
5187 #define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
5188 #define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
5189 #define elf_info_to_howto spu_elf_info_to_howto
5190 #define elf_backend_count_relocs spu_elf_count_relocs
5191 #define elf_backend_relocate_section spu_elf_relocate_section
5192 #define elf_backend_symbol_processing spu_elf_backend_symbol_processing
5193 #define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
5194 #define elf_backend_object_p spu_elf_object_p
5195 #define bfd_elf32_new_section_hook spu_elf_new_section_hook
5196 #define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create
5198 #define elf_backend_additional_program_headers spu_elf_additional_program_headers
5199 #define elf_backend_modify_segment_map spu_elf_modify_segment_map
5200 #define elf_backend_modify_program_headers spu_elf_modify_program_headers
5201 #define elf_backend_post_process_headers spu_elf_post_process_headers
5202 #define elf_backend_fake_sections spu_elf_fake_sections
5203 #define elf_backend_special_sections spu_elf_special_sections
5204 #define bfd_elf32_bfd_final_link spu_elf_final_link
5206 #include "elf32-target.h"