1 /* SPU specific support for 32-bit ELF
3 Copyright 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License along
18 with this program; if not, write to the Free Software Foundation, Inc.,
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
22 #include "libiberty.h"
28 #include "elf32-spu.h"
30 /* We use RELA style relocs. Don't define USE_REL. */
32 static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
36 /* Values of type 'enum elf_spu_reloc_type' are used to index this
37 array, so it must be declared in the order of that type. */
39 static reloc_howto_type elf_howto_table[] = {
40 HOWTO (R_SPU_NONE, 0, 0, 0, FALSE, 0, complain_overflow_dont,
41 bfd_elf_generic_reloc, "SPU_NONE",
42 FALSE, 0, 0x00000000, FALSE),
43 HOWTO (R_SPU_ADDR10, 4, 2, 10, FALSE, 14, complain_overflow_bitfield,
44 bfd_elf_generic_reloc, "SPU_ADDR10",
45 FALSE, 0, 0x00ffc000, FALSE),
46 HOWTO (R_SPU_ADDR16, 2, 2, 16, FALSE, 7, complain_overflow_bitfield,
47 bfd_elf_generic_reloc, "SPU_ADDR16",
48 FALSE, 0, 0x007fff80, FALSE),
49 HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE, 7, complain_overflow_bitfield,
50 bfd_elf_generic_reloc, "SPU_ADDR16_HI",
51 FALSE, 0, 0x007fff80, FALSE),
52 HOWTO (R_SPU_ADDR16_LO, 0, 2, 16, FALSE, 7, complain_overflow_dont,
53 bfd_elf_generic_reloc, "SPU_ADDR16_LO",
54 FALSE, 0, 0x007fff80, FALSE),
55 HOWTO (R_SPU_ADDR18, 0, 2, 18, FALSE, 7, complain_overflow_bitfield,
56 bfd_elf_generic_reloc, "SPU_ADDR18",
57 FALSE, 0, 0x01ffff80, FALSE),
58 HOWTO (R_SPU_ADDR32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
59 bfd_elf_generic_reloc, "SPU_ADDR32",
60 FALSE, 0, 0xffffffff, FALSE),
61 HOWTO (R_SPU_REL16, 2, 2, 16, TRUE, 7, complain_overflow_bitfield,
62 bfd_elf_generic_reloc, "SPU_REL16",
63 FALSE, 0, 0x007fff80, TRUE),
64 HOWTO (R_SPU_ADDR7, 0, 2, 7, FALSE, 14, complain_overflow_dont,
65 bfd_elf_generic_reloc, "SPU_ADDR7",
66 FALSE, 0, 0x001fc000, FALSE),
67 HOWTO (R_SPU_REL9, 2, 2, 9, TRUE, 0, complain_overflow_signed,
68 spu_elf_rel9, "SPU_REL9",
69 FALSE, 0, 0x0180007f, TRUE),
70 HOWTO (R_SPU_REL9I, 2, 2, 9, TRUE, 0, complain_overflow_signed,
71 spu_elf_rel9, "SPU_REL9I",
72 FALSE, 0, 0x0000c07f, TRUE),
73 HOWTO (R_SPU_ADDR10I, 0, 2, 10, FALSE, 14, complain_overflow_signed,
74 bfd_elf_generic_reloc, "SPU_ADDR10I",
75 FALSE, 0, 0x00ffc000, FALSE),
76 HOWTO (R_SPU_ADDR16I, 0, 2, 16, FALSE, 7, complain_overflow_signed,
77 bfd_elf_generic_reloc, "SPU_ADDR16I",
78 FALSE, 0, 0x007fff80, FALSE),
79 HOWTO (R_SPU_REL32, 0, 2, 32, TRUE, 0, complain_overflow_dont,
80 bfd_elf_generic_reloc, "SPU_REL32",
81 FALSE, 0, 0xffffffff, TRUE),
82 HOWTO (R_SPU_ADDR16X, 0, 2, 16, FALSE, 7, complain_overflow_bitfield,
83 bfd_elf_generic_reloc, "SPU_ADDR16X",
84 FALSE, 0, 0x007fff80, FALSE),
85 HOWTO (R_SPU_PPU32, 0, 2, 32, FALSE, 0, complain_overflow_dont,
86 bfd_elf_generic_reloc, "SPU_PPU32",
87 FALSE, 0, 0xffffffff, FALSE),
88 HOWTO (R_SPU_PPU64, 0, 4, 64, FALSE, 0, complain_overflow_dont,
89 bfd_elf_generic_reloc, "SPU_PPU64",
93 static struct bfd_elf_special_section const spu_elf_special_sections[] = {
94 { "._ea", 4, 0, SHT_PROGBITS, SHF_WRITE },
95 { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
99 static enum elf_spu_reloc_type
100 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
106 case BFD_RELOC_SPU_IMM10W:
108 case BFD_RELOC_SPU_IMM16W:
110 case BFD_RELOC_SPU_LO16:
111 return R_SPU_ADDR16_LO;
112 case BFD_RELOC_SPU_HI16:
113 return R_SPU_ADDR16_HI;
114 case BFD_RELOC_SPU_IMM18:
116 case BFD_RELOC_SPU_PCREL16:
118 case BFD_RELOC_SPU_IMM7:
120 case BFD_RELOC_SPU_IMM8:
122 case BFD_RELOC_SPU_PCREL9a:
124 case BFD_RELOC_SPU_PCREL9b:
126 case BFD_RELOC_SPU_IMM10:
127 return R_SPU_ADDR10I;
128 case BFD_RELOC_SPU_IMM16:
129 return R_SPU_ADDR16I;
132 case BFD_RELOC_32_PCREL:
134 case BFD_RELOC_SPU_PPU32:
136 case BFD_RELOC_SPU_PPU64:
142 spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
144 Elf_Internal_Rela *dst)
146 enum elf_spu_reloc_type r_type;
148 r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
149 BFD_ASSERT (r_type < R_SPU_max);
150 cache_ptr->howto = &elf_howto_table[(int) r_type];
153 static reloc_howto_type *
154 spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
155 bfd_reloc_code_real_type code)
157 enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);
159 if (r_type == R_SPU_NONE)
162 return elf_howto_table + r_type;
165 static reloc_howto_type *
166 spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
171 for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
172 if (elf_howto_table[i].name != NULL
173 && strcasecmp (elf_howto_table[i].name, r_name) == 0)
174 return &elf_howto_table[i];
179 /* Apply R_SPU_REL9 and R_SPU_REL9I relocs. */
181 static bfd_reloc_status_type
182 spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
183 void *data, asection *input_section,
184 bfd *output_bfd, char **error_message)
186 bfd_size_type octets;
190 /* If this is a relocatable link (output_bfd test tells us), just
191 call the generic function. Any adjustment will be done at final
193 if (output_bfd != NULL)
194 return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
195 input_section, output_bfd, error_message);
197 if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
198 return bfd_reloc_outofrange;
199 octets = reloc_entry->address * bfd_octets_per_byte (abfd);
201 /* Get symbol value. */
203 if (!bfd_is_com_section (symbol->section))
205 if (symbol->section->output_section)
206 val += symbol->section->output_section->vma;
208 val += reloc_entry->addend;
210 /* Make it pc-relative. */
211 val -= input_section->output_section->vma + input_section->output_offset;
214 if (val + 256 >= 512)
215 return bfd_reloc_overflow;
217 insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);
219 /* Move two high bits of value to REL9I and REL9 position.
220 The mask will take care of selecting the right field. */
221 val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
222 insn &= ~reloc_entry->howto->dst_mask;
223 insn |= val & reloc_entry->howto->dst_mask;
224 bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
229 spu_elf_new_section_hook (bfd *abfd, asection *sec)
231 if (!sec->used_by_bfd)
233 struct _spu_elf_section_data *sdata;
235 sdata = bfd_zalloc (abfd, sizeof (*sdata));
238 sec->used_by_bfd = sdata;
241 return _bfd_elf_new_section_hook (abfd, sec);
244 /* Set up overlay info for executables. */
247 spu_elf_object_p (bfd *abfd)
249 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
251 unsigned int i, num_ovl, num_buf;
252 Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
253 Elf_Internal_Ehdr *ehdr = elf_elfheader (abfd);
254 Elf_Internal_Phdr *last_phdr = NULL;
256 for (num_buf = 0, num_ovl = 0, i = 0; i < ehdr->e_phnum; i++, phdr++)
257 if (phdr->p_type == PT_LOAD && (phdr->p_flags & PF_OVERLAY) != 0)
262 if (last_phdr == NULL
263 || ((last_phdr->p_vaddr ^ phdr->p_vaddr) & 0x3ffff) != 0)
266 for (j = 1; j < elf_numsections (abfd); j++)
268 Elf_Internal_Shdr *shdr = elf_elfsections (abfd)[j];
270 if (ELF_IS_SECTION_IN_SEGMENT_MEMORY (shdr, phdr))
272 asection *sec = shdr->bfd_section;
273 spu_elf_section_data (sec)->u.o.ovl_index = num_ovl;
274 spu_elf_section_data (sec)->u.o.ovl_buf = num_buf;
282 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
283 strip --strip-unneeded will not remove them. */
286 spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
288 if (sym->name != NULL
289 && sym->section != bfd_abs_section_ptr
290 && strncmp (sym->name, "_EAR_", 5) == 0)
291 sym->flags |= BSF_KEEP;
294 /* SPU ELF linker hash table. */
296 struct spu_link_hash_table
298 struct elf_link_hash_table elf;
300 struct spu_elf_params *params;
302 /* Shortcuts to overlay sections. */
308 /* Count of stubs in each overlay section. */
309 unsigned int *stub_count;
311 /* The stub section for each overlay section. */
314 struct elf_link_hash_entry *ovly_entry[2];
316 /* Number of overlay buffers. */
317 unsigned int num_buf;
319 /* Total number of overlays. */
320 unsigned int num_overlays;
322 /* For soft icache. */
323 unsigned int line_size_log2;
324 unsigned int num_lines_log2;
325 unsigned int fromelem_size_log2;
327 /* How much memory we have. */
328 unsigned int local_store;
329 /* Local store --auto-overlay should reserve for non-overlay
330 functions and data. */
331 unsigned int overlay_fixed;
332 /* Local store --auto-overlay should reserve for stack and heap. */
333 unsigned int reserved;
334 /* If reserved is not specified, stack analysis will calculate a value
335 for the stack. This parameter adjusts that value to allow for
336 negative sp access (the ABI says 2000 bytes below sp are valid,
337 and the overlay manager uses some of this area). */
338 int extra_stack_space;
339 /* Count of overlay stubs needed in non-overlay area. */
340 unsigned int non_ovly_stub;
343 unsigned int stub_err : 1;
346 /* Hijack the generic got fields for overlay stub accounting. */
350 struct got_entry *next;
359 #define spu_hash_table(p) \
360 ((struct spu_link_hash_table *) ((p)->hash))
364 struct function_info *fun;
365 struct call_info *next;
367 unsigned int max_depth;
368 unsigned int is_tail : 1;
369 unsigned int is_pasted : 1;
370 unsigned int priority : 13;
375 /* List of functions called. Also branches to hot/cold part of
377 struct call_info *call_list;
378 /* For hot/cold part of function, point to owner. */
379 struct function_info *start;
380 /* Symbol at start of function. */
382 Elf_Internal_Sym *sym;
383 struct elf_link_hash_entry *h;
385 /* Function section. */
388 /* Where last called from, and number of sections called from. */
389 asection *last_caller;
390 unsigned int call_count;
391 /* Address range of (this part of) function. */
393 /* Offset where we found a store of lr, or -1 if none found. */
395 /* Offset where we found the stack adjustment insn. */
399 /* Distance from root of call tree. Tail and hot/cold branches
400 count as one deeper. We aren't counting stack frames here. */
402 /* Set if global symbol. */
403 unsigned int global : 1;
404 /* Set if known to be start of function (as distinct from a hunk
405 in hot/cold section. */
406 unsigned int is_func : 1;
407 /* Set if not a root node. */
408 unsigned int non_root : 1;
409 /* Flags used during call tree traversal. It's cheaper to replicate
410 the visit flags than have one which needs clearing after a traversal. */
411 unsigned int visit1 : 1;
412 unsigned int visit2 : 1;
413 unsigned int marking : 1;
414 unsigned int visit3 : 1;
415 unsigned int visit4 : 1;
416 unsigned int visit5 : 1;
417 unsigned int visit6 : 1;
418 unsigned int visit7 : 1;
421 struct spu_elf_stack_info
425 /* Variable size array describing functions, one per contiguous
426 address range belonging to a function. */
427 struct function_info fun[1];
430 static struct function_info *find_function (asection *, bfd_vma,
431 struct bfd_link_info *);
433 /* Create a spu ELF linker hash table. */
435 static struct bfd_link_hash_table *
436 spu_elf_link_hash_table_create (bfd *abfd)
438 struct spu_link_hash_table *htab;
440 htab = bfd_malloc (sizeof (*htab));
444 if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
445 _bfd_elf_link_hash_newfunc,
446 sizeof (struct elf_link_hash_entry)))
452 memset (&htab->ovtab, 0,
453 sizeof (*htab) - offsetof (struct spu_link_hash_table, ovtab));
455 htab->elf.init_got_refcount.refcount = 0;
456 htab->elf.init_got_refcount.glist = NULL;
457 htab->elf.init_got_offset.offset = 0;
458 htab->elf.init_got_offset.glist = NULL;
459 return &htab->elf.root;
463 spu_elf_setup (struct bfd_link_info *info, struct spu_elf_params *params)
465 bfd_vma max_branch_log2;
467 struct spu_link_hash_table *htab = spu_hash_table (info);
468 htab->params = params;
469 htab->line_size_log2 = bfd_log2 (htab->params->line_size);
470 htab->num_lines_log2 = bfd_log2 (htab->params->num_lines);
472 /* For the software i-cache, we provide a "from" list whose size
473 is a power-of-two number of quadwords, big enough to hold one
474 byte per outgoing branch. Compute this number here. */
475 max_branch_log2 = bfd_log2 (htab->params->max_branch);
476 htab->fromelem_size_log2 = max_branch_log2 > 4 ? max_branch_log2 - 4 : 0;
479 /* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
480 to (hash, NULL) for global symbols, and (NULL, sym) for locals. Set
481 *SYMSECP to the symbol's section. *LOCSYMSP caches local syms. */
484 get_sym_h (struct elf_link_hash_entry **hp,
485 Elf_Internal_Sym **symp,
487 Elf_Internal_Sym **locsymsp,
488 unsigned long r_symndx,
491 Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
493 if (r_symndx >= symtab_hdr->sh_info)
495 struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
496 struct elf_link_hash_entry *h;
498 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
499 while (h->root.type == bfd_link_hash_indirect
500 || h->root.type == bfd_link_hash_warning)
501 h = (struct elf_link_hash_entry *) h->root.u.i.link;
511 asection *symsec = NULL;
512 if (h->root.type == bfd_link_hash_defined
513 || h->root.type == bfd_link_hash_defweak)
514 symsec = h->root.u.def.section;
520 Elf_Internal_Sym *sym;
521 Elf_Internal_Sym *locsyms = *locsymsp;
525 locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
527 locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr,
529 0, NULL, NULL, NULL);
534 sym = locsyms + r_symndx;
543 *symsecp = bfd_section_from_elf_index (ibfd, sym->st_shndx);
549 /* Create the note section if not already present. This is done early so
550 that the linker maps the sections to the right place in the output. */
553 spu_elf_create_sections (struct bfd_link_info *info)
557 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
558 if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
563 /* Make SPU_PTNOTE_SPUNAME section. */
570 ibfd = info->input_bfds;
571 flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
572 s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
574 || !bfd_set_section_alignment (ibfd, s, 4))
577 name_len = strlen (bfd_get_filename (info->output_bfd)) + 1;
578 size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
579 size += (name_len + 3) & -4;
581 if (!bfd_set_section_size (ibfd, s, size))
584 data = bfd_zalloc (ibfd, size);
588 bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
589 bfd_put_32 (ibfd, name_len, data + 4);
590 bfd_put_32 (ibfd, 1, data + 8);
591 memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
592 memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
593 bfd_get_filename (info->output_bfd), name_len);
600 /* qsort predicate to sort sections by vma. */
603 sort_sections (const void *a, const void *b)
605 const asection *const *s1 = a;
606 const asection *const *s2 = b;
607 bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;
610 return delta < 0 ? -1 : 1;
612 return (*s1)->index - (*s2)->index;
615 /* Identify overlays in the output bfd, and number them.
616 Returns 0 on error, 1 if no overlays, 2 if overlays. */
619 spu_elf_find_overlays (struct bfd_link_info *info)
621 struct spu_link_hash_table *htab = spu_hash_table (info);
622 asection **alloc_sec;
623 unsigned int i, n, ovl_index, num_buf;
626 static const char *const entry_names[2][2] = {
627 { "__ovly_load", "__icache_br_handler" },
628 { "__ovly_return", "__icache_call_handler" }
631 if (info->output_bfd->section_count < 2)
635 = bfd_malloc (info->output_bfd->section_count * sizeof (*alloc_sec));
636 if (alloc_sec == NULL)
639 /* Pick out all the alloced sections. */
640 for (n = 0, s = info->output_bfd->sections; s != NULL; s = s->next)
641 if ((s->flags & SEC_ALLOC) != 0
642 && (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
652 /* Sort them by vma. */
653 qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);
655 ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
656 if (htab->params->ovly_flavour == ovly_soft_icache)
658 /* Look for an overlapping vma to find the first overlay section. */
659 bfd_vma vma_start = 0;
660 bfd_vma lma_start = 0;
662 for (i = 1; i < n; i++)
665 if (s->vma < ovl_end)
667 asection *s0 = alloc_sec[i - 1];
669 if (strncmp (s0->name, ".ovl.init", 9) != 0)
675 << (htab->num_lines_log2 + htab->line_size_log2)));
680 ovl_end = s->vma + s->size;
683 /* Now find any sections within the cache area. */
684 for (ovl_index = 0, num_buf = 0; i < n; i++)
687 if (s->vma >= ovl_end)
690 /* A section in an overlay area called .ovl.init is not
691 an overlay, in the sense that it might be loaded in
692 by the overlay manager, but rather the initial
693 section contents for the overlay buffer. */
694 if (strncmp (s->name, ".ovl.init", 9) != 0)
696 num_buf = ((s->vma - vma_start) >> htab->line_size_log2) + 1;
697 if (((s->vma - vma_start) & (htab->params->line_size - 1))
698 || ((s->lma - lma_start) & (htab->params->line_size - 1)))
700 info->callbacks->einfo (_("%X%P: overlay section %A "
701 "does not start on a cache line.\n"),
703 bfd_set_error (bfd_error_bad_value);
706 else if (s->size > htab->params->line_size)
708 info->callbacks->einfo (_("%X%P: overlay section %A "
709 "is larger than a cache line.\n"),
711 bfd_set_error (bfd_error_bad_value);
715 alloc_sec[ovl_index++] = s;
716 spu_elf_section_data (s)->u.o.ovl_index
717 = ((s->lma - lma_start) >> htab->line_size_log2) + 1;
718 spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
722 /* Ensure there are no more overlay sections. */
726 if (s->vma < ovl_end)
728 info->callbacks->einfo (_("%X%P: overlay section %A "
729 "is not in cache area.\n"),
731 bfd_set_error (bfd_error_bad_value);
735 ovl_end = s->vma + s->size;
740 /* Look for overlapping vmas. Any with overlap must be overlays.
741 Count them. Also count the number of overlay regions. */
742 for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
745 if (s->vma < ovl_end)
747 asection *s0 = alloc_sec[i - 1];
749 if (spu_elf_section_data (s0)->u.o.ovl_index == 0)
752 if (strncmp (s0->name, ".ovl.init", 9) != 0)
754 alloc_sec[ovl_index] = s0;
755 spu_elf_section_data (s0)->u.o.ovl_index = ++ovl_index;
756 spu_elf_section_data (s0)->u.o.ovl_buf = num_buf;
759 ovl_end = s->vma + s->size;
761 if (strncmp (s->name, ".ovl.init", 9) != 0)
763 alloc_sec[ovl_index] = s;
764 spu_elf_section_data (s)->u.o.ovl_index = ++ovl_index;
765 spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
766 if (s0->vma != s->vma)
768 info->callbacks->einfo (_("%X%P: overlay sections %A "
769 "and %A do not start at the "
772 bfd_set_error (bfd_error_bad_value);
775 if (ovl_end < s->vma + s->size)
776 ovl_end = s->vma + s->size;
780 ovl_end = s->vma + s->size;
784 htab->num_overlays = ovl_index;
785 htab->num_buf = num_buf;
786 htab->ovl_sec = alloc_sec;
791 for (i = 0; i < 2; i++)
794 struct elf_link_hash_entry *h;
796 name = entry_names[i][htab->params->ovly_flavour];
797 h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
801 if (h->root.type == bfd_link_hash_new)
803 h->root.type = bfd_link_hash_undefined;
805 h->ref_regular_nonweak = 1;
808 htab->ovly_entry[i] = h;
814 /* Non-zero to use bra in overlay stubs rather than br. */
817 #define BRA 0x30000000
818 #define BRASL 0x31000000
819 #define BR 0x32000000
820 #define BRSL 0x33000000
821 #define NOP 0x40200000
822 #define LNOP 0x00200000
823 #define ILA 0x42000000
825 /* Return true for all relative and absolute branch instructions.
833 brhnz 00100011 0.. */
836 is_branch (const unsigned char *insn)
838 return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
841 /* Return true for all indirect branch instructions.
849 bihnz 00100101 011 */
852 is_indirect_branch (const unsigned char *insn)
854 return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
857 /* Return true for branch hint instructions.
862 is_hint (const unsigned char *insn)
864 return (insn[0] & 0xfc) == 0x10;
867 /* True if INPUT_SECTION might need overlay stubs. */
870 maybe_needs_stubs (asection *input_section)
872 /* No stubs for debug sections and suchlike. */
873 if ((input_section->flags & SEC_ALLOC) == 0)
876 /* No stubs for link-once sections that will be discarded. */
877 if (input_section->output_section == bfd_abs_section_ptr)
880 /* Don't create stubs for .eh_frame references. */
881 if (strcmp (input_section->name, ".eh_frame") == 0)
903 /* Return non-zero if this reloc symbol should go via an overlay stub.
904 Return 2 if the stub must be in non-overlay area. */
906 static enum _stub_type
907 needs_ovl_stub (struct elf_link_hash_entry *h,
908 Elf_Internal_Sym *sym,
910 asection *input_section,
911 Elf_Internal_Rela *irela,
913 struct bfd_link_info *info)
915 struct spu_link_hash_table *htab = spu_hash_table (info);
916 enum elf_spu_reloc_type r_type;
917 unsigned int sym_type;
918 bfd_boolean branch, hint, call;
919 enum _stub_type ret = no_stub;
923 || sym_sec->output_section == bfd_abs_section_ptr
924 || spu_elf_section_data (sym_sec->output_section) == NULL)
929 /* Ensure no stubs for user supplied overlay manager syms. */
930 if (h == htab->ovly_entry[0] || h == htab->ovly_entry[1])
933 /* setjmp always goes via an overlay stub, because then the return
934 and hence the longjmp goes via __ovly_return. That magically
935 makes setjmp/longjmp between overlays work. */
936 if (strncmp (h->root.root.string, "setjmp", 6) == 0
937 && (h->root.root.string[6] == '\0' || h->root.root.string[6] == '@'))
944 sym_type = ELF_ST_TYPE (sym->st_info);
946 r_type = ELF32_R_TYPE (irela->r_info);
950 if (r_type == R_SPU_REL16 || r_type == R_SPU_ADDR16)
952 if (contents == NULL)
955 if (!bfd_get_section_contents (input_section->owner,
962 contents += irela->r_offset;
964 branch = is_branch (contents);
965 hint = is_hint (contents);
968 call = (contents[0] & 0xfd) == 0x31;
970 && sym_type != STT_FUNC
973 /* It's common for people to write assembly and forget
974 to give function symbols the right type. Handle
975 calls to such symbols, but warn so that (hopefully)
976 people will fix their code. We need the symbol
977 type to be correct to distinguish function pointer
978 initialisation from other pointer initialisations. */
979 const char *sym_name;
982 sym_name = h->root.root.string;
985 Elf_Internal_Shdr *symtab_hdr;
986 symtab_hdr = &elf_tdata (input_section->owner)->symtab_hdr;
987 sym_name = bfd_elf_sym_name (input_section->owner,
992 (*_bfd_error_handler) (_("warning: call to non-function"
993 " symbol %s defined in %B"),
994 sym_sec->owner, sym_name);
1000 if ((!branch && htab->params->ovly_flavour == ovly_soft_icache)
1001 || (sym_type != STT_FUNC
1002 && !(branch || hint)
1003 && (sym_sec->flags & SEC_CODE) == 0))
1006 /* Usually, symbols in non-overlay sections don't need stubs. */
1007 if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index == 0
1008 && !htab->params->non_overlay_stubs)
1011 /* A reference from some other section to a symbol in an overlay
1012 section needs a stub. */
1013 if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index
1014 != spu_elf_section_data (input_section->output_section)->u.o.ovl_index)
1016 if (call || sym_type == STT_FUNC)
1017 ret = call_ovl_stub;
1020 ret = br000_ovl_stub;
1024 unsigned int lrlive = (contents[1] & 0x70) >> 4;
1030 /* If this insn isn't a branch then we are possibly taking the
1031 address of a function and passing it out somehow. Soft-icache code
1032 always generates inline code to do indirect branches. */
1033 if (!(branch || hint)
1034 && sym_type == STT_FUNC
1035 && htab->params->ovly_flavour != ovly_soft_icache)
1042 count_stub (struct spu_link_hash_table *htab,
1045 enum _stub_type stub_type,
1046 struct elf_link_hash_entry *h,
1047 const Elf_Internal_Rela *irela)
1049 unsigned int ovl = 0;
1050 struct got_entry *g, **head;
1053 /* If this instruction is a branch or call, we need a stub
1054 for it. One stub per function per overlay.
1055 If it isn't a branch, then we are taking the address of
1056 this function so need a stub in the non-overlay area
1057 for it. One stub per function. */
1058 if (stub_type != nonovl_stub)
1059 ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;
1062 head = &h->got.glist;
1065 if (elf_local_got_ents (ibfd) == NULL)
1067 bfd_size_type amt = (elf_tdata (ibfd)->symtab_hdr.sh_info
1068 * sizeof (*elf_local_got_ents (ibfd)));
1069 elf_local_got_ents (ibfd) = bfd_zmalloc (amt);
1070 if (elf_local_got_ents (ibfd) == NULL)
1073 head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
1076 if (htab->params->ovly_flavour == ovly_soft_icache)
1078 htab->stub_count[ovl] += 1;
1084 addend = irela->r_addend;
1088 struct got_entry *gnext;
1090 for (g = *head; g != NULL; g = g->next)
1091 if (g->addend == addend && g->ovl == 0)
1096 /* Need a new non-overlay area stub. Zap other stubs. */
1097 for (g = *head; g != NULL; g = gnext)
1100 if (g->addend == addend)
1102 htab->stub_count[g->ovl] -= 1;
1110 for (g = *head; g != NULL; g = g->next)
1111 if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
1117 g = bfd_malloc (sizeof *g);
1122 g->stub_addr = (bfd_vma) -1;
1126 htab->stub_count[ovl] += 1;
1132 /* Support two sizes of overlay stubs, a slower more compact stub of two
1133 intructions, and a faster stub of four instructions.
1134 Soft-icache stubs are four or eight words. */
1137 ovl_stub_size (struct spu_elf_params *params)
1139 return 16 << params->ovly_flavour >> params->compact_stub;
1143 ovl_stub_size_log2 (struct spu_elf_params *params)
1145 return 4 + params->ovly_flavour - params->compact_stub;
1148 /* Two instruction overlay stubs look like:
1150 brsl $75,__ovly_load
1151 .word target_ovl_and_address
1153 ovl_and_address is a word with the overlay number in the top 14 bits
1154 and local store address in the bottom 18 bits.
1156 Four instruction overlay stubs look like:
1160 ila $79,target_address
1163 Software icache stubs are:
1167 .word lrlive_branchlocalstoreaddr;
1168 brasl $75,__icache_br_handler
1173 build_stub (struct bfd_link_info *info,
1176 enum _stub_type stub_type,
1177 struct elf_link_hash_entry *h,
1178 const Elf_Internal_Rela *irela,
1182 struct spu_link_hash_table *htab = spu_hash_table (info);
1183 unsigned int ovl, dest_ovl, set_id;
1184 struct got_entry *g, **head;
1186 bfd_vma addend, from, to, br_dest, patt;
1187 unsigned int lrlive;
1190 if (stub_type != nonovl_stub)
1191 ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;
1194 head = &h->got.glist;
1196 head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
1200 addend = irela->r_addend;
1202 if (htab->params->ovly_flavour == ovly_soft_icache)
1204 g = bfd_malloc (sizeof *g);
1210 g->br_addr = (irela->r_offset
1211 + isec->output_offset
1212 + isec->output_section->vma);
1218 for (g = *head; g != NULL; g = g->next)
1219 if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
1224 if (g->ovl == 0 && ovl != 0)
1227 if (g->stub_addr != (bfd_vma) -1)
1231 sec = htab->stub_sec[ovl];
1232 dest += dest_sec->output_offset + dest_sec->output_section->vma;
1233 from = sec->size + sec->output_offset + sec->output_section->vma;
1234 g->stub_addr = from;
1235 to = (htab->ovly_entry[0]->root.u.def.value
1236 + htab->ovly_entry[0]->root.u.def.section->output_offset
1237 + htab->ovly_entry[0]->root.u.def.section->output_section->vma);
1239 if (((dest | to | from) & 3) != 0)
1244 dest_ovl = spu_elf_section_data (dest_sec->output_section)->u.o.ovl_index;
1246 if (htab->params->ovly_flavour == ovly_normal
1247 && !htab->params->compact_stub)
1249 bfd_put_32 (sec->owner, ILA + ((dest_ovl << 7) & 0x01ffff80) + 78,
1250 sec->contents + sec->size);
1251 bfd_put_32 (sec->owner, LNOP,
1252 sec->contents + sec->size + 4);
1253 bfd_put_32 (sec->owner, ILA + ((dest << 7) & 0x01ffff80) + 79,
1254 sec->contents + sec->size + 8);
1256 bfd_put_32 (sec->owner, BR + (((to - (from + 12)) << 5) & 0x007fff80),
1257 sec->contents + sec->size + 12);
1259 bfd_put_32 (sec->owner, BRA + ((to << 5) & 0x007fff80),
1260 sec->contents + sec->size + 12);
1262 else if (htab->params->ovly_flavour == ovly_normal
1263 && htab->params->compact_stub)
1266 bfd_put_32 (sec->owner, BRSL + (((to - from) << 5) & 0x007fff80) + 75,
1267 sec->contents + sec->size);
1269 bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
1270 sec->contents + sec->size);
1271 bfd_put_32 (sec->owner, (dest & 0x3ffff) | (dest_ovl << 18),
1272 sec->contents + sec->size + 4);
1274 else if (htab->params->ovly_flavour == ovly_soft_icache
1275 && htab->params->compact_stub)
1278 if (stub_type == nonovl_stub)
1280 else if (stub_type == call_ovl_stub)
1281 /* A brsl makes lr live and *(*sp+16) is live.
1282 Tail calls have the same liveness. */
1284 else if (!htab->params->lrlive_analysis)
1285 /* Assume stack frame and lr save. */
1287 else if (irela != NULL)
1289 /* Analyse branch instructions. */
1290 struct function_info *caller;
1293 caller = find_function (isec, irela->r_offset, info);
1294 if (caller->start == NULL)
1295 off = irela->r_offset;
1298 struct function_info *found = NULL;
1300 /* Find the earliest piece of this function that
1301 has frame adjusting instructions. We might
1302 see dynamic frame adjustment (eg. for alloca)
1303 in some later piece, but functions using
1304 alloca always set up a frame earlier. Frame
1305 setup instructions are always in one piece. */
1306 if (caller->lr_store != (bfd_vma) -1
1307 || caller->sp_adjust != (bfd_vma) -1)
1309 while (caller->start != NULL)
1311 caller = caller->start;
1312 if (caller->lr_store != (bfd_vma) -1
1313 || caller->sp_adjust != (bfd_vma) -1)
1321 if (off > caller->sp_adjust)
1323 if (off > caller->lr_store)
1324 /* Only *(*sp+16) is live. */
1327 /* If no lr save, then we must be in a
1328 leaf function with a frame.
1329 lr is still live. */
1332 else if (off > caller->lr_store)
1334 /* Between lr save and stack adjust. */
1336 /* This should never happen since prologues won't
1341 /* On entry to function. */
1344 if (stub_type != br000_ovl_stub
1345 && lrlive != stub_type - br000_ovl_stub)
1346 info->callbacks->einfo (_("%A:0x%v lrlive .brinfo (%u) differs "
1347 "from analysis (%u)\n"),
1348 isec, irela->r_offset, lrlive,
1349 stub_type - br000_ovl_stub);
1352 /* If given lrlive info via .brinfo, use it. */
1353 if (stub_type > br000_ovl_stub)
1354 lrlive = stub_type - br000_ovl_stub;
1357 to = (htab->ovly_entry[1]->root.u.def.value
1358 + htab->ovly_entry[1]->root.u.def.section->output_offset
1359 + htab->ovly_entry[1]->root.u.def.section->output_section->vma);
1361 /* The branch that uses this stub goes to stub_addr + 4. We'll
1362 set up an xor pattern that can be used by the icache manager
1363 to modify this branch to go directly to its destination. */
1365 br_dest = g->stub_addr;
1368 /* Except in the case of _SPUEAR_ stubs, the branch in
1369 question is the one in the stub itself. */
1370 BFD_ASSERT (stub_type == nonovl_stub);
1371 g->br_addr = g->stub_addr;
1375 set_id = ((dest_ovl - 1) >> htab->num_lines_log2) + 1;
1376 bfd_put_32 (sec->owner, (set_id << 18) | (dest & 0x3ffff),
1377 sec->contents + sec->size);
1378 bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
1379 sec->contents + sec->size + 4);
1380 bfd_put_32 (sec->owner, (lrlive << 29) | (g->br_addr & 0x3ffff),
1381 sec->contents + sec->size + 8);
1382 patt = dest ^ br_dest;
1383 if (irela != NULL && ELF32_R_TYPE (irela->r_info) == R_SPU_REL16)
1384 patt = (dest - g->br_addr) ^ (br_dest - g->br_addr);
1385 bfd_put_32 (sec->owner, (patt << 5) & 0x007fff80,
1386 sec->contents + sec->size + 12);
1389 /* Extra space for linked list entries. */
1395 sec->size += ovl_stub_size (htab->params);
1397 if (htab->params->emit_stub_syms)
1403 len = 8 + sizeof (".ovl_call.") - 1;
1405 len += strlen (h->root.root.string);
1410 add = (int) irela->r_addend & 0xffffffff;
1413 name = bfd_malloc (len);
1417 sprintf (name, "%08x.ovl_call.", g->ovl);
1419 strcpy (name + 8 + sizeof (".ovl_call.") - 1, h->root.root.string);
1421 sprintf (name + 8 + sizeof (".ovl_call.") - 1, "%x:%x",
1422 dest_sec->id & 0xffffffff,
1423 (int) ELF32_R_SYM (irela->r_info) & 0xffffffff);
1425 sprintf (name + len - 9, "+%x", add);
1427 h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
1431 if (h->root.type == bfd_link_hash_new)
1433 h->root.type = bfd_link_hash_defined;
1434 h->root.u.def.section = sec;
1435 h->size = ovl_stub_size (htab->params);
1436 h->root.u.def.value = sec->size - h->size;
1440 h->ref_regular_nonweak = 1;
1441 h->forced_local = 1;
1449 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
1453 allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1455 /* Symbols starting with _SPUEAR_ need a stub because they may be
1456 invoked by the PPU. */
1457 struct bfd_link_info *info = inf;
1458 struct spu_link_hash_table *htab = spu_hash_table (info);
1461 if ((h->root.type == bfd_link_hash_defined
1462 || h->root.type == bfd_link_hash_defweak)
1464 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
1465 && (sym_sec = h->root.u.def.section) != NULL
1466 && sym_sec->output_section != bfd_abs_section_ptr
1467 && spu_elf_section_data (sym_sec->output_section) != NULL
1468 && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
1469 || htab->params->non_overlay_stubs))
1471 return count_stub (htab, NULL, NULL, nonovl_stub, h, NULL);
1478 build_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1480 /* Symbols starting with _SPUEAR_ need a stub because they may be
1481 invoked by the PPU. */
1482 struct bfd_link_info *info = inf;
1483 struct spu_link_hash_table *htab = spu_hash_table (info);
1486 if ((h->root.type == bfd_link_hash_defined
1487 || h->root.type == bfd_link_hash_defweak)
1489 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
1490 && (sym_sec = h->root.u.def.section) != NULL
1491 && sym_sec->output_section != bfd_abs_section_ptr
1492 && spu_elf_section_data (sym_sec->output_section) != NULL
1493 && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
1494 || htab->params->non_overlay_stubs))
1496 return build_stub (info, NULL, NULL, nonovl_stub, h, NULL,
1497 h->root.u.def.value, sym_sec);
1503 /* Size or build stubs. */
1506 process_stubs (struct bfd_link_info *info, bfd_boolean build)
1508 struct spu_link_hash_table *htab = spu_hash_table (info);
1511 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
1513 extern const bfd_target bfd_elf32_spu_vec;
1514 Elf_Internal_Shdr *symtab_hdr;
1516 Elf_Internal_Sym *local_syms = NULL;
1518 if (ibfd->xvec != &bfd_elf32_spu_vec)
1521 /* We'll need the symbol table in a second. */
1522 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
1523 if (symtab_hdr->sh_info == 0)
1526 /* Walk over each section attached to the input bfd. */
1527 for (isec = ibfd->sections; isec != NULL; isec = isec->next)
1529 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
1531 /* If there aren't any relocs, then there's nothing more to do. */
1532 if ((isec->flags & SEC_RELOC) == 0
1533 || isec->reloc_count == 0)
1536 if (!maybe_needs_stubs (isec))
1539 /* Get the relocs. */
1540 internal_relocs = _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
1542 if (internal_relocs == NULL)
1543 goto error_ret_free_local;
1545 /* Now examine each relocation. */
1546 irela = internal_relocs;
1547 irelaend = irela + isec->reloc_count;
1548 for (; irela < irelaend; irela++)
1550 enum elf_spu_reloc_type r_type;
1551 unsigned int r_indx;
1553 Elf_Internal_Sym *sym;
1554 struct elf_link_hash_entry *h;
1555 enum _stub_type stub_type;
1557 r_type = ELF32_R_TYPE (irela->r_info);
1558 r_indx = ELF32_R_SYM (irela->r_info);
1560 if (r_type >= R_SPU_max)
1562 bfd_set_error (bfd_error_bad_value);
1563 error_ret_free_internal:
1564 if (elf_section_data (isec)->relocs != internal_relocs)
1565 free (internal_relocs);
1566 error_ret_free_local:
1567 if (local_syms != NULL
1568 && (symtab_hdr->contents
1569 != (unsigned char *) local_syms))
1574 /* Determine the reloc target section. */
1575 if (!get_sym_h (&h, &sym, &sym_sec, &local_syms, r_indx, ibfd))
1576 goto error_ret_free_internal;
1578 stub_type = needs_ovl_stub (h, sym, sym_sec, isec, irela,
1580 if (stub_type == no_stub)
1582 else if (stub_type == stub_error)
1583 goto error_ret_free_internal;
1585 if (htab->stub_count == NULL)
1588 amt = (htab->num_overlays + 1) * sizeof (*htab->stub_count);
1589 htab->stub_count = bfd_zmalloc (amt);
1590 if (htab->stub_count == NULL)
1591 goto error_ret_free_internal;
1596 if (!count_stub (htab, ibfd, isec, stub_type, h, irela))
1597 goto error_ret_free_internal;
1604 dest = h->root.u.def.value;
1606 dest = sym->st_value;
1607 dest += irela->r_addend;
1608 if (!build_stub (info, ibfd, isec, stub_type, h, irela,
1610 goto error_ret_free_internal;
1614 /* We're done with the internal relocs, free them. */
1615 if (elf_section_data (isec)->relocs != internal_relocs)
1616 free (internal_relocs);
1619 if (local_syms != NULL
1620 && symtab_hdr->contents != (unsigned char *) local_syms)
1622 if (!info->keep_memory)
1625 symtab_hdr->contents = (unsigned char *) local_syms;
1632 /* Allocate space for overlay call and return stubs.
1633 Return 0 on error, 1 if no stubs, 2 otherwise. */
1636 spu_elf_size_stubs (struct bfd_link_info *info)
1638 struct spu_link_hash_table *htab;
1645 if (!process_stubs (info, FALSE))
1648 htab = spu_hash_table (info);
1649 elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, info);
1653 if (htab->stub_count == NULL)
1656 ibfd = info->input_bfds;
1657 amt = (htab->num_overlays + 1) * sizeof (*htab->stub_sec);
1658 htab->stub_sec = bfd_zmalloc (amt);
1659 if (htab->stub_sec == NULL)
1662 flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
1663 | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
1664 stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
1665 htab->stub_sec[0] = stub;
1667 || !bfd_set_section_alignment (ibfd, stub,
1668 ovl_stub_size_log2 (htab->params)))
1670 stub->size = htab->stub_count[0] * ovl_stub_size (htab->params);
1671 if (htab->params->ovly_flavour == ovly_soft_icache)
1672 /* Extra space for linked list entries. */
1673 stub->size += htab->stub_count[0] * 16;
1675 for (i = 0; i < htab->num_overlays; ++i)
1677 asection *osec = htab->ovl_sec[i];
1678 unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
1679 stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
1680 htab->stub_sec[ovl] = stub;
1682 || !bfd_set_section_alignment (ibfd, stub,
1683 ovl_stub_size_log2 (htab->params)))
1685 stub->size = htab->stub_count[ovl] * ovl_stub_size (htab->params);
1688 if (htab->params->ovly_flavour == ovly_soft_icache)
1690 /* Space for icache manager tables.
1691 a) Tag array, one quadword per cache line.
1692 b) Rewrite "to" list, one quadword per cache line.
1693 c) Rewrite "from" list, one byte per outgoing branch (rounded up to
1694 a power-of-two number of full quadwords) per cache line. */
1697 htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
1698 if (htab->ovtab == NULL
1699 || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
1702 htab->ovtab->size = (16 + 16 + (16 << htab->fromelem_size_log2))
1703 << htab->num_lines_log2;
1705 flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
1706 htab->init = bfd_make_section_anyway_with_flags (ibfd, ".ovini", flags);
1707 if (htab->init == NULL
1708 || !bfd_set_section_alignment (ibfd, htab->init, 4))
1711 htab->init->size = 16;
1715 /* htab->ovtab consists of two arrays.
1725 . } _ovly_buf_table[];
1728 flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
1729 htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
1730 if (htab->ovtab == NULL
1731 || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
1734 htab->ovtab->size = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
1737 htab->toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
1738 if (htab->toe == NULL
1739 || !bfd_set_section_alignment (ibfd, htab->toe, 4))
1741 htab->toe->size = 16;
1746 /* Called from ld to place overlay manager data sections. This is done
1747 after the overlay manager itself is loaded, mainly so that the
1748 linker's htab->init section is placed after any other .ovl.init
1752 spu_elf_place_overlay_data (struct bfd_link_info *info)
1754 struct spu_link_hash_table *htab = spu_hash_table (info);
1758 if (htab->stub_count == NULL)
1761 (*htab->params->place_spu_section) (htab->stub_sec[0], NULL, ".text");
1763 for (i = 0; i < htab->num_overlays; ++i)
1765 asection *osec = htab->ovl_sec[i];
1766 unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
1767 (*htab->params->place_spu_section) (htab->stub_sec[ovl], osec, NULL);
1770 if (htab->params->ovly_flavour == ovly_soft_icache)
1771 (*htab->params->place_spu_section) (htab->init, NULL, ".ovl.init");
1774 if (htab->params->ovly_flavour == ovly_soft_icache)
1776 (*htab->params->place_spu_section) (htab->ovtab, NULL, ovout);
1778 (*htab->params->place_spu_section) (htab->toe, NULL, ".toe");
1781 /* Functions to handle embedded spu_ovl.o object. */
1784 ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
1790 ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
1796 struct _ovl_stream *os;
1800 os = (struct _ovl_stream *) stream;
1801 max = (const char *) os->end - (const char *) os->start;
1803 if ((ufile_ptr) offset >= max)
1807 if (count > max - offset)
1808 count = max - offset;
1810 memcpy (buf, (const char *) os->start + offset, count);
1815 spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
1817 *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
1824 return *ovl_bfd != NULL;
1828 overlay_index (asection *sec)
1831 || sec->output_section == bfd_abs_section_ptr)
1833 return spu_elf_section_data (sec->output_section)->u.o.ovl_index;
1836 /* Define an STT_OBJECT symbol. */
1838 static struct elf_link_hash_entry *
1839 define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
1841 struct elf_link_hash_entry *h;
1843 h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
1847 if (h->root.type != bfd_link_hash_defined
1850 h->root.type = bfd_link_hash_defined;
1851 h->root.u.def.section = htab->ovtab;
1852 h->type = STT_OBJECT;
1855 h->ref_regular_nonweak = 1;
1858 else if (h->root.u.def.section->owner != NULL)
1860 (*_bfd_error_handler) (_("%B is not allowed to define %s"),
1861 h->root.u.def.section->owner,
1862 h->root.root.string);
1863 bfd_set_error (bfd_error_bad_value);
1868 (*_bfd_error_handler) (_("you are not allowed to define %s in a script"),
1869 h->root.root.string);
1870 bfd_set_error (bfd_error_bad_value);
1877 /* Fill in all stubs and the overlay tables. */
1880 spu_elf_build_stubs (struct bfd_link_info *info)
1882 struct spu_link_hash_table *htab = spu_hash_table (info);
1883 struct elf_link_hash_entry *h;
1889 if (htab->stub_count == NULL)
1892 for (i = 0; i <= htab->num_overlays; i++)
1893 if (htab->stub_sec[i]->size != 0)
1895 htab->stub_sec[i]->contents = bfd_zalloc (htab->stub_sec[i]->owner,
1896 htab->stub_sec[i]->size);
1897 if (htab->stub_sec[i]->contents == NULL)
1899 htab->stub_sec[i]->rawsize = htab->stub_sec[i]->size;
1900 htab->stub_sec[i]->size = 0;
1903 for (i = 0; i < 2; i++)
1905 h = htab->ovly_entry[i];
1906 BFD_ASSERT (h != NULL);
1908 if ((h->root.type == bfd_link_hash_defined
1909 || h->root.type == bfd_link_hash_defweak)
1912 s = h->root.u.def.section->output_section;
1913 if (spu_elf_section_data (s)->u.o.ovl_index)
1915 (*_bfd_error_handler) (_("%s in overlay section"),
1916 h->root.root.string);
1917 bfd_set_error (bfd_error_bad_value);
1925 /* Fill in all the stubs. */
1926 process_stubs (info, TRUE);
1927 if (!htab->stub_err)
1928 elf_link_hash_traverse (&htab->elf, build_spuear_stubs, info);
1932 (*_bfd_error_handler) (_("overlay stub relocation overflow"));
1933 bfd_set_error (bfd_error_bad_value);
1937 for (i = 0; i <= htab->num_overlays; i++)
1939 if (htab->stub_sec[i]->size != htab->stub_sec[i]->rawsize)
1941 (*_bfd_error_handler) (_("stubs don't match calculated size"));
1942 bfd_set_error (bfd_error_bad_value);
1945 htab->stub_sec[i]->rawsize = 0;
1948 if (htab->ovtab == NULL || htab->ovtab->size == 0)
1951 htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
1952 if (htab->ovtab->contents == NULL)
1955 p = htab->ovtab->contents;
1956 if (htab->params->ovly_flavour == ovly_soft_icache)
1960 h = define_ovtab_symbol (htab, "__icache_tag_array");
1963 h->root.u.def.value = 0;
1964 h->size = 16 << htab->num_lines_log2;
1967 h = define_ovtab_symbol (htab, "__icache_tag_array_size");
1970 h->root.u.def.value = 16 << htab->num_lines_log2;
1971 h->root.u.def.section = bfd_abs_section_ptr;
1973 h = define_ovtab_symbol (htab, "__icache_rewrite_to");
1976 h->root.u.def.value = off;
1977 h->size = 16 << htab->num_lines_log2;
1980 h = define_ovtab_symbol (htab, "__icache_rewrite_to_size");
1983 h->root.u.def.value = 16 << htab->num_lines_log2;
1984 h->root.u.def.section = bfd_abs_section_ptr;
1986 h = define_ovtab_symbol (htab, "__icache_rewrite_from");
1989 h->root.u.def.value = off;
1990 h->size = 16 << (htab->fromelem_size_log2 + htab->num_lines_log2);
1993 h = define_ovtab_symbol (htab, "__icache_rewrite_from_size");
1996 h->root.u.def.value = 16 << (htab->fromelem_size_log2
1997 + htab->num_lines_log2);
1998 h->root.u.def.section = bfd_abs_section_ptr;
2000 h = define_ovtab_symbol (htab, "__icache_log2_fromelemsize");
2003 h->root.u.def.value = htab->fromelem_size_log2;
2004 h->root.u.def.section = bfd_abs_section_ptr;
2006 h = define_ovtab_symbol (htab, "__icache_base");
2009 h->root.u.def.value = htab->ovl_sec[0]->vma;
2010 h->root.u.def.section = bfd_abs_section_ptr;
2011 h->size = htab->num_buf << htab->line_size_log2;
2013 h = define_ovtab_symbol (htab, "__icache_linesize");
2016 h->root.u.def.value = 1 << htab->line_size_log2;
2017 h->root.u.def.section = bfd_abs_section_ptr;
2019 h = define_ovtab_symbol (htab, "__icache_log2_linesize");
2022 h->root.u.def.value = htab->line_size_log2;
2023 h->root.u.def.section = bfd_abs_section_ptr;
2025 h = define_ovtab_symbol (htab, "__icache_neg_log2_linesize");
2028 h->root.u.def.value = -htab->line_size_log2;
2029 h->root.u.def.section = bfd_abs_section_ptr;
2031 h = define_ovtab_symbol (htab, "__icache_cachesize");
2034 h->root.u.def.value = 1 << (htab->num_lines_log2 + htab->line_size_log2);
2035 h->root.u.def.section = bfd_abs_section_ptr;
2037 h = define_ovtab_symbol (htab, "__icache_log2_cachesize");
2040 h->root.u.def.value = htab->num_lines_log2 + htab->line_size_log2;
2041 h->root.u.def.section = bfd_abs_section_ptr;
2043 h = define_ovtab_symbol (htab, "__icache_neg_log2_cachesize");
2046 h->root.u.def.value = -(htab->num_lines_log2 + htab->line_size_log2);
2047 h->root.u.def.section = bfd_abs_section_ptr;
2049 if (htab->init != NULL && htab->init->size != 0)
2051 htab->init->contents = bfd_zalloc (htab->init->owner,
2053 if (htab->init->contents == NULL)
2056 h = define_ovtab_symbol (htab, "__icache_fileoff");
2059 h->root.u.def.value = 0;
2060 h->root.u.def.section = htab->init;
2066 /* Write out _ovly_table. */
2067 /* set low bit of .size to mark non-overlay area as present. */
2069 obfd = htab->ovtab->output_section->owner;
2070 for (s = obfd->sections; s != NULL; s = s->next)
2072 unsigned int ovl_index = spu_elf_section_data (s)->u.o.ovl_index;
2076 unsigned long off = ovl_index * 16;
2077 unsigned int ovl_buf = spu_elf_section_data (s)->u.o.ovl_buf;
2079 bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
2080 bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16,
2082 /* file_off written later in spu_elf_modify_program_headers. */
2083 bfd_put_32 (htab->ovtab->owner, ovl_buf, p + off + 12);
2087 h = define_ovtab_symbol (htab, "_ovly_table");
2090 h->root.u.def.value = 16;
2091 h->size = htab->num_overlays * 16;
2093 h = define_ovtab_symbol (htab, "_ovly_table_end");
2096 h->root.u.def.value = htab->num_overlays * 16 + 16;
2099 h = define_ovtab_symbol (htab, "_ovly_buf_table");
2102 h->root.u.def.value = htab->num_overlays * 16 + 16;
2103 h->size = htab->num_buf * 4;
2105 h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
2108 h->root.u.def.value = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
2112 h = define_ovtab_symbol (htab, "_EAR_");
2115 h->root.u.def.section = htab->toe;
2116 h->root.u.def.value = 0;
2122 /* Check that all loadable section VMAs lie in the range
2123 LO .. HI inclusive, and stash some parameters for --auto-overlay. */
2126 spu_elf_check_vma (struct bfd_link_info *info)
2128 struct elf_segment_map *m;
2130 struct spu_link_hash_table *htab = spu_hash_table (info);
2131 bfd *abfd = info->output_bfd;
2132 bfd_vma hi = htab->params->local_store_hi;
2133 bfd_vma lo = htab->params->local_store_lo;
2135 htab->local_store = hi + 1 - lo;
2137 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
2138 if (m->p_type == PT_LOAD)
2139 for (i = 0; i < m->count; i++)
2140 if (m->sections[i]->size != 0
2141 && (m->sections[i]->vma < lo
2142 || m->sections[i]->vma > hi
2143 || m->sections[i]->vma + m->sections[i]->size - 1 > hi))
2144 return m->sections[i];
2149 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
2150 Search for stack adjusting insns, and return the sp delta.
2151 If a store of lr is found save the instruction offset to *LR_STORE.
2152 If a stack adjusting instruction is found, save that offset to
2156 find_function_stack_adjust (asection *sec,
2163 memset (reg, 0, sizeof (reg));
2164 for ( ; offset + 4 <= sec->size; offset += 4)
2166 unsigned char buf[4];
2170 /* Assume no relocs on stack adjusing insns. */
2171 if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
2175 ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);
2177 if (buf[0] == 0x24 /* stqd */)
2179 if (rt == 0 /* lr */ && ra == 1 /* sp */)
2184 /* Partly decoded immediate field. */
2185 imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);
2187 if (buf[0] == 0x1c /* ai */)
2190 imm = (imm ^ 0x200) - 0x200;
2191 reg[rt] = reg[ra] + imm;
2193 if (rt == 1 /* sp */)
2197 *sp_adjust = offset;
2201 else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
2203 int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);
2205 reg[rt] = reg[ra] + reg[rb];
2210 *sp_adjust = offset;
2214 else if (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */)
2216 int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);
2218 reg[rt] = reg[rb] - reg[ra];
2223 *sp_adjust = offset;
2227 else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
2229 if (buf[0] >= 0x42 /* ila */)
2230 imm |= (buf[0] & 1) << 17;
2235 if (buf[0] == 0x40 /* il */)
2237 if ((buf[1] & 0x80) == 0)
2239 imm = (imm ^ 0x8000) - 0x8000;
2241 else if ((buf[1] & 0x80) == 0 /* ilhu */)
2247 else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
2249 reg[rt] |= imm & 0xffff;
2252 else if (buf[0] == 0x04 /* ori */)
2255 imm = (imm ^ 0x200) - 0x200;
2256 reg[rt] = reg[ra] | imm;
2259 else if (buf[0] == 0x32 && (buf[1] & 0x80) != 0 /* fsmbi */)
2261 reg[rt] = ( ((imm & 0x8000) ? 0xff000000 : 0)
2262 | ((imm & 0x4000) ? 0x00ff0000 : 0)
2263 | ((imm & 0x2000) ? 0x0000ff00 : 0)
2264 | ((imm & 0x1000) ? 0x000000ff : 0));
2267 else if (buf[0] == 0x16 /* andbi */)
2273 reg[rt] = reg[ra] & imm;
2276 else if (buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
2278 /* Used in pic reg load. Say rt is trashed. Won't be used
2279 in stack adjust, but we need to continue past this branch. */
2283 else if (is_branch (buf) || is_indirect_branch (buf))
2284 /* If we hit a branch then we must be out of the prologue. */
2291 /* qsort predicate to sort symbols by section and value. */
2293 static Elf_Internal_Sym *sort_syms_syms;
2294 static asection **sort_syms_psecs;
2297 sort_syms (const void *a, const void *b)
2299 Elf_Internal_Sym *const *s1 = a;
2300 Elf_Internal_Sym *const *s2 = b;
2301 asection *sec1,*sec2;
2302 bfd_signed_vma delta;
2304 sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
2305 sec2 = sort_syms_psecs[*s2 - sort_syms_syms];
2308 return sec1->index - sec2->index;
2310 delta = (*s1)->st_value - (*s2)->st_value;
2312 return delta < 0 ? -1 : 1;
2314 delta = (*s2)->st_size - (*s1)->st_size;
2316 return delta < 0 ? -1 : 1;
2318 return *s1 < *s2 ? -1 : 1;
2321 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
2322 entries for section SEC. */
2324 static struct spu_elf_stack_info *
2325 alloc_stack_info (asection *sec, int max_fun)
2327 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2330 amt = sizeof (struct spu_elf_stack_info);
2331 amt += (max_fun - 1) * sizeof (struct function_info);
2332 sec_data->u.i.stack_info = bfd_zmalloc (amt);
2333 if (sec_data->u.i.stack_info != NULL)
2334 sec_data->u.i.stack_info->max_fun = max_fun;
2335 return sec_data->u.i.stack_info;
2338 /* Add a new struct function_info describing a (part of a) function
2339 starting at SYM_H. Keep the array sorted by address. */
2341 static struct function_info *
2342 maybe_insert_function (asection *sec,
2345 bfd_boolean is_func)
2347 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2348 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2354 sinfo = alloc_stack_info (sec, 20);
2361 Elf_Internal_Sym *sym = sym_h;
2362 off = sym->st_value;
2363 size = sym->st_size;
2367 struct elf_link_hash_entry *h = sym_h;
2368 off = h->root.u.def.value;
2372 for (i = sinfo->num_fun; --i >= 0; )
2373 if (sinfo->fun[i].lo <= off)
2378 /* Don't add another entry for an alias, but do update some
2380 if (sinfo->fun[i].lo == off)
2382 /* Prefer globals over local syms. */
2383 if (global && !sinfo->fun[i].global)
2385 sinfo->fun[i].global = TRUE;
2386 sinfo->fun[i].u.h = sym_h;
2389 sinfo->fun[i].is_func = TRUE;
2390 return &sinfo->fun[i];
2392 /* Ignore a zero-size symbol inside an existing function. */
2393 else if (sinfo->fun[i].hi > off && size == 0)
2394 return &sinfo->fun[i];
2397 if (sinfo->num_fun >= sinfo->max_fun)
2399 bfd_size_type amt = sizeof (struct spu_elf_stack_info);
2400 bfd_size_type old = amt;
2402 old += (sinfo->max_fun - 1) * sizeof (struct function_info);
2403 sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
2404 amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
2405 sinfo = bfd_realloc (sinfo, amt);
2408 memset ((char *) sinfo + old, 0, amt - old);
2409 sec_data->u.i.stack_info = sinfo;
2412 if (++i < sinfo->num_fun)
2413 memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
2414 (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
2415 sinfo->fun[i].is_func = is_func;
2416 sinfo->fun[i].global = global;
2417 sinfo->fun[i].sec = sec;
2419 sinfo->fun[i].u.h = sym_h;
2421 sinfo->fun[i].u.sym = sym_h;
2422 sinfo->fun[i].lo = off;
2423 sinfo->fun[i].hi = off + size;
2424 sinfo->fun[i].lr_store = -1;
2425 sinfo->fun[i].sp_adjust = -1;
2426 sinfo->fun[i].stack = -find_function_stack_adjust (sec, off,
2427 &sinfo->fun[i].lr_store,
2428 &sinfo->fun[i].sp_adjust);
2429 sinfo->num_fun += 1;
2430 return &sinfo->fun[i];
2433 /* Return the name of FUN. */
2436 func_name (struct function_info *fun)
2440 Elf_Internal_Shdr *symtab_hdr;
2442 while (fun->start != NULL)
2446 return fun->u.h->root.root.string;
2449 if (fun->u.sym->st_name == 0)
2451 size_t len = strlen (sec->name);
2452 char *name = bfd_malloc (len + 10);
2455 sprintf (name, "%s+%lx", sec->name,
2456 (unsigned long) fun->u.sym->st_value & 0xffffffff);
2460 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2461 return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
2464 /* Read the instruction at OFF in SEC. Return true iff the instruction
2465 is a nop, lnop, or stop 0 (all zero insn). */
2468 is_nop (asection *sec, bfd_vma off)
2470 unsigned char insn[4];
2472 if (off + 4 > sec->size
2473 || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
2475 if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
2477 if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
2482 /* Extend the range of FUN to cover nop padding up to LIMIT.
2483 Return TRUE iff some instruction other than a NOP was found. */
2486 insns_at_end (struct function_info *fun, bfd_vma limit)
2488 bfd_vma off = (fun->hi + 3) & -4;
2490 while (off < limit && is_nop (fun->sec, off))
2501 /* Check and fix overlapping function ranges. Return TRUE iff there
2502 are gaps in the current info we have about functions in SEC. */
2505 check_function_ranges (asection *sec, struct bfd_link_info *info)
2507 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2508 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2510 bfd_boolean gaps = FALSE;
2515 for (i = 1; i < sinfo->num_fun; i++)
2516 if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
2518 /* Fix overlapping symbols. */
2519 const char *f1 = func_name (&sinfo->fun[i - 1]);
2520 const char *f2 = func_name (&sinfo->fun[i]);
2522 info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
2523 sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
2525 else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
2528 if (sinfo->num_fun == 0)
2532 if (sinfo->fun[0].lo != 0)
2534 if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
2536 const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);
2538 info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
2539 sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
2541 else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
2547 /* Search current function info for a function that contains address
2548 OFFSET in section SEC. */
2550 static struct function_info *
2551 find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
2553 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2554 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2558 hi = sinfo->num_fun;
2561 mid = (lo + hi) / 2;
2562 if (offset < sinfo->fun[mid].lo)
2564 else if (offset >= sinfo->fun[mid].hi)
2567 return &sinfo->fun[mid];
2569 info->callbacks->einfo (_("%A:0x%v not found in function table\n"),
2571 bfd_set_error (bfd_error_bad_value);
2575 /* Add CALLEE to CALLER call list if not already present. Return TRUE
2576 if CALLEE was new. If this function return FALSE, CALLEE should
2580 insert_callee (struct function_info *caller, struct call_info *callee)
2582 struct call_info **pp, *p;
2584 for (pp = &caller->call_list; (p = *pp) != NULL; pp = &p->next)
2585 if (p->fun == callee->fun)
2587 /* Tail calls use less stack than normal calls. Retain entry
2588 for normal call over one for tail call. */
2589 p->is_tail &= callee->is_tail;
2592 p->fun->start = NULL;
2593 p->fun->is_func = TRUE;
2595 p->count += callee->count;
2596 /* Reorder list so most recent call is first. */
2598 p->next = caller->call_list;
2599 caller->call_list = p;
2602 callee->next = caller->call_list;
2603 caller->call_list = callee;
2607 /* Copy CALL and insert the copy into CALLER. */
2610 copy_callee (struct function_info *caller, const struct call_info *call)
2612 struct call_info *callee;
2613 callee = bfd_malloc (sizeof (*callee));
2617 if (!insert_callee (caller, callee))
2622 /* We're only interested in code sections. Testing SEC_IN_MEMORY excludes
2623 overlay stub sections. */
2626 interesting_section (asection *s)
2628 return (s->output_section != bfd_abs_section_ptr
2629 && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_IN_MEMORY))
2630 == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2634 /* Rummage through the relocs for SEC, looking for function calls.
2635 If CALL_TREE is true, fill in call graph. If CALL_TREE is false,
2636 mark destination symbols on calls as being functions. Also
2637 look at branches, which may be tail calls or go to hot/cold
2638 section part of same function. */
2641 mark_functions_via_relocs (asection *sec,
2642 struct bfd_link_info *info,
2645 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
2646 Elf_Internal_Shdr *symtab_hdr;
2648 unsigned int priority = 0;
2649 static bfd_boolean warned;
2651 if (!interesting_section (sec)
2652 || sec->reloc_count == 0)
2655 internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
2657 if (internal_relocs == NULL)
2660 symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
2661 psyms = &symtab_hdr->contents;
2662 irela = internal_relocs;
2663 irelaend = irela + sec->reloc_count;
2664 for (; irela < irelaend; irela++)
2666 enum elf_spu_reloc_type r_type;
2667 unsigned int r_indx;
2669 Elf_Internal_Sym *sym;
2670 struct elf_link_hash_entry *h;
2672 bfd_boolean reject, is_call;
2673 struct function_info *caller;
2674 struct call_info *callee;
2677 r_type = ELF32_R_TYPE (irela->r_info);
2678 if (r_type != R_SPU_REL16
2679 && r_type != R_SPU_ADDR16)
2682 if (!(call_tree && spu_hash_table (info)->params->auto_overlay))
2686 r_indx = ELF32_R_SYM (irela->r_info);
2687 if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
2691 || sym_sec->output_section == bfd_abs_section_ptr)
2697 unsigned char insn[4];
2699 if (!bfd_get_section_contents (sec->owner, sec, insn,
2700 irela->r_offset, 4))
2702 if (is_branch (insn))
2704 is_call = (insn[0] & 0xfd) == 0x31;
2705 priority = insn[1] & 0x0f;
2707 priority |= insn[2];
2709 priority |= insn[3];
2711 if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2712 != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2715 info->callbacks->einfo
2716 (_("%B(%A+0x%v): call to non-code section"
2717 " %B(%A), analysis incomplete\n"),
2718 sec->owner, sec, irela->r_offset,
2719 sym_sec->owner, sym_sec);
2727 if (!(call_tree && spu_hash_table (info)->params->auto_overlay)
2735 /* For --auto-overlay, count possible stubs we need for
2736 function pointer references. */
2737 unsigned int sym_type;
2741 sym_type = ELF_ST_TYPE (sym->st_info);
2742 if (sym_type == STT_FUNC)
2743 spu_hash_table (info)->non_ovly_stub += 1;
2748 val = h->root.u.def.value;
2750 val = sym->st_value;
2751 val += irela->r_addend;
2755 struct function_info *fun;
2757 if (irela->r_addend != 0)
2759 Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
2762 fake->st_value = val;
2764 = _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
2768 fun = maybe_insert_function (sym_sec, sym, FALSE, is_call);
2770 fun = maybe_insert_function (sym_sec, h, TRUE, is_call);
2773 if (irela->r_addend != 0
2774 && fun->u.sym != sym)
2779 caller = find_function (sec, irela->r_offset, info);
2782 callee = bfd_malloc (sizeof *callee);
2786 callee->fun = find_function (sym_sec, val, info);
2787 if (callee->fun == NULL)
2789 callee->is_tail = !is_call;
2790 callee->is_pasted = FALSE;
2791 callee->priority = priority;
2793 if (callee->fun->last_caller != sec)
2795 callee->fun->last_caller = sec;
2796 callee->fun->call_count += 1;
2798 if (!insert_callee (caller, callee))
2801 && !callee->fun->is_func
2802 && callee->fun->stack == 0)
2804 /* This is either a tail call or a branch from one part of
2805 the function to another, ie. hot/cold section. If the
2806 destination has been called by some other function then
2807 it is a separate function. We also assume that functions
2808 are not split across input files. */
2809 if (sec->owner != sym_sec->owner)
2811 callee->fun->start = NULL;
2812 callee->fun->is_func = TRUE;
2814 else if (callee->fun->start == NULL)
2816 struct function_info *caller_start = caller;
2817 while (caller_start->start)
2818 caller_start = caller_start->start;
2820 if (caller_start != callee->fun)
2821 callee->fun->start = caller_start;
2825 struct function_info *callee_start;
2826 struct function_info *caller_start;
2827 callee_start = callee->fun;
2828 while (callee_start->start)
2829 callee_start = callee_start->start;
2830 caller_start = caller;
2831 while (caller_start->start)
2832 caller_start = caller_start->start;
2833 if (caller_start != callee_start)
2835 callee->fun->start = NULL;
2836 callee->fun->is_func = TRUE;
2845 /* Handle something like .init or .fini, which has a piece of a function.
2846 These sections are pasted together to form a single function. */
2849 pasted_function (asection *sec)
2851 struct bfd_link_order *l;
2852 struct _spu_elf_section_data *sec_data;
2853 struct spu_elf_stack_info *sinfo;
2854 Elf_Internal_Sym *fake;
2855 struct function_info *fun, *fun_start;
2857 fake = bfd_zmalloc (sizeof (*fake));
2861 fake->st_size = sec->size;
2863 = _bfd_elf_section_from_bfd_section (sec->owner, sec);
2864 fun = maybe_insert_function (sec, fake, FALSE, FALSE);
2868 /* Find a function immediately preceding this section. */
2870 for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
2872 if (l->u.indirect.section == sec)
2874 if (fun_start != NULL)
2876 struct call_info *callee = bfd_malloc (sizeof *callee);
2880 fun->start = fun_start;
2882 callee->is_tail = TRUE;
2883 callee->is_pasted = TRUE;
2885 if (!insert_callee (fun_start, callee))
2891 if (l->type == bfd_indirect_link_order
2892 && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
2893 && (sinfo = sec_data->u.i.stack_info) != NULL
2894 && sinfo->num_fun != 0)
2895 fun_start = &sinfo->fun[sinfo->num_fun - 1];
2898 /* Don't return an error if we did not find a function preceding this
2899 section. The section may have incorrect flags. */
2903 /* Map address ranges in code sections to functions. */
2906 discover_functions (struct bfd_link_info *info)
2910 Elf_Internal_Sym ***psym_arr;
2911 asection ***sec_arr;
2912 bfd_boolean gaps = FALSE;
2915 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2918 psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
2919 if (psym_arr == NULL)
2921 sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
2922 if (sec_arr == NULL)
2925 for (ibfd = info->input_bfds, bfd_idx = 0;
2927 ibfd = ibfd->link_next, bfd_idx++)
2929 extern const bfd_target bfd_elf32_spu_vec;
2930 Elf_Internal_Shdr *symtab_hdr;
2933 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2934 asection **psecs, **p;
2936 if (ibfd->xvec != &bfd_elf32_spu_vec)
2939 /* Read all the symbols. */
2940 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2941 symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
2945 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2946 if (interesting_section (sec))
2954 if (symtab_hdr->contents != NULL)
2956 /* Don't use cached symbols since the generic ELF linker
2957 code only reads local symbols, and we need globals too. */
2958 free (symtab_hdr->contents);
2959 symtab_hdr->contents = NULL;
2961 syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
2963 symtab_hdr->contents = (void *) syms;
2967 /* Select defined function symbols that are going to be output. */
2968 psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
2971 psym_arr[bfd_idx] = psyms;
2972 psecs = bfd_malloc (symcount * sizeof (*psecs));
2975 sec_arr[bfd_idx] = psecs;
2976 for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
2977 if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
2978 || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
2982 *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
2983 if (s != NULL && interesting_section (s))
2986 symcount = psy - psyms;
2989 /* Sort them by section and offset within section. */
2990 sort_syms_syms = syms;
2991 sort_syms_psecs = psecs;
2992 qsort (psyms, symcount, sizeof (*psyms), sort_syms);
2994 /* Now inspect the function symbols. */
2995 for (psy = psyms; psy < psyms + symcount; )
2997 asection *s = psecs[*psy - syms];
2998 Elf_Internal_Sym **psy2;
3000 for (psy2 = psy; ++psy2 < psyms + symcount; )
3001 if (psecs[*psy2 - syms] != s)
3004 if (!alloc_stack_info (s, psy2 - psy))
3009 /* First install info about properly typed and sized functions.
3010 In an ideal world this will cover all code sections, except
3011 when partitioning functions into hot and cold sections,
3012 and the horrible pasted together .init and .fini functions. */
3013 for (psy = psyms; psy < psyms + symcount; ++psy)
3016 if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
3018 asection *s = psecs[sy - syms];
3019 if (!maybe_insert_function (s, sy, FALSE, TRUE))
3024 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
3025 if (interesting_section (sec))
3026 gaps |= check_function_ranges (sec, info);
3031 /* See if we can discover more function symbols by looking at
3033 for (ibfd = info->input_bfds, bfd_idx = 0;
3035 ibfd = ibfd->link_next, bfd_idx++)
3039 if (psym_arr[bfd_idx] == NULL)
3042 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3043 if (!mark_functions_via_relocs (sec, info, FALSE))
3047 for (ibfd = info->input_bfds, bfd_idx = 0;
3049 ibfd = ibfd->link_next, bfd_idx++)
3051 Elf_Internal_Shdr *symtab_hdr;
3053 Elf_Internal_Sym *syms, *sy, **psyms, **psy;
3056 if ((psyms = psym_arr[bfd_idx]) == NULL)
3059 psecs = sec_arr[bfd_idx];
3061 symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
3062 syms = (Elf_Internal_Sym *) symtab_hdr->contents;
3065 for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
3066 if (interesting_section (sec))
3067 gaps |= check_function_ranges (sec, info);
3071 /* Finally, install all globals. */
3072 for (psy = psyms; (sy = *psy) != NULL; ++psy)
3076 s = psecs[sy - syms];
3078 /* Global syms might be improperly typed functions. */
3079 if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
3080 && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
3082 if (!maybe_insert_function (s, sy, FALSE, FALSE))
3088 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3090 extern const bfd_target bfd_elf32_spu_vec;
3093 if (ibfd->xvec != &bfd_elf32_spu_vec)
3096 /* Some of the symbols we've installed as marking the
3097 beginning of functions may have a size of zero. Extend
3098 the range of such functions to the beginning of the
3099 next symbol of interest. */
3100 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3101 if (interesting_section (sec))
3103 struct _spu_elf_section_data *sec_data;
3104 struct spu_elf_stack_info *sinfo;
3106 sec_data = spu_elf_section_data (sec);
3107 sinfo = sec_data->u.i.stack_info;
3108 if (sinfo != NULL && sinfo->num_fun != 0)
3111 bfd_vma hi = sec->size;
3113 for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
3115 sinfo->fun[fun_idx].hi = hi;
3116 hi = sinfo->fun[fun_idx].lo;
3119 sinfo->fun[0].lo = 0;
3121 /* No symbols in this section. Must be .init or .fini
3122 or something similar. */
3123 else if (!pasted_function (sec))
3129 for (ibfd = info->input_bfds, bfd_idx = 0;
3131 ibfd = ibfd->link_next, bfd_idx++)
3133 if (psym_arr[bfd_idx] == NULL)
3136 free (psym_arr[bfd_idx]);
3137 free (sec_arr[bfd_idx]);
3146 /* Iterate over all function_info we have collected, calling DOIT on
3147 each node if ROOT_ONLY is false. Only call DOIT on root nodes
3151 for_each_node (bfd_boolean (*doit) (struct function_info *,
3152 struct bfd_link_info *,
3154 struct bfd_link_info *info,
3160 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3162 extern const bfd_target bfd_elf32_spu_vec;
3165 if (ibfd->xvec != &bfd_elf32_spu_vec)
3168 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3170 struct _spu_elf_section_data *sec_data;
3171 struct spu_elf_stack_info *sinfo;
3173 if ((sec_data = spu_elf_section_data (sec)) != NULL
3174 && (sinfo = sec_data->u.i.stack_info) != NULL)
3177 for (i = 0; i < sinfo->num_fun; ++i)
3178 if (!root_only || !sinfo->fun[i].non_root)
3179 if (!doit (&sinfo->fun[i], info, param))
3187 /* Transfer call info attached to struct function_info entries for
3188 all of a given function's sections to the first entry. */
3191 transfer_calls (struct function_info *fun,
3192 struct bfd_link_info *info ATTRIBUTE_UNUSED,
3193 void *param ATTRIBUTE_UNUSED)
3195 struct function_info *start = fun->start;
3199 struct call_info *call, *call_next;
3201 while (start->start != NULL)
3202 start = start->start;
3203 for (call = fun->call_list; call != NULL; call = call_next)
3205 call_next = call->next;
3206 if (!insert_callee (start, call))
3209 fun->call_list = NULL;
3214 /* Mark nodes in the call graph that are called by some other node. */
3217 mark_non_root (struct function_info *fun,
3218 struct bfd_link_info *info ATTRIBUTE_UNUSED,
3219 void *param ATTRIBUTE_UNUSED)
3221 struct call_info *call;
3226 for (call = fun->call_list; call; call = call->next)
3228 call->fun->non_root = TRUE;
3229 mark_non_root (call->fun, 0, 0);
3234 /* Remove cycles from the call graph. Set depth of nodes. */
3237 remove_cycles (struct function_info *fun,
3238 struct bfd_link_info *info,
3241 struct call_info **callp, *call;
3242 unsigned int depth = *(unsigned int *) param;
3243 unsigned int max_depth = depth;
3247 fun->marking = TRUE;
3249 callp = &fun->call_list;
3250 while ((call = *callp) != NULL)
3252 call->max_depth = depth + !call->is_pasted;
3253 if (!call->fun->visit2)
3255 if (!remove_cycles (call->fun, info, &call->max_depth))
3257 if (max_depth < call->max_depth)
3258 max_depth = call->max_depth;
3260 else if (call->fun->marking)
3262 struct spu_link_hash_table *htab = spu_hash_table (info);
3264 if (!htab->params->auto_overlay
3265 && htab->params->stack_analysis)
3267 const char *f1 = func_name (fun);
3268 const char *f2 = func_name (call->fun);
3270 info->callbacks->info (_("Stack analysis will ignore the call "
3274 *callp = call->next;
3278 callp = &call->next;
3280 fun->marking = FALSE;
3281 *(unsigned int *) param = max_depth;
3285 /* Check that we actually visited all nodes in remove_cycles. If we
3286 didn't, then there is some cycle in the call graph not attached to
3287 any root node. Arbitrarily choose a node in the cycle as a new
3288 root and break the cycle. */
3291 mark_detached_root (struct function_info *fun,
3292 struct bfd_link_info *info,
3297 fun->non_root = FALSE;
3298 *(unsigned int *) param = 0;
3299 return remove_cycles (fun, info, param);
3302 /* Populate call_list for each function. */
3305 build_call_tree (struct bfd_link_info *info)
3310 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3312 extern const bfd_target bfd_elf32_spu_vec;
3315 if (ibfd->xvec != &bfd_elf32_spu_vec)
3318 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3319 if (!mark_functions_via_relocs (sec, info, TRUE))
3323 /* Transfer call info from hot/cold section part of function
3325 if (!spu_hash_table (info)->params->auto_overlay
3326 && !for_each_node (transfer_calls, info, 0, FALSE))
3329 /* Find the call graph root(s). */
3330 if (!for_each_node (mark_non_root, info, 0, FALSE))
3333 /* Remove cycles from the call graph. We start from the root node(s)
3334 so that we break cycles in a reasonable place. */
3336 if (!for_each_node (remove_cycles, info, &depth, TRUE))
3339 return for_each_node (mark_detached_root, info, &depth, FALSE);
3342 /* qsort predicate to sort calls by priority, max_depth then count. */
3345 sort_calls (const void *a, const void *b)
3347 struct call_info *const *c1 = a;
3348 struct call_info *const *c2 = b;
3351 delta = (*c2)->priority - (*c1)->priority;
3355 delta = (*c2)->max_depth - (*c1)->max_depth;
3359 delta = (*c2)->count - (*c1)->count;
3363 return (char *) c1 - (char *) c2;
3367 unsigned int max_overlay_size;
3370 /* Set linker_mark and gc_mark on any sections that we will put in
3371 overlays. These flags are used by the generic ELF linker, but we
3372 won't be continuing on to bfd_elf_final_link so it is OK to use
3373 them. linker_mark is clear before we get here. Set segment_mark
3374 on sections that are part of a pasted function (excluding the last
3377 Set up function rodata section if --overlay-rodata. We don't
3378 currently include merged string constant rodata sections since
3380 Sort the call graph so that the deepest nodes will be visited
3384 mark_overlay_section (struct function_info *fun,
3385 struct bfd_link_info *info,
3388 struct call_info *call;
3390 struct _mos_param *mos_param = param;
3391 struct spu_link_hash_table *htab = spu_hash_table (info);
3397 if (!fun->sec->linker_mark
3398 && (htab->params->ovly_flavour != ovly_soft_icache
3399 || htab->params->non_ia_text
3400 || strncmp (fun->sec->name, ".text.ia.", 9) == 0
3401 || strcmp (fun->sec->name, ".init") == 0
3402 || strcmp (fun->sec->name, ".fini") == 0))
3406 fun->sec->linker_mark = 1;
3407 fun->sec->gc_mark = 1;
3408 fun->sec->segment_mark = 0;
3409 /* Ensure SEC_CODE is set on this text section (it ought to
3410 be!), and SEC_CODE is clear on rodata sections. We use
3411 this flag to differentiate the two overlay section types. */
3412 fun->sec->flags |= SEC_CODE;
3414 size = fun->sec->size;
3415 if (htab->params->auto_overlay & OVERLAY_RODATA)
3419 /* Find the rodata section corresponding to this function's
3421 if (strcmp (fun->sec->name, ".text") == 0)
3423 name = bfd_malloc (sizeof (".rodata"));
3426 memcpy (name, ".rodata", sizeof (".rodata"));
3428 else if (strncmp (fun->sec->name, ".text.", 6) == 0)
3430 size_t len = strlen (fun->sec->name);
3431 name = bfd_malloc (len + 3);
3434 memcpy (name, ".rodata", sizeof (".rodata"));
3435 memcpy (name + 7, fun->sec->name + 5, len - 4);
3437 else if (strncmp (fun->sec->name, ".gnu.linkonce.t.", 16) == 0)
3439 size_t len = strlen (fun->sec->name) + 1;
3440 name = bfd_malloc (len);
3443 memcpy (name, fun->sec->name, len);
3449 asection *rodata = NULL;
3450 asection *group_sec = elf_section_data (fun->sec)->next_in_group;
3451 if (group_sec == NULL)
3452 rodata = bfd_get_section_by_name (fun->sec->owner, name);
3454 while (group_sec != NULL && group_sec != fun->sec)
3456 if (strcmp (group_sec->name, name) == 0)
3461 group_sec = elf_section_data (group_sec)->next_in_group;
3463 fun->rodata = rodata;
3466 size += fun->rodata->size;
3467 if (htab->params->line_size != 0
3468 && size > htab->params->line_size)
3470 size -= fun->rodata->size;
3475 fun->rodata->linker_mark = 1;
3476 fun->rodata->gc_mark = 1;
3477 fun->rodata->flags &= ~SEC_CODE;
3483 if (mos_param->max_overlay_size < size)
3484 mos_param->max_overlay_size = size;
3487 for (count = 0, call = fun->call_list; call != NULL; call = call->next)
3492 struct call_info **calls = bfd_malloc (count * sizeof (*calls));
3496 for (count = 0, call = fun->call_list; call != NULL; call = call->next)
3497 calls[count++] = call;
3499 qsort (calls, count, sizeof (*calls), sort_calls);
3501 fun->call_list = NULL;
3505 calls[count]->next = fun->call_list;
3506 fun->call_list = calls[count];
3511 for (call = fun->call_list; call != NULL; call = call->next)
3513 if (call->is_pasted)
3515 /* There can only be one is_pasted call per function_info. */
3516 BFD_ASSERT (!fun->sec->segment_mark);
3517 fun->sec->segment_mark = 1;
3519 if (!mark_overlay_section (call->fun, info, param))
3523 /* Don't put entry code into an overlay. The overlay manager needs
3524 a stack! Also, don't mark .ovl.init as an overlay. */
3525 if (fun->lo + fun->sec->output_offset + fun->sec->output_section->vma
3526 == info->output_bfd->start_address
3527 || strncmp (fun->sec->output_section->name, ".ovl.init", 9) == 0)
3529 fun->sec->linker_mark = 0;
3530 if (fun->rodata != NULL)
3531 fun->rodata->linker_mark = 0;
3536 /* If non-zero then unmark functions called from those within sections
3537 that we need to unmark. Unfortunately this isn't reliable since the
3538 call graph cannot know the destination of function pointer calls. */
3539 #define RECURSE_UNMARK 0
3542 asection *exclude_input_section;
3543 asection *exclude_output_section;
3544 unsigned long clearing;
3547 /* Undo some of mark_overlay_section's work. */
3550 unmark_overlay_section (struct function_info *fun,
3551 struct bfd_link_info *info,
3554 struct call_info *call;
3555 struct _uos_param *uos_param = param;
3556 unsigned int excluded = 0;
3564 if (fun->sec == uos_param->exclude_input_section
3565 || fun->sec->output_section == uos_param->exclude_output_section)
3569 uos_param->clearing += excluded;
3571 if (RECURSE_UNMARK ? uos_param->clearing : excluded)
3573 fun->sec->linker_mark = 0;
3575 fun->rodata->linker_mark = 0;
3578 for (call = fun->call_list; call != NULL; call = call->next)
3579 if (!unmark_overlay_section (call->fun, info, param))
3583 uos_param->clearing -= excluded;
3588 unsigned int lib_size;
3589 asection **lib_sections;
3592 /* Add sections we have marked as belonging to overlays to an array
3593 for consideration as non-overlay sections. The array consist of
3594 pairs of sections, (text,rodata), for functions in the call graph. */
3597 collect_lib_sections (struct function_info *fun,
3598 struct bfd_link_info *info,
3601 struct _cl_param *lib_param = param;
3602 struct call_info *call;
3609 if (!fun->sec->linker_mark || !fun->sec->gc_mark || fun->sec->segment_mark)
3612 size = fun->sec->size;
3614 size += fun->rodata->size;
3616 if (size <= lib_param->lib_size)
3618 *lib_param->lib_sections++ = fun->sec;
3619 fun->sec->gc_mark = 0;
3620 if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
3622 *lib_param->lib_sections++ = fun->rodata;
3623 fun->rodata->gc_mark = 0;
3626 *lib_param->lib_sections++ = NULL;
3629 for (call = fun->call_list; call != NULL; call = call->next)
3630 collect_lib_sections (call->fun, info, param);
3635 /* qsort predicate to sort sections by call count. */
3638 sort_lib (const void *a, const void *b)
3640 asection *const *s1 = a;
3641 asection *const *s2 = b;
3642 struct _spu_elf_section_data *sec_data;
3643 struct spu_elf_stack_info *sinfo;
3647 if ((sec_data = spu_elf_section_data (*s1)) != NULL
3648 && (sinfo = sec_data->u.i.stack_info) != NULL)
3651 for (i = 0; i < sinfo->num_fun; ++i)
3652 delta -= sinfo->fun[i].call_count;
3655 if ((sec_data = spu_elf_section_data (*s2)) != NULL
3656 && (sinfo = sec_data->u.i.stack_info) != NULL)
3659 for (i = 0; i < sinfo->num_fun; ++i)
3660 delta += sinfo->fun[i].call_count;
3669 /* Remove some sections from those marked to be in overlays. Choose
3670 those that are called from many places, likely library functions. */
3673 auto_ovl_lib_functions (struct bfd_link_info *info, unsigned int lib_size)
3676 asection **lib_sections;
3677 unsigned int i, lib_count;
3678 struct _cl_param collect_lib_param;
3679 struct function_info dummy_caller;
3680 struct spu_link_hash_table *htab;
3682 memset (&dummy_caller, 0, sizeof (dummy_caller));
3684 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3686 extern const bfd_target bfd_elf32_spu_vec;
3689 if (ibfd->xvec != &bfd_elf32_spu_vec)
3692 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3693 if (sec->linker_mark
3694 && sec->size < lib_size
3695 && (sec->flags & SEC_CODE) != 0)
3698 lib_sections = bfd_malloc (lib_count * 2 * sizeof (*lib_sections));
3699 if (lib_sections == NULL)
3700 return (unsigned int) -1;
3701 collect_lib_param.lib_size = lib_size;
3702 collect_lib_param.lib_sections = lib_sections;
3703 if (!for_each_node (collect_lib_sections, info, &collect_lib_param,
3705 return (unsigned int) -1;
3706 lib_count = (collect_lib_param.lib_sections - lib_sections) / 2;
3708 /* Sort sections so that those with the most calls are first. */
3710 qsort (lib_sections, lib_count, 2 * sizeof (*lib_sections), sort_lib);
3712 htab = spu_hash_table (info);
3713 for (i = 0; i < lib_count; i++)
3715 unsigned int tmp, stub_size;
3717 struct _spu_elf_section_data *sec_data;
3718 struct spu_elf_stack_info *sinfo;
3720 sec = lib_sections[2 * i];
3721 /* If this section is OK, its size must be less than lib_size. */
3723 /* If it has a rodata section, then add that too. */
3724 if (lib_sections[2 * i + 1])
3725 tmp += lib_sections[2 * i + 1]->size;
3726 /* Add any new overlay call stubs needed by the section. */
3729 && (sec_data = spu_elf_section_data (sec)) != NULL
3730 && (sinfo = sec_data->u.i.stack_info) != NULL)
3733 struct call_info *call;
3735 for (k = 0; k < sinfo->num_fun; ++k)
3736 for (call = sinfo->fun[k].call_list; call; call = call->next)
3737 if (call->fun->sec->linker_mark)
3739 struct call_info *p;
3740 for (p = dummy_caller.call_list; p; p = p->next)
3741 if (p->fun == call->fun)
3744 stub_size += ovl_stub_size (htab->params);
3747 if (tmp + stub_size < lib_size)
3749 struct call_info **pp, *p;
3751 /* This section fits. Mark it as non-overlay. */
3752 lib_sections[2 * i]->linker_mark = 0;
3753 if (lib_sections[2 * i + 1])
3754 lib_sections[2 * i + 1]->linker_mark = 0;
3755 lib_size -= tmp + stub_size;
3756 /* Call stubs to the section we just added are no longer
3758 pp = &dummy_caller.call_list;
3759 while ((p = *pp) != NULL)
3760 if (!p->fun->sec->linker_mark)
3762 lib_size += ovl_stub_size (htab->params);
3768 /* Add new call stubs to dummy_caller. */
3769 if ((sec_data = spu_elf_section_data (sec)) != NULL
3770 && (sinfo = sec_data->u.i.stack_info) != NULL)
3773 struct call_info *call;
3775 for (k = 0; k < sinfo->num_fun; ++k)
3776 for (call = sinfo->fun[k].call_list;
3779 if (call->fun->sec->linker_mark)
3781 struct call_info *callee;
3782 callee = bfd_malloc (sizeof (*callee));
3784 return (unsigned int) -1;
3786 if (!insert_callee (&dummy_caller, callee))
3792 while (dummy_caller.call_list != NULL)
3794 struct call_info *call = dummy_caller.call_list;
3795 dummy_caller.call_list = call->next;
3798 for (i = 0; i < 2 * lib_count; i++)
3799 if (lib_sections[i])
3800 lib_sections[i]->gc_mark = 1;
3801 free (lib_sections);
3805 /* Build an array of overlay sections. The deepest node's section is
3806 added first, then its parent node's section, then everything called
3807 from the parent section. The idea being to group sections to
3808 minimise calls between different overlays. */
3811 collect_overlays (struct function_info *fun,
3812 struct bfd_link_info *info,
3815 struct call_info *call;
3816 bfd_boolean added_fun;
3817 asection ***ovly_sections = param;
3823 for (call = fun->call_list; call != NULL; call = call->next)
3824 if (!call->is_pasted)
3826 if (!collect_overlays (call->fun, info, ovly_sections))
3832 if (fun->sec->linker_mark && fun->sec->gc_mark)
3834 fun->sec->gc_mark = 0;
3835 *(*ovly_sections)++ = fun->sec;
3836 if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
3838 fun->rodata->gc_mark = 0;
3839 *(*ovly_sections)++ = fun->rodata;
3842 *(*ovly_sections)++ = NULL;
3845 /* Pasted sections must stay with the first section. We don't
3846 put pasted sections in the array, just the first section.
3847 Mark subsequent sections as already considered. */
3848 if (fun->sec->segment_mark)
3850 struct function_info *call_fun = fun;
3853 for (call = call_fun->call_list; call != NULL; call = call->next)
3854 if (call->is_pasted)
3856 call_fun = call->fun;
3857 call_fun->sec->gc_mark = 0;
3858 if (call_fun->rodata)
3859 call_fun->rodata->gc_mark = 0;
3865 while (call_fun->sec->segment_mark);
3869 for (call = fun->call_list; call != NULL; call = call->next)
3870 if (!collect_overlays (call->fun, info, ovly_sections))
3875 struct _spu_elf_section_data *sec_data;
3876 struct spu_elf_stack_info *sinfo;
3878 if ((sec_data = spu_elf_section_data (fun->sec)) != NULL
3879 && (sinfo = sec_data->u.i.stack_info) != NULL)
3882 for (i = 0; i < sinfo->num_fun; ++i)
3883 if (!collect_overlays (&sinfo->fun[i], info, ovly_sections))
3891 struct _sum_stack_param {
3893 size_t overall_stack;
3894 bfd_boolean emit_stack_syms;
3897 /* Descend the call graph for FUN, accumulating total stack required. */
3900 sum_stack (struct function_info *fun,
3901 struct bfd_link_info *info,
3904 struct call_info *call;
3905 struct function_info *max;
3906 size_t stack, cum_stack;
3908 bfd_boolean has_call;
3909 struct _sum_stack_param *sum_stack_param = param;
3910 struct spu_link_hash_table *htab;
3912 cum_stack = fun->stack;
3913 sum_stack_param->cum_stack = cum_stack;
3919 for (call = fun->call_list; call; call = call->next)
3921 if (!call->is_pasted)
3923 if (!sum_stack (call->fun, info, sum_stack_param))
3925 stack = sum_stack_param->cum_stack;
3926 /* Include caller stack for normal calls, don't do so for
3927 tail calls. fun->stack here is local stack usage for
3929 if (!call->is_tail || call->is_pasted || call->fun->start != NULL)
3930 stack += fun->stack;
3931 if (cum_stack < stack)
3938 sum_stack_param->cum_stack = cum_stack;
3940 /* Now fun->stack holds cumulative stack. */
3941 fun->stack = cum_stack;
3945 && sum_stack_param->overall_stack < cum_stack)
3946 sum_stack_param->overall_stack = cum_stack;
3948 htab = spu_hash_table (info);
3949 if (htab->params->auto_overlay)
3952 f1 = func_name (fun);
3953 if (htab->params->stack_analysis)
3956 info->callbacks->info (_(" %s: 0x%v\n"), f1, (bfd_vma) cum_stack);
3957 info->callbacks->minfo (_("%s: 0x%v 0x%v\n"),
3958 f1, (bfd_vma) stack, (bfd_vma) cum_stack);
3962 info->callbacks->minfo (_(" calls:\n"));
3963 for (call = fun->call_list; call; call = call->next)
3964 if (!call->is_pasted)
3966 const char *f2 = func_name (call->fun);
3967 const char *ann1 = call->fun == max ? "*" : " ";
3968 const char *ann2 = call->is_tail ? "t" : " ";
3970 info->callbacks->minfo (_(" %s%s %s\n"), ann1, ann2, f2);
3975 if (sum_stack_param->emit_stack_syms)
3977 char *name = bfd_malloc (18 + strlen (f1));
3978 struct elf_link_hash_entry *h;
3983 if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
3984 sprintf (name, "__stack_%s", f1);
3986 sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);
3988 h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
3991 && (h->root.type == bfd_link_hash_new
3992 || h->root.type == bfd_link_hash_undefined
3993 || h->root.type == bfd_link_hash_undefweak))
3995 h->root.type = bfd_link_hash_defined;
3996 h->root.u.def.section = bfd_abs_section_ptr;
3997 h->root.u.def.value = cum_stack;
4002 h->ref_regular_nonweak = 1;
4003 h->forced_local = 1;
4011 /* SEC is part of a pasted function. Return the call_info for the
4012 next section of this function. */
4014 static struct call_info *
4015 find_pasted_call (asection *sec)
4017 struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
4018 struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
4019 struct call_info *call;
4022 for (k = 0; k < sinfo->num_fun; ++k)
4023 for (call = sinfo->fun[k].call_list; call != NULL; call = call->next)
4024 if (call->is_pasted)
4030 /* qsort predicate to sort bfds by file name. */
4033 sort_bfds (const void *a, const void *b)
4035 bfd *const *abfd1 = a;
4036 bfd *const *abfd2 = b;
4038 return strcmp ((*abfd1)->filename, (*abfd2)->filename);
4042 print_one_overlay_section (FILE *script,
4045 unsigned int ovlynum,
4046 unsigned int *ovly_map,
4047 asection **ovly_sections,
4048 struct bfd_link_info *info)
4052 for (j = base; j < count && ovly_map[j] == ovlynum; j++)
4054 asection *sec = ovly_sections[2 * j];
4056 if (fprintf (script, " %s%c%s (%s)\n",
4057 (sec->owner->my_archive != NULL
4058 ? sec->owner->my_archive->filename : ""),
4059 info->path_separator,
4060 sec->owner->filename,
4063 if (sec->segment_mark)
4065 struct call_info *call = find_pasted_call (sec);
4066 while (call != NULL)
4068 struct function_info *call_fun = call->fun;
4069 sec = call_fun->sec;
4070 if (fprintf (script, " %s%c%s (%s)\n",
4071 (sec->owner->my_archive != NULL
4072 ? sec->owner->my_archive->filename : ""),
4073 info->path_separator,
4074 sec->owner->filename,
4077 for (call = call_fun->call_list; call; call = call->next)
4078 if (call->is_pasted)
4084 for (j = base; j < count && ovly_map[j] == ovlynum; j++)
4086 asection *sec = ovly_sections[2 * j + 1];
4088 && fprintf (script, " %s%c%s (%s)\n",
4089 (sec->owner->my_archive != NULL
4090 ? sec->owner->my_archive->filename : ""),
4091 info->path_separator,
4092 sec->owner->filename,
4096 sec = ovly_sections[2 * j];
4097 if (sec->segment_mark)
4099 struct call_info *call = find_pasted_call (sec);
4100 while (call != NULL)
4102 struct function_info *call_fun = call->fun;
4103 sec = call_fun->rodata;
4105 && fprintf (script, " %s%c%s (%s)\n",
4106 (sec->owner->my_archive != NULL
4107 ? sec->owner->my_archive->filename : ""),
4108 info->path_separator,
4109 sec->owner->filename,
4112 for (call = call_fun->call_list; call; call = call->next)
4113 if (call->is_pasted)
4122 /* Handle --auto-overlay. */
4125 spu_elf_auto_overlay (struct bfd_link_info *info)
4129 struct elf_segment_map *m;
4130 unsigned int fixed_size, lo, hi;
4131 struct spu_link_hash_table *htab;
4132 unsigned int base, i, count, bfd_count;
4133 unsigned int region, ovlynum;
4134 asection **ovly_sections, **ovly_p;
4135 unsigned int *ovly_map;
4137 unsigned int total_overlay_size, overlay_size;
4138 const char *ovly_mgr_entry;
4139 struct elf_link_hash_entry *h;
4140 struct _mos_param mos_param;
4141 struct _uos_param uos_param;
4142 struct function_info dummy_caller;
4144 /* Find the extents of our loadable image. */
4145 lo = (unsigned int) -1;
4147 for (m = elf_tdata (info->output_bfd)->segment_map; m != NULL; m = m->next)
4148 if (m->p_type == PT_LOAD)
4149 for (i = 0; i < m->count; i++)
4150 if (m->sections[i]->size != 0)
4152 if (m->sections[i]->vma < lo)
4153 lo = m->sections[i]->vma;
4154 if (m->sections[i]->vma + m->sections[i]->size - 1 > hi)
4155 hi = m->sections[i]->vma + m->sections[i]->size - 1;
4157 fixed_size = hi + 1 - lo;
4159 if (!discover_functions (info))
4162 if (!build_call_tree (info))
4165 htab = spu_hash_table (info);
4166 if (htab->reserved == 0)
4168 struct _sum_stack_param sum_stack_param;
4170 sum_stack_param.emit_stack_syms = 0;
4171 sum_stack_param.overall_stack = 0;
4172 if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
4174 htab->reserved = sum_stack_param.overall_stack + htab->extra_stack_space;
4177 /* No need for overlays if everything already fits. */
4178 if (fixed_size + htab->reserved <= htab->local_store
4179 && htab->params->ovly_flavour != ovly_soft_icache)
4181 htab->params->auto_overlay = 0;
4185 uos_param.exclude_input_section = 0;
4186 uos_param.exclude_output_section
4187 = bfd_get_section_by_name (info->output_bfd, ".interrupt");
4189 ovly_mgr_entry = "__ovly_load";
4190 if (htab->params->ovly_flavour == ovly_soft_icache)
4191 ovly_mgr_entry = "__icache_br_handler";
4192 h = elf_link_hash_lookup (&htab->elf, ovly_mgr_entry,
4193 FALSE, FALSE, FALSE);
4195 && (h->root.type == bfd_link_hash_defined
4196 || h->root.type == bfd_link_hash_defweak)
4199 /* We have a user supplied overlay manager. */
4200 uos_param.exclude_input_section = h->root.u.def.section;
4204 /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
4205 builtin version to .text, and will adjust .text size. */
4206 fixed_size += (*htab->params->spu_elf_load_ovl_mgr) ();
4209 /* Mark overlay sections, and find max overlay section size. */
4210 mos_param.max_overlay_size = 0;
4211 if (!for_each_node (mark_overlay_section, info, &mos_param, TRUE))
4214 /* We can't put the overlay manager or interrupt routines in
4216 uos_param.clearing = 0;
4217 if ((uos_param.exclude_input_section
4218 || uos_param.exclude_output_section)
4219 && !for_each_node (unmark_overlay_section, info, &uos_param, TRUE))
4223 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
4225 bfd_arr = bfd_malloc (bfd_count * sizeof (*bfd_arr));
4226 if (bfd_arr == NULL)
4229 /* Count overlay sections, and subtract their sizes from "fixed_size". */
4232 total_overlay_size = 0;
4233 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
4235 extern const bfd_target bfd_elf32_spu_vec;
4237 unsigned int old_count;
4239 if (ibfd->xvec != &bfd_elf32_spu_vec)
4243 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
4244 if (sec->linker_mark)
4246 if ((sec->flags & SEC_CODE) != 0)
4248 fixed_size -= sec->size;
4249 total_overlay_size += sec->size;
4251 else if ((sec->flags & (SEC_ALLOC | SEC_LOAD)) == (SEC_ALLOC | SEC_LOAD)
4252 && sec->output_section->owner == info->output_bfd
4253 && strncmp (sec->output_section->name, ".ovl.init", 9) == 0)
4254 fixed_size -= sec->size;
4255 if (count != old_count)
4256 bfd_arr[bfd_count++] = ibfd;
4259 /* Since the overlay link script selects sections by file name and
4260 section name, ensure that file names are unique. */
4263 bfd_boolean ok = TRUE;
4265 qsort (bfd_arr, bfd_count, sizeof (*bfd_arr), sort_bfds);
4266 for (i = 1; i < bfd_count; ++i)
4267 if (strcmp (bfd_arr[i - 1]->filename, bfd_arr[i]->filename) == 0)
4269 if (bfd_arr[i - 1]->my_archive == bfd_arr[i]->my_archive)
4271 if (bfd_arr[i - 1]->my_archive && bfd_arr[i]->my_archive)
4272 info->callbacks->einfo (_("%s duplicated in %s\n"),
4273 bfd_arr[i]->filename,
4274 bfd_arr[i]->my_archive->filename);
4276 info->callbacks->einfo (_("%s duplicated\n"),
4277 bfd_arr[i]->filename);
4283 info->callbacks->einfo (_("sorry, no support for duplicate "
4284 "object files in auto-overlay script\n"));
4285 bfd_set_error (bfd_error_bad_value);
4291 fixed_size += htab->reserved;
4292 fixed_size += htab->non_ovly_stub * ovl_stub_size (htab->params);
4293 if (fixed_size + mos_param.max_overlay_size <= htab->local_store)
4295 if (htab->params->ovly_flavour == ovly_soft_icache)
4297 /* Stubs in the non-icache area are bigger. */
4298 fixed_size += htab->non_ovly_stub * 16;
4299 /* Space for icache manager tables.
4300 a) Tag array, one quadword per cache line.
4301 - word 0: ia address of present line, init to zero. */
4302 fixed_size += 16 << htab->num_lines_log2;
4303 /* b) Rewrite "to" list, one quadword per cache line. */
4304 fixed_size += 16 << htab->num_lines_log2;
4305 /* c) Rewrite "from" list, one byte per outgoing branch (rounded up
4306 to a power-of-two number of full quadwords) per cache line. */
4307 fixed_size += 16 << (htab->fromelem_size_log2
4308 + htab->num_lines_log2);
4309 /* d) Pointer to __ea backing store (toe), 1 quadword. */
4314 /* Guess number of overlays. Assuming overlay buffer is on
4315 average only half full should be conservative. */
4316 ovlynum = (total_overlay_size * 2 * htab->params->num_lines
4317 / (htab->local_store - fixed_size));
4318 /* Space for _ovly_table[], _ovly_buf_table[] and toe. */
4319 fixed_size += ovlynum * 16 + 16 + 4 + 16;
4323 if (fixed_size + mos_param.max_overlay_size > htab->local_store)
4324 info->callbacks->einfo (_("non-overlay size of 0x%v plus maximum overlay "
4325 "size of 0x%v exceeds local store\n"),
4326 (bfd_vma) fixed_size,
4327 (bfd_vma) mos_param.max_overlay_size);
4329 /* Now see if we should put some functions in the non-overlay area. */
4330 else if (fixed_size < htab->overlay_fixed)
4332 unsigned int max_fixed, lib_size;
4334 max_fixed = htab->local_store - mos_param.max_overlay_size;
4335 if (max_fixed > htab->overlay_fixed)
4336 max_fixed = htab->overlay_fixed;
4337 lib_size = max_fixed - fixed_size;
4338 lib_size = auto_ovl_lib_functions (info, lib_size);
4339 if (lib_size == (unsigned int) -1)
4341 fixed_size = max_fixed - lib_size;
4344 /* Build an array of sections, suitably sorted to place into
4346 ovly_sections = bfd_malloc (2 * count * sizeof (*ovly_sections));
4347 if (ovly_sections == NULL)
4349 ovly_p = ovly_sections;
4350 if (!for_each_node (collect_overlays, info, &ovly_p, TRUE))
4352 count = (size_t) (ovly_p - ovly_sections) / 2;
4353 ovly_map = bfd_malloc (count * sizeof (*ovly_map));
4354 if (ovly_map == NULL)
4357 memset (&dummy_caller, 0, sizeof (dummy_caller));
4358 overlay_size = (htab->local_store - fixed_size) / htab->params->num_lines;
4359 if (htab->params->line_size != 0)
4360 overlay_size = htab->params->line_size;
4363 while (base < count)
4365 unsigned int size = 0;
4367 for (i = base; i < count; i++)
4371 unsigned int num_stubs;
4372 struct call_info *call, *pasty;
4373 struct _spu_elf_section_data *sec_data;
4374 struct spu_elf_stack_info *sinfo;
4377 /* See whether we can add this section to the current
4378 overlay without overflowing our overlay buffer. */
4379 sec = ovly_sections[2 * i];
4380 tmp = size + sec->size;
4381 if (ovly_sections[2 * i + 1])
4382 tmp += ovly_sections[2 * i + 1]->size;
4383 if (tmp > overlay_size)
4385 if (sec->segment_mark)
4387 /* Pasted sections must stay together, so add their
4389 struct call_info *pasty = find_pasted_call (sec);
4390 while (pasty != NULL)
4392 struct function_info *call_fun = pasty->fun;
4393 tmp += call_fun->sec->size;
4394 if (call_fun->rodata)
4395 tmp += call_fun->rodata->size;
4396 for (pasty = call_fun->call_list; pasty; pasty = pasty->next)
4397 if (pasty->is_pasted)
4401 if (tmp > overlay_size)
4404 /* If we add this section, we might need new overlay call
4405 stubs. Add any overlay section calls to dummy_call. */
4407 sec_data = spu_elf_section_data (sec);
4408 sinfo = sec_data->u.i.stack_info;
4409 for (k = 0; k < sinfo->num_fun; ++k)
4410 for (call = sinfo->fun[k].call_list; call; call = call->next)
4411 if (call->is_pasted)
4413 BFD_ASSERT (pasty == NULL);
4416 else if (call->fun->sec->linker_mark)
4418 if (!copy_callee (&dummy_caller, call))
4421 while (pasty != NULL)
4423 struct function_info *call_fun = pasty->fun;
4425 for (call = call_fun->call_list; call; call = call->next)
4426 if (call->is_pasted)
4428 BFD_ASSERT (pasty == NULL);
4431 else if (!copy_callee (&dummy_caller, call))
4435 /* Calculate call stub size. */
4437 for (call = dummy_caller.call_list; call; call = call->next)
4440 unsigned int stub_delta = 1;
4442 if (htab->params->ovly_flavour == ovly_soft_icache)
4443 stub_delta = call->count;
4444 num_stubs += stub_delta;
4446 /* If the call is within this overlay, we won't need a
4448 for (k = base; k < i + 1; k++)
4449 if (call->fun->sec == ovly_sections[2 * k])
4451 num_stubs -= stub_delta;
4455 if (htab->params->ovly_flavour == ovly_soft_icache
4456 && num_stubs > htab->params->max_branch)
4458 if (tmp + num_stubs * ovl_stub_size (htab->params)
4466 info->callbacks->einfo (_("%B:%A%s exceeds overlay size\n"),
4467 ovly_sections[2 * i]->owner,
4468 ovly_sections[2 * i],
4469 ovly_sections[2 * i + 1] ? " + rodata" : "");
4470 bfd_set_error (bfd_error_bad_value);
4474 while (dummy_caller.call_list != NULL)
4476 struct call_info *call = dummy_caller.call_list;
4477 dummy_caller.call_list = call->next;
4483 ovly_map[base++] = ovlynum;
4486 script = htab->params->spu_elf_open_overlay_script ();
4488 if (fprintf (script, "SECTIONS\n{\n") <= 0)
4491 if (htab->params->ovly_flavour == ovly_soft_icache)
4493 if (fprintf (script,
4494 " .data.icache ALIGN (16) : { *(.ovtab) *(.data.icache) }\n"
4495 " . = ALIGN (%u);\n"
4496 " .ovl.init : { *(.ovl.init) }\n"
4497 " . = ABSOLUTE (ADDR (.ovl.init));\n",
4498 htab->params->line_size) <= 0)
4503 while (base < count)
4505 unsigned int indx = ovlynum - 1;
4506 unsigned int vma, lma;
4508 vma = (indx & (htab->params->num_lines - 1)) << htab->line_size_log2;
4509 lma = indx << htab->line_size_log2;
4511 if (fprintf (script, " .ovly%u ABSOLUTE (ADDR (.ovl.init)) + %u "
4512 ": AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16) + %u) {\n",
4513 ovlynum, vma, lma) <= 0)
4516 base = print_one_overlay_section (script, base, count, ovlynum,
4517 ovly_map, ovly_sections, info);
4518 if (base == (unsigned) -1)
4521 if (fprintf (script, " }\n") <= 0)
4527 if (fprintf (script, " . = ABSOLUTE (ADDR (.ovl.init)) + %u;\n",
4528 1 << (htab->num_lines_log2 + htab->line_size_log2)) <= 0)
4533 if (fprintf (script,
4534 " . = ALIGN (16);\n"
4535 " .ovl.init : { *(.ovl.init) }\n"
4536 " . = ABSOLUTE (ADDR (.ovl.init));\n") <= 0)
4539 for (region = 1; region <= htab->params->num_lines; region++)
4543 while (base < count && ovly_map[base] < ovlynum)
4551 /* We need to set lma since we are overlaying .ovl.init. */
4552 if (fprintf (script,
4553 " OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))\n {\n") <= 0)
4558 if (fprintf (script, " OVERLAY :\n {\n") <= 0)
4562 while (base < count)
4564 if (fprintf (script, " .ovly%u {\n", ovlynum) <= 0)
4567 base = print_one_overlay_section (script, base, count, ovlynum,
4568 ovly_map, ovly_sections, info);
4569 if (base == (unsigned) -1)
4572 if (fprintf (script, " }\n") <= 0)
4575 ovlynum += htab->params->num_lines;
4576 while (base < count && ovly_map[base] < ovlynum)
4580 if (fprintf (script, " }\n") <= 0)
4587 free (ovly_sections);
4589 if (fprintf (script, "}\nINSERT BEFORE .text;\n") <= 0)
4591 if (fclose (script) != 0)
4594 if (htab->params->auto_overlay & AUTO_RELINK)
4595 (*htab->params->spu_elf_relink) ();
4600 bfd_set_error (bfd_error_system_call);
4602 info->callbacks->einfo ("%F%P: auto overlay error: %E\n");
4606 /* Provide an estimate of total stack required. */
4609 spu_elf_stack_analysis (struct bfd_link_info *info)
4611 struct spu_link_hash_table *htab;
4612 struct _sum_stack_param sum_stack_param;
4614 if (!discover_functions (info))
4617 if (!build_call_tree (info))
4620 htab = spu_hash_table (info);
4621 if (htab->params->stack_analysis)
4623 info->callbacks->info (_("Stack size for call graph root nodes.\n"));
4624 info->callbacks->minfo (_("\nStack size for functions. "
4625 "Annotations: '*' max stack, 't' tail call\n"));
4628 sum_stack_param.emit_stack_syms = htab->params->emit_stack_syms;
4629 sum_stack_param.overall_stack = 0;
4630 if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
4633 if (htab->params->stack_analysis)
4634 info->callbacks->info (_("Maximum stack required is 0x%v\n"),
4635 (bfd_vma) sum_stack_param.overall_stack);
4639 /* Perform a final link. */
4642 spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
4644 struct spu_link_hash_table *htab = spu_hash_table (info);
4646 if (htab->params->auto_overlay)
4647 spu_elf_auto_overlay (info);
4649 if ((htab->params->stack_analysis
4650 || (htab->params->ovly_flavour == ovly_soft_icache
4651 && htab->params->lrlive_analysis))
4652 && !spu_elf_stack_analysis (info))
4653 info->callbacks->einfo ("%X%P: stack/lrlive analysis error: %E\n");
4655 if (!spu_elf_build_stubs (info))
4656 info->callbacks->einfo ("%F%P: can not build overlay stubs: %E\n");
4658 return bfd_elf_final_link (output_bfd, info);
4661 /* Called when not normally emitting relocs, ie. !info->relocatable
4662 and !info->emitrelocations. Returns a count of special relocs
4663 that need to be emitted. */
4666 spu_elf_count_relocs (struct bfd_link_info *info, asection *sec)
4668 Elf_Internal_Rela *relocs;
4669 unsigned int count = 0;
4671 relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
4675 Elf_Internal_Rela *rel;
4676 Elf_Internal_Rela *relend = relocs + sec->reloc_count;
4678 for (rel = relocs; rel < relend; rel++)
4680 int r_type = ELF32_R_TYPE (rel->r_info);
4681 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
4685 if (elf_section_data (sec)->relocs != relocs)
4692 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD. */
4695 spu_elf_relocate_section (bfd *output_bfd,
4696 struct bfd_link_info *info,
4698 asection *input_section,
4700 Elf_Internal_Rela *relocs,
4701 Elf_Internal_Sym *local_syms,
4702 asection **local_sections)
4704 Elf_Internal_Shdr *symtab_hdr;
4705 struct elf_link_hash_entry **sym_hashes;
4706 Elf_Internal_Rela *rel, *relend;
4707 struct spu_link_hash_table *htab;
4710 bfd_boolean emit_these_relocs = FALSE;
4711 bfd_boolean is_ea_sym;
4713 unsigned int iovl = 0;
4715 htab = spu_hash_table (info);
4716 stubs = (htab->stub_sec != NULL
4717 && maybe_needs_stubs (input_section));
4718 iovl = overlay_index (input_section);
4719 ea = bfd_get_section_by_name (output_bfd, "._ea");
4720 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4721 sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));
4724 relend = relocs + input_section->reloc_count;
4725 for (; rel < relend; rel++)
4728 reloc_howto_type *howto;
4729 unsigned int r_symndx;
4730 Elf_Internal_Sym *sym;
4732 struct elf_link_hash_entry *h;
4733 const char *sym_name;
4736 bfd_reloc_status_type r;
4737 bfd_boolean unresolved_reloc;
4739 enum _stub_type stub_type;
4741 r_symndx = ELF32_R_SYM (rel->r_info);
4742 r_type = ELF32_R_TYPE (rel->r_info);
4743 howto = elf_howto_table + r_type;
4744 unresolved_reloc = FALSE;
4749 if (r_symndx < symtab_hdr->sh_info)
4751 sym = local_syms + r_symndx;
4752 sec = local_sections[r_symndx];
4753 sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
4754 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
4758 if (sym_hashes == NULL)
4761 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
4763 while (h->root.type == bfd_link_hash_indirect
4764 || h->root.type == bfd_link_hash_warning)
4765 h = (struct elf_link_hash_entry *) h->root.u.i.link;
4768 if (h->root.type == bfd_link_hash_defined
4769 || h->root.type == bfd_link_hash_defweak)
4771 sec = h->root.u.def.section;
4773 || sec->output_section == NULL)
4774 /* Set a flag that will be cleared later if we find a
4775 relocation value for this symbol. output_section
4776 is typically NULL for symbols satisfied by a shared
4778 unresolved_reloc = TRUE;
4780 relocation = (h->root.u.def.value
4781 + sec->output_section->vma
4782 + sec->output_offset);
4784 else if (h->root.type == bfd_link_hash_undefweak)
4786 else if (info->unresolved_syms_in_objects == RM_IGNORE
4787 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
4789 else if (!info->relocatable
4790 && !(r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64))
4793 err = (info->unresolved_syms_in_objects == RM_GENERATE_ERROR
4794 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT);
4795 if (!info->callbacks->undefined_symbol (info,
4796 h->root.root.string,
4799 rel->r_offset, err))
4803 sym_name = h->root.root.string;
4806 if (sec != NULL && elf_discarded_section (sec))
4808 /* For relocs against symbols from removed linkonce sections,
4809 or sections discarded by a linker script, we just want the
4810 section contents zeroed. Avoid any special processing. */
4811 _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
4817 if (info->relocatable)
4820 is_ea_sym = (ea != NULL
4822 && sec->output_section == ea);
4824 /* If this symbol is in an overlay area, we may need to relocate
4825 to the overlay stub. */
4826 addend = rel->r_addend;
4829 && (stub_type = needs_ovl_stub (h, sym, sec, input_section, rel,
4830 contents, info)) != no_stub)
4832 unsigned int ovl = 0;
4833 struct got_entry *g, **head;
4835 if (stub_type != nonovl_stub)
4839 head = &h->got.glist;
4841 head = elf_local_got_ents (input_bfd) + r_symndx;
4843 for (g = *head; g != NULL; g = g->next)
4844 if (htab->params->ovly_flavour == ovly_soft_icache
4845 ? g->br_addr == (rel->r_offset
4846 + input_section->output_offset
4847 + input_section->output_section->vma)
4848 : g->addend == addend && (g->ovl == ovl || g->ovl == 0))
4853 relocation = g->stub_addr;
4858 /* For soft icache, encode the overlay index into addresses. */
4859 if (htab->params->ovly_flavour == ovly_soft_icache
4860 && (r_type == R_SPU_ADDR16_HI
4861 || r_type == R_SPU_ADDR32 || r_type == R_SPU_REL32)
4864 unsigned int ovl = overlay_index (sec);
4867 unsigned int set_id = ((ovl - 1) >> htab->num_lines_log2) + 1;
4868 relocation += set_id << 18;
4873 if (unresolved_reloc)
4875 else if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
4879 /* ._ea is a special section that isn't allocated in SPU
4880 memory, but rather occupies space in PPU memory as
4881 part of an embedded ELF image. If this reloc is
4882 against a symbol defined in ._ea, then transform the
4883 reloc into an equivalent one without a symbol
4884 relative to the start of the ELF image. */
4885 rel->r_addend += (relocation
4887 + elf_section_data (ea)->this_hdr.sh_offset);
4888 rel->r_info = ELF32_R_INFO (0, r_type);
4890 emit_these_relocs = TRUE;
4894 unresolved_reloc = TRUE;
4896 if (unresolved_reloc)
4898 (*_bfd_error_handler)
4899 (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
4901 bfd_get_section_name (input_bfd, input_section),
4902 (long) rel->r_offset,
4908 r = _bfd_final_link_relocate (howto,
4912 rel->r_offset, relocation, addend);
4914 if (r != bfd_reloc_ok)
4916 const char *msg = (const char *) 0;
4920 case bfd_reloc_overflow:
4921 if (!((*info->callbacks->reloc_overflow)
4922 (info, (h ? &h->root : NULL), sym_name, howto->name,
4923 (bfd_vma) 0, input_bfd, input_section, rel->r_offset)))
4927 case bfd_reloc_undefined:
4928 if (!((*info->callbacks->undefined_symbol)
4929 (info, sym_name, input_bfd, input_section,
4930 rel->r_offset, TRUE)))
4934 case bfd_reloc_outofrange:
4935 msg = _("internal error: out of range error");
4938 case bfd_reloc_notsupported:
4939 msg = _("internal error: unsupported relocation error");
4942 case bfd_reloc_dangerous:
4943 msg = _("internal error: dangerous error");
4947 msg = _("internal error: unknown error");
4952 if (!((*info->callbacks->warning)
4953 (info, msg, sym_name, input_bfd, input_section,
4962 && emit_these_relocs
4963 && !info->emitrelocations)
4965 Elf_Internal_Rela *wrel;
4966 Elf_Internal_Shdr *rel_hdr;
4968 wrel = rel = relocs;
4969 relend = relocs + input_section->reloc_count;
4970 for (; rel < relend; rel++)
4974 r_type = ELF32_R_TYPE (rel->r_info);
4975 if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
4978 input_section->reloc_count = wrel - relocs;
4979 /* Backflips for _bfd_elf_link_output_relocs. */
4980 rel_hdr = &elf_section_data (input_section)->rel_hdr;
4981 rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
4988 /* Adjust _SPUEAR_ syms to point at their overlay stubs. */
4991 spu_elf_output_symbol_hook (struct bfd_link_info *info,
4992 const char *sym_name ATTRIBUTE_UNUSED,
4993 Elf_Internal_Sym *sym,
4994 asection *sym_sec ATTRIBUTE_UNUSED,
4995 struct elf_link_hash_entry *h)
4997 struct spu_link_hash_table *htab = spu_hash_table (info);
4999 if (!info->relocatable
5000 && htab->stub_sec != NULL
5002 && (h->root.type == bfd_link_hash_defined
5003 || h->root.type == bfd_link_hash_defweak)
5005 && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
5007 struct got_entry *g;
5009 for (g = h->got.glist; g != NULL; g = g->next)
5010 if (htab->params->ovly_flavour == ovly_soft_icache
5011 ? g->br_addr == g->stub_addr
5012 : g->addend == 0 && g->ovl == 0)
5014 sym->st_shndx = (_bfd_elf_section_from_bfd_section
5015 (htab->stub_sec[0]->output_section->owner,
5016 htab->stub_sec[0]->output_section));
5017 sym->st_value = g->stub_addr;
5025 static int spu_plugin = 0;
5028 spu_elf_plugin (int val)
5033 /* Set ELF header e_type for plugins. */
5036 spu_elf_post_process_headers (bfd *abfd,
5037 struct bfd_link_info *info ATTRIBUTE_UNUSED)
5041 Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);
5043 i_ehdrp->e_type = ET_DYN;
5047 /* We may add an extra PT_LOAD segment for .toe. We also need extra
5048 segments for overlays. */
5051 spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
5058 struct spu_link_hash_table *htab = spu_hash_table (info);
5059 extra = htab->num_overlays;
5065 sec = bfd_get_section_by_name (abfd, ".toe");
5066 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
5072 /* Remove .toe section from other PT_LOAD segments and put it in
5073 a segment of its own. Put overlays in separate segments too. */
5076 spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
5079 struct elf_segment_map *m, *m_overlay;
5080 struct elf_segment_map **p, **p_overlay;
5086 toe = bfd_get_section_by_name (abfd, ".toe");
5087 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
5088 if (m->p_type == PT_LOAD && m->count > 1)
5089 for (i = 0; i < m->count; i++)
5090 if ((s = m->sections[i]) == toe
5091 || spu_elf_section_data (s)->u.o.ovl_index != 0)
5093 struct elf_segment_map *m2;
5096 if (i + 1 < m->count)
5098 amt = sizeof (struct elf_segment_map);
5099 amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
5100 m2 = bfd_zalloc (abfd, amt);
5103 m2->count = m->count - (i + 1);
5104 memcpy (m2->sections, m->sections + i + 1,
5105 m2->count * sizeof (m->sections[0]));
5106 m2->p_type = PT_LOAD;
5114 amt = sizeof (struct elf_segment_map);
5115 m2 = bfd_zalloc (abfd, amt);
5118 m2->p_type = PT_LOAD;
5120 m2->sections[0] = s;
5128 /* Some SPU ELF loaders ignore the PF_OVERLAY flag and just load all
5129 PT_LOAD segments. This can cause the .ovl.init section to be
5130 overwritten with the contents of some overlay segment. To work
5131 around this issue, we ensure that all PF_OVERLAY segments are
5132 sorted first amongst the program headers; this ensures that even
5133 with a broken loader, the .ovl.init section (which is not marked
5134 as PF_OVERLAY) will be placed into SPU local store on startup. */
5136 /* Move all overlay segments onto a separate list. */
5137 p = &elf_tdata (abfd)->segment_map;
5138 p_overlay = &m_overlay;
5141 if ((*p)->p_type == PT_LOAD && (*p)->count == 1
5142 && spu_elf_section_data ((*p)->sections[0])->u.o.ovl_index != 0)
5144 struct elf_segment_map *m = *p;
5147 p_overlay = &m->next;
5154 /* Re-insert overlay segments at the head of the segment map. */
5155 *p_overlay = elf_tdata (abfd)->segment_map;
5156 elf_tdata (abfd)->segment_map = m_overlay;
5161 /* Tweak the section type of .note.spu_name. */
5164 spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
5165 Elf_Internal_Shdr *hdr,
5168 if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
5169 hdr->sh_type = SHT_NOTE;
5173 /* Tweak phdrs before writing them out. */
5176 spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
5178 const struct elf_backend_data *bed;
5179 struct elf_obj_tdata *tdata;
5180 Elf_Internal_Phdr *phdr, *last;
5181 struct spu_link_hash_table *htab;
5188 bed = get_elf_backend_data (abfd);
5189 tdata = elf_tdata (abfd);
5191 count = tdata->program_header_size / bed->s->sizeof_phdr;
5192 htab = spu_hash_table (info);
5193 if (htab->num_overlays != 0)
5195 struct elf_segment_map *m;
5198 for (i = 0, m = elf_tdata (abfd)->segment_map; m; ++i, m = m->next)
5200 && (o = spu_elf_section_data (m->sections[0])->u.o.ovl_index) != 0)
5202 /* Mark this as an overlay header. */
5203 phdr[i].p_flags |= PF_OVERLAY;
5205 if (htab->ovtab != NULL && htab->ovtab->size != 0
5206 && htab->params->ovly_flavour != ovly_soft_icache)
5208 bfd_byte *p = htab->ovtab->contents;
5209 unsigned int off = o * 16 + 8;
5211 /* Write file_off into _ovly_table. */
5212 bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
5215 /* Soft-icache has its file offset put in .ovl.init. */
5216 if (htab->init != NULL && htab->init->size != 0)
5218 bfd_vma val = elf_section_data (htab->ovl_sec[0])->this_hdr.sh_offset;
5220 bfd_put_32 (htab->init->owner, val, htab->init->contents + 4);
5224 /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
5225 of 16. This should always be possible when using the standard
5226 linker scripts, but don't create overlapping segments if
5227 someone is playing games with linker scripts. */
5229 for (i = count; i-- != 0; )
5230 if (phdr[i].p_type == PT_LOAD)
5234 adjust = -phdr[i].p_filesz & 15;
5237 && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
5240 adjust = -phdr[i].p_memsz & 15;
5243 && phdr[i].p_filesz != 0
5244 && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
5245 && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
5248 if (phdr[i].p_filesz != 0)
5252 if (i == (unsigned int) -1)
5253 for (i = count; i-- != 0; )
5254 if (phdr[i].p_type == PT_LOAD)
5258 adjust = -phdr[i].p_filesz & 15;
5259 phdr[i].p_filesz += adjust;
5261 adjust = -phdr[i].p_memsz & 15;
5262 phdr[i].p_memsz += adjust;
5268 #define TARGET_BIG_SYM bfd_elf32_spu_vec
5269 #define TARGET_BIG_NAME "elf32-spu"
5270 #define ELF_ARCH bfd_arch_spu
5271 #define ELF_MACHINE_CODE EM_SPU
5272 /* This matches the alignment need for DMA. */
5273 #define ELF_MAXPAGESIZE 0x80
5274 #define elf_backend_rela_normal 1
5275 #define elf_backend_can_gc_sections 1
5277 #define bfd_elf32_bfd_reloc_type_lookup spu_elf_reloc_type_lookup
5278 #define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
5279 #define elf_info_to_howto spu_elf_info_to_howto
5280 #define elf_backend_count_relocs spu_elf_count_relocs
5281 #define elf_backend_relocate_section spu_elf_relocate_section
5282 #define elf_backend_symbol_processing spu_elf_backend_symbol_processing
5283 #define elf_backend_link_output_symbol_hook spu_elf_output_symbol_hook
5284 #define elf_backend_object_p spu_elf_object_p
5285 #define bfd_elf32_new_section_hook spu_elf_new_section_hook
5286 #define bfd_elf32_bfd_link_hash_table_create spu_elf_link_hash_table_create
5288 #define elf_backend_additional_program_headers spu_elf_additional_program_headers
5289 #define elf_backend_modify_segment_map spu_elf_modify_segment_map
5290 #define elf_backend_modify_program_headers spu_elf_modify_program_headers
5291 #define elf_backend_post_process_headers spu_elf_post_process_headers
5292 #define elf_backend_fake_sections spu_elf_fake_sections
5293 #define elf_backend_special_sections spu_elf_special_sections
5294 #define bfd_elf32_bfd_final_link spu_elf_final_link
5296 #include "elf32-target.h"