/* PowerPC64-specific support for 64-bit ELF.
Copyright 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008,
- 2009, 2010 Free Software Foundation, Inc.
+ 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
based on elf32-ppc.c by Ian Lance Taylor.
Largely rewritten by Alan Modra.
#include "elf-bfd.h"
#include "elf/ppc64.h"
#include "elf64-ppc.h"
+#include "dwarf2.h"
static bfd_reloc_status_type ppc64_elf_ha_reloc
(bfd *, arelent *, asymbol *, void *, asection *, bfd *, char **);
#define bfd_elf64_mkobject ppc64_elf_mkobject
#define bfd_elf64_bfd_reloc_type_lookup ppc64_elf_reloc_type_lookup
-#define bfd_elf64_bfd_reloc_name_lookup ppc64_elf_reloc_name_lookup
-#define bfd_elf64_bfd_merge_private_bfd_data ppc64_elf_merge_private_bfd_data
+#define bfd_elf64_bfd_reloc_name_lookup ppc64_elf_reloc_name_lookup
+#define bfd_elf64_bfd_merge_private_bfd_data _bfd_generic_verify_endian_match
#define bfd_elf64_new_section_hook ppc64_elf_new_section_hook
#define bfd_elf64_bfd_link_hash_table_create ppc64_elf_link_hash_table_create
#define bfd_elf64_bfd_link_hash_table_free ppc64_elf_link_hash_table_free
#define bfd_elf64_get_synthetic_symtab ppc64_elf_get_synthetic_symtab
+#define bfd_elf64_bfd_link_just_syms ppc64_elf_link_just_syms
#define elf_backend_object_p ppc64_elf_object_p
#define elf_backend_grok_prstatus ppc64_elf_grok_prstatus
#define ADDIS_R2_R2 0x3c420000 /* addis %r2,%r2,off@ha */
#define ADDI_R2_R2 0x38420000 /* addi %r2,%r2,off@l */
+#define XOR_R11_R11_R11 0x7d6b5a78 /* xor %r11,%r11,%r11 */
+#define ADD_R12_R12_R11 0x7d8c5a14 /* add %r12,%r12,%r11 */
+#define ADD_R2_R2_R11 0x7c425a14 /* add %r2,%r2,%r11 */
+#define CMPLDI_R2_0 0x28220000 /* cmpldi %r2,0 */
+#define BNECTR 0x4ca20420 /* bnectr+ */
+#define BNECTR_P4 0x4ce20420 /* bnectr+ */
+
#define LD_R11_0R2 0xe9620000 /* ld %r11,xxx+0(%r2) */
#define LD_R2_0R2 0xe8420000 /* ld %r2,xxx+0(%r2) */
0, /* dst_mask */
FALSE), /* pcrel_offset */
+ HOWTO (R_PPC64_TOCSAVE,
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 32, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_dont, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "R_PPC64_TOCSAVE", /* name */
+ FALSE, /* partial_inplace */
+ 0, /* src_mask */
+ 0, /* dst_mask */
+ FALSE), /* pcrel_offset */
+
/* Computes the load module index of the load module that contains the
definition of its TLS sym. */
HOWTO (R_PPC64_DTPMOD64,
long insn;
enum elf_ppc64_reloc_type r_type;
bfd_size_type octets;
- /* Disabled until we sort out how ld should choose 'y' vs 'at'. */
- bfd_boolean is_power4 = FALSE;
+ /* Assume 'at' branch hints. */
+ bfd_boolean is_isa_v2 = TRUE;
/* If this is a relocatable link (output_bfd test tells us), just
call the generic function. Any adjustment will be done at final
|| r_type == R_PPC64_REL14_BRTAKEN)
insn |= 0x01 << 21; /* 'y' or 't' bit, lowest bit of BO field. */
- if (is_power4)
+ if (is_isa_v2)
{
/* Set 'a' bit. This is 0b00010 in BO field for branch
on CR(BI) insns (BO == 001at or 011at), and 0b01000
/* Nonzero if this bfd has small toc/got relocs, ie. that expect
the reloc to be in the range -32768 to 32767. */
- unsigned int has_small_toc_reloc;
+ unsigned int has_small_toc_reloc : 1;
+
+ /* Set if toc/got ha relocs detected not using r2, or lo reloc
+ instruction not one we handle. */
+ unsigned int unexpected_toc_insn : 1;
};
#define ppc64_elf_tdata(bfd) \
if (note->descsz != 136)
return FALSE;
+ elf_tdata (abfd)->core_pid
+ = bfd_get_32 (abfd, note->descdata + 24);
elf_tdata (abfd)->core_program
= _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
elf_tdata (abfd)->core_command
va_list ap;
va_start (ap, note_type);
- memset (data, 0, 40);
+ memset (data, 0, sizeof (data));
strncpy (data + 40, va_arg (ap, const char *), 16);
strncpy (data + 56, va_arg (ap, const char *), 80);
va_end (ap);
}
}
-/* Merge backend specific data from an object file to the output
- object file when linking. */
-
-static bfd_boolean
-ppc64_elf_merge_private_bfd_data (bfd *ibfd, bfd *obfd)
-{
- /* Check if we have the same endianess. */
- if (ibfd->xvec->byteorder != obfd->xvec->byteorder
- && ibfd->xvec->byteorder != BFD_ENDIAN_UNKNOWN
- && obfd->xvec->byteorder != BFD_ENDIAN_UNKNOWN)
- {
- const char *msg;
-
- if (bfd_big_endian (ibfd))
- msg = _("%B: compiled for a big endian system "
- "and target is little endian");
- else
- msg = _("%B: compiled for a little endian system "
- "and target is big endian");
-
- (*_bfd_error_handler) (msg, ibfd);
-
- bfd_set_error (bfd_error_wrong_format);
- return FALSE;
- }
-
- return TRUE;
-}
-
/* Add extra PPC sections. */
static const struct bfd_elf_special_section ppc64_elf_special_sections[]=
{
if (sec->vma > ent)
break;
- if ((sec->flags & SEC_ALLOC) == 0
- || (sec->flags & SEC_LOAD) == 0)
+ /* SEC_LOAD may not be set if SEC is from a separate debug
+ info file. */
+ if ((sec->flags & SEC_ALLOC) == 0)
break;
if ((sec->flags & SEC_CODE) != 0)
s->section = sec;
calls may use the function descriptor symbol, ie. "bl foo". This
behaves exactly as "bl .foo". */
-/* The linker needs to keep track of the number of relocs that it
- decides to copy as dynamic relocs in check_relocs for each symbol.
- This is so that it can later discard them if they are found to be
- unnecessary. We store the information in a field extending the
- regular ELF linker hash table. */
-
-struct ppc_dyn_relocs
-{
- struct ppc_dyn_relocs *next;
-
- /* The input section of the reloc. */
- asection *sec;
-
- /* Total number of relocs copied for the input section. */
- bfd_size_type count;
-
- /* Number of pc-relative relocs copied for the input section. */
- bfd_size_type pc_count;
-};
-
/* Of those relocs that might be copied as dynamic relocs, this function
selects those that must be copied when linking a shared library,
even when the symbol is local. */
ppc_stub_long_branch_r2off,
ppc_stub_plt_branch,
ppc_stub_plt_branch_r2off,
- ppc_stub_plt_call
+ ppc_stub_plt_call,
+ ppc_stub_plt_call_r2save
};
struct ppc_stub_hash_entry {
} u;
/* Track dynamic relocs copied for this symbol. */
- struct ppc_dyn_relocs *dyn_relocs;
+ struct elf_dyn_relocs *dyn_relocs;
/* Link between function code and descriptor symbols. */
struct ppc_link_hash_entry *oh;
/* Another hash table for plt_branch stubs. */
struct bfd_hash_table branch_hash_table;
+ /* Hash table for function prologue tocsave. */
+ htab_t tocsave_htab;
+
/* Linker stub bfd. */
bfd *stub_bfd;
asection *sfpr;
asection *brlt;
asection *relbrlt;
+ asection *glink_eh_frame;
/* Shortcut to .__tls_get_addr and __tls_get_addr. */
struct ppc_link_hash_entry *tls_get_addr;
bfd_size_type got_reli_size;
/* Statistics. */
- unsigned long stub_count[ppc_stub_plt_call];
+ unsigned long stub_count[ppc_stub_plt_call_r2save];
/* Number of stubs against global syms. */
unsigned long stub_globals;
+ /* Alignment of PLT call stubs. */
+ unsigned int plt_stub_align:4;
+
+ /* Set if PLT call stubs should load r11. */
+ unsigned int plt_static_chain:1;
+
+ /* Set if PLT call stubs need a read-read barrier. */
+ unsigned int plt_thread_safe:1;
+
/* Set if we should emit symbols for stubs. */
unsigned int emit_stub_syms:1;
return entry;
}
+/* Key for tocsave_htab: identifies a function-prologue location
+   (input section + offset) flagged by an R_PPC64_TOCSAVE reloc.  */
+struct tocsave_entry {
+  asection *sec;   /* Input section containing the insn.  */
+  bfd_vma offset;  /* Offset of the insn within SEC.  */
+};
+
+/* Hash a tocsave_entry: mix the section pointer with the offset, then
+   drop the low three bits, which carry little information (presumably
+   because insn addresses and allocations are at least 8-byte grained
+   -- NOTE(review): confirm).  */
+static hashval_t
+tocsave_htab_hash (const void *p)
+{
+  const struct tocsave_entry *e = (const struct tocsave_entry *) p;
+  return ((bfd_vma)(intptr_t) e->sec ^ e->offset) >> 3;
+}
+
+/* Equality predicate for tocsave_htab: entries match when both the
+   section and the offset within it are identical.  */
+static int
+tocsave_htab_eq (const void *p1, const void *p2)
+{
+  const struct tocsave_entry *e1 = (const struct tocsave_entry *) p1;
+  const struct tocsave_entry *e2 = (const struct tocsave_entry *) p2;
+  return e1->sec == e2->sec && e1->offset == e2->offset;
+}
+
/* Create a ppc64 ELF linker hash table. */
static struct bfd_link_hash_table *
sizeof (struct ppc_branch_hash_entry)))
return NULL;
+ htab->tocsave_htab = htab_try_create (1024,
+ tocsave_htab_hash,
+ tocsave_htab_eq,
+ NULL);
+ if (htab->tocsave_htab == NULL)
+ return NULL;
+
/* Initializing two fields of the union is just cosmetic. We really
only care about glist, but when compiled on a 32-bit host the
bfd_vma fields are larger. Setting the bfd_vma to zero makes
static void
ppc64_elf_link_hash_table_free (struct bfd_link_hash_table *hash)
{
- struct ppc_link_hash_table *ret = (struct ppc_link_hash_table *) hash;
+ struct ppc_link_hash_table *htab = (struct ppc_link_hash_table *) hash;
- bfd_hash_table_free (&ret->stub_hash_table);
- bfd_hash_table_free (&ret->branch_hash_table);
+ bfd_hash_table_free (&htab->stub_hash_table);
+ bfd_hash_table_free (&htab->branch_hash_table);
+ /* tocsave_htab is created in ppc64_elf_link_hash_table_create; the
+ guard protects against a partially constructed table.  */
+ if (htab->tocsave_htab)
+ htab_delete (htab->tocsave_htab);
_bfd_generic_link_hash_table_free (hash);
}
static struct ppc_stub_hash_entry *
ppc_add_stub (const char *stub_name,
asection *section,
- struct ppc_link_hash_table *htab)
+ struct bfd_link_info *info)
{
+ struct ppc_link_hash_table *htab = ppc_hash_table (info);
asection *link_sec;
asection *stub_sec;
struct ppc_stub_hash_entry *stub_entry;
TRUE, FALSE);
if (stub_entry == NULL)
{
- (*_bfd_error_handler) (_("%B: cannot create stub entry %s"),
- section->owner, stub_name);
+ info->callbacks->einfo (_("%P: %B: cannot create stub entry %s\n"),
+ section->owner, stub_name);
return NULL;
}
|| ! bfd_set_section_alignment (dynobj, htab->glink, 3))
return FALSE;
+ if (!info->no_ld_generated_unwind_info)
+ {
+ flags = (SEC_ALLOC | SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS
+ | SEC_IN_MEMORY | SEC_LINKER_CREATED);
+ htab->glink_eh_frame = bfd_make_section_anyway_with_flags (dynobj,
+ ".eh_frame",
+ flags);
+ if (htab->glink_eh_frame == NULL
+ || !bfd_set_section_alignment (abfd, htab->glink_eh_frame, 2))
+ return FALSE;
+ }
+
flags = SEC_ALLOC | SEC_LINKER_CREATED;
htab->iplt = bfd_make_section_anyway_with_flags (dynobj, ".iplt", flags);
if (htab->iplt == NULL
edir = (struct ppc_link_hash_entry *) dir;
eind = (struct ppc_link_hash_entry *) ind;
+ edir->is_func |= eind->is_func;
+ edir->is_func_descriptor |= eind->is_func_descriptor;
+ edir->tls_mask |= eind->tls_mask;
+ if (eind->oh != NULL)
+ edir->oh = ppc_follow_link (eind->oh);
+
+ /* If called to transfer flags for a weakdef during processing
+ of elf_adjust_dynamic_symbol, don't copy NON_GOT_REF.
+ We clear it ourselves for ELIMINATE_COPY_RELOCS. */
+ if (!(ELIMINATE_COPY_RELOCS
+ && eind->elf.root.type != bfd_link_hash_indirect
+ && edir->elf.dynamic_adjusted))
+ edir->elf.non_got_ref |= eind->elf.non_got_ref;
+
+ edir->elf.ref_dynamic |= eind->elf.ref_dynamic;
+ edir->elf.ref_regular |= eind->elf.ref_regular;
+ edir->elf.ref_regular_nonweak |= eind->elf.ref_regular_nonweak;
+ edir->elf.needs_plt |= eind->elf.needs_plt;
+
/* Copy over any dynamic relocs we may have on the indirect sym. */
if (eind->dyn_relocs != NULL)
{
if (edir->dyn_relocs != NULL)
{
- struct ppc_dyn_relocs **pp;
- struct ppc_dyn_relocs *p;
+ struct elf_dyn_relocs **pp;
+ struct elf_dyn_relocs *p;
/* Add reloc counts against the indirect sym to the direct sym
list. Merge any entries against the same section. */
for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
{
- struct ppc_dyn_relocs *q;
+ struct elf_dyn_relocs *q;
for (q = edir->dyn_relocs; q != NULL; q = q->next)
if (q->sec == p->sec)
eind->dyn_relocs = NULL;
}
- edir->is_func |= eind->is_func;
- edir->is_func_descriptor |= eind->is_func_descriptor;
- edir->tls_mask |= eind->tls_mask;
- if (eind->oh != NULL)
- edir->oh = ppc_follow_link (eind->oh);
-
- /* If called to transfer flags for a weakdef during processing
- of elf_adjust_dynamic_symbol, don't copy NON_GOT_REF.
- We clear it ourselves for ELIMINATE_COPY_RELOCS. */
- if (!(ELIMINATE_COPY_RELOCS
- && eind->elf.root.type != bfd_link_hash_indirect
- && edir->elf.dynamic_adjusted))
- edir->elf.non_got_ref |= eind->elf.non_got_ref;
-
- edir->elf.ref_dynamic |= eind->elf.ref_dynamic;
- edir->elf.ref_regular |= eind->elf.ref_regular;
- edir->elf.ref_regular_nonweak |= eind->elf.ref_regular_nonweak;
- edir->elf.needs_plt |= eind->elf.needs_plt;
-
- /* If we were called to copy over info for a weak sym, that's all. */
+ /* If we were called to copy over info for a weak sym, that's all.
+ You might think dyn_relocs need not be copied over; After all,
+ both syms will be dynamic or both non-dynamic so we're just
+ moving reloc accounting around. However, ELIMINATE_COPY_RELOCS
+ code in ppc64_elf_adjust_dynamic_symbol needs to check for
+ dyn_relocs in read-only sections, and it does so on what is the
+ DIR sym here. */
if (eind->elf.root.type != bfd_link_hash_indirect)
return;
asection **sec,
bfd_vma *value ATTRIBUTE_UNUSED)
{
+ if ((ibfd->flags & DYNAMIC) == 0
+ && ELF_ST_BIND (isym->st_info) == STB_GNU_UNIQUE)
+ elf_tdata (info->output_bfd)->has_gnu_symbols = TRUE;
+
if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
{
if ((ibfd->flags & DYNAMIC) == 0)
- elf_tdata (info->output_bfd)->has_ifunc_symbols = TRUE;
+ elf_tdata (info->output_bfd)->has_gnu_symbols = TRUE;
}
else if (ELF_ST_TYPE (isym->st_info) == STT_FUNC)
;
return TRUE;
}
+/* If --just-symbols against a final linked binary, then assume we need
+ toc adjusting stubs when calling functions defined there. */
+
+static void
+ppc64_elf_link_just_syms (asection *sec, struct bfd_link_info *info)
+{
+ if ((sec->flags & SEC_CODE) != 0
+ && (sec->owner->flags & (EXEC_P | DYNAMIC)) != 0
+ && is_ppc64_elf (sec->owner))
+ {
+ /* Heuristic: a .got at least as large as the got header together
+ with an .opd section marks a TOC-using ppc64 binary, so flag SEC
+ as needing r2-adjusting call stubs.  */
+ asection *got = bfd_get_section_by_name (sec->owner, ".got");
+ if (got != NULL
+ && got->size >= elf_backend_got_header_size
+ && bfd_get_section_by_name (sec->owner, ".opd") != NULL)
+ sec->has_toc_reloc = 1;
+ }
+ /* Fall through to the generic just-syms handling.  */
+ _bfd_elf_link_just_syms (sec, info);
+}
+
static struct plt_entry **
update_local_sym_info (bfd *abfd, Elf_Internal_Shdr *symtab_hdr,
unsigned long r_symndx, bfd_vma r_addend, int tls_type)
|| (!info->shared
&& ifunc != NULL))
{
- struct ppc_dyn_relocs *p;
- struct ppc_dyn_relocs **head;
+ struct elf_dyn_relocs *p;
+ struct elf_dyn_relocs **head;
/* We must copy these reloc types into the output file.
Create a reloc section in dynobj and make room for
s = sec;
vpp = &elf_section_data (s)->local_dynrel;
- head = (struct ppc_dyn_relocs **) vpp;
+ head = (struct elf_dyn_relocs **) vpp;
}
p = *head;
/* No relocs implies we are linking a --just-symbols object. */
if (opd_sec->reloc_count == 0)
{
- if (!bfd_get_section_contents (opd_bfd, opd_sec, &val, offset, 8))
+ char buf[8];
+
+ if (!bfd_get_section_contents (opd_bfd, opd_sec, buf, offset, 8))
return (bfd_vma) -1;
+ val = bfd_get_64 (opd_bfd, buf);
if (code_sec != NULL)
{
asection *sec, *likely = NULL;
struct ppc_link_hash_entry *eh = (struct ppc_link_hash_entry *) h;
struct ppc_link_hash_entry *fdh;
- if (eh->elf.root.type == bfd_link_hash_warning)
- eh = (struct ppc_link_hash_entry *) eh->elf.root.u.i.link;
-
/* Dynamic linking info is on the func descriptor sym. */
fdh = defined_func_desc (eh);
if (fdh != NULL)
|| (!info->executable
&& eh->elf.def_regular
&& ELF_ST_VISIBILITY (eh->elf.other) != STV_INTERNAL
- && ELF_ST_VISIBILITY (eh->elf.other) != STV_HIDDEN)))
+ && ELF_ST_VISIBILITY (eh->elf.other) != STV_HIDDEN
+ && (strchr (eh->elf.root.root.string, ELF_VER_CHR) != NULL
+ || !bfd_hide_sym_by_version (info->version_info,
+ eh->elf.root.root.string)))))
{
asection *code_sec;
struct ppc_link_hash_entry *fh;
if (r_symndx >= symtab_hdr->sh_info)
{
struct ppc_link_hash_entry *eh;
- struct ppc_dyn_relocs **pp;
- struct ppc_dyn_relocs *p;
+ struct elf_dyn_relocs **pp;
+ struct elf_dyn_relocs *p;
h = sym_hashes[r_symndx - symtab_hdr->sh_info];
h = elf_follow_link (h);
if (fh->elf.root.type == bfd_link_hash_indirect)
return TRUE;
- if (fh->elf.root.type == bfd_link_hash_warning)
- fh = (struct ppc_link_hash_entry *) fh->elf.root.u.i.link;
-
info = inf;
htab = ppc_hash_table (info);
if (htab == NULL)
if (ELIMINATE_COPY_RELOCS)
{
struct ppc_link_hash_entry * eh;
- struct ppc_dyn_relocs *p;
+ struct elf_dyn_relocs *p;
eh = (struct ppc_link_hash_entry *) h;
for (p = eh->dyn_relocs; p != NULL; p = p->next)
function pointers, vtable refs and suchlike in read-only
sections. Allow them to proceed, but warn that this might
break at runtime. */
- (*_bfd_error_handler)
- (_("copy reloc against `%s' requires lazy plt linking; "
- "avoid setting LD_BIND_NOW=1 or upgrade gcc"),
+ info->callbacks->einfo
+ (_("%P: copy reloc against `%s' requires lazy plt linking; "
+ "avoid setting LD_BIND_NOW=1 or upgrade gcc\n"),
h->root.root.string);
}
if (h->size == 0)
{
- (*_bfd_error_handler) (_("dynamic variable `%s' is zero size"),
- h->root.root.string);
+ info->callbacks->einfo (_("%P: dynamic variable `%s' is zero size\n"),
+ h->root.root.string);
return TRUE;
}
return 1;
}
+/* Find (or create, when INSERT) an entry in the tocsave hash table
+   for the location referenced by the R_PPC64_TOCSAVE reloc IRELA in
+   IBFD.  LOCAL_SYMS caches IBFD's local symbols for get_sym_h.
+   Returns the entry, or NULL on error.  */
+
+static struct tocsave_entry *
+tocsave_find (struct ppc_link_hash_table *htab,
+ enum insert_option insert,
+ Elf_Internal_Sym **local_syms,
+ const Elf_Internal_Rela *irela,
+ bfd *ibfd)
+{
+ unsigned long r_indx;
+ struct elf_link_hash_entry *h;
+ Elf_Internal_Sym *sym;
+ struct tocsave_entry ent, *p;
+ hashval_t hash;
+ struct tocsave_entry **slot;
+
+ /* Resolve the reloc's symbol to a section and value.  */
+ r_indx = ELF64_R_SYM (irela->r_info);
+ if (!get_sym_h (&h, &sym, &ent.sec, NULL, local_syms, r_indx, ibfd))
+ return NULL;
+ if (ent.sec == NULL || ent.sec->output_section == NULL)
+ {
+ /* %B consumes a bfd * argument; the original call passed none,
+ so the handler read garbage varargs.  Pass IBFD.  */
+ (*_bfd_error_handler)
+ (_("%B: undefined symbol on R_PPC64_TOCSAVE relocation"), ibfd);
+ return NULL;
+ }
+
+ if (h != NULL)
+ ent.offset = h->root.u.def.value;
+ else
+ ent.offset = sym->st_value;
+ ent.offset += irela->r_addend;
+
+ hash = tocsave_htab_hash (&ent);
+ slot = ((struct tocsave_entry **)
+ htab_find_slot_with_hash (htab->tocsave_htab, &ent, hash, insert));
+ if (slot == NULL)
+ return NULL;
+
+ if (*slot == NULL)
+ {
+ /* Allocate on IBFD's objalloc so it is released with the bfd.  */
+ p = (struct tocsave_entry *) bfd_alloc (ibfd, sizeof (*p));
+ if (p == NULL)
+ return NULL;
+ *p = ent;
+ *slot = p;
+ }
+ return *slot;
+}
+
/* Adjust all global syms defined in opd sections. In gcc generated
code for the old ABI, these will already have been done. */
if (h->root.type == bfd_link_hash_indirect)
return TRUE;
- if (h->root.type == bfd_link_hash_warning)
- h = (struct elf_link_hash_entry *) h->root.u.i.link;
-
if (h->root.type != bfd_link_hash_defined
&& h->root.type != bfd_link_hash_defweak)
return TRUE;
asection *sym_sec)
{
enum elf_ppc64_reloc_type r_type;
- struct ppc_dyn_relocs *p;
- struct ppc_dyn_relocs **pp;
+ struct elf_dyn_relocs *p;
+ struct elf_dyn_relocs **pp;
/* Can this reloc be dynamic? This switch, and later tests here
should be kept in sync with the code in check_relocs. */
if (sym_sec != NULL)
{
void *vpp = &elf_section_data (sym_sec)->local_dynrel;
- pp = (struct ppc_dyn_relocs **) vpp;
+ pp = (struct elf_dyn_relocs **) vpp;
}
else
{
void *vpp = &elf_section_data (sec)->local_dynrel;
- pp = (struct ppc_dyn_relocs **) vpp;
+ pp = (struct elf_dyn_relocs **) vpp;
}
/* elf_gc_sweep may have already removed all dyn relocs associated
pp = &p->next;
}
- (*_bfd_error_handler) (_("dynreloc miscount for %B, section %A"),
- sec->owner, sec);
+ info->callbacks->einfo (_("%P: dynreloc miscount for %B, section %A\n"),
+ sec->owner, sec);
bfd_set_error (bfd_error_bad_value);
return FALSE;
}
if (need_edit || add_aux_fields)
{
Elf_Internal_Rela *write_rel;
+ Elf_Internal_Shdr *rel_hdr;
bfd_byte *rptr, *wptr;
bfd_byte *new_contents;
bfd_boolean skip;
/* Fudge the header size too, as this is used later in
elf_bfd_final_link if we are emitting relocs. */
- elf_section_data (sec)->rel_hdr.sh_size
- = sec->reloc_count * elf_section_data (sec)->rel_hdr.sh_entsize;
- BFD_ASSERT (elf_section_data (sec)->rel_hdr2 == NULL);
+ rel_hdr = _bfd_elf_single_rel_hdr (sec);
+ rel_hdr->sh_size = sec->reloc_count * rel_hdr->sh_entsize;
some_edited = TRUE;
}
else if (elf_section_data (sec)->relocs != relstart)
bfd *ibfd;
asection *sec;
struct ppc_link_hash_table *htab;
+ unsigned char *toc_ref;
int pass;
if (info->relocatable || !info->executable)
if (htab == NULL)
return FALSE;
- for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
- {
- Elf_Internal_Sym *locsyms = NULL;
- asection *toc = bfd_get_section_by_name (ibfd, ".toc");
- unsigned char *toc_ref = NULL;
-
- /* Look at all the sections for this file. Make two passes over
- the relocs. On the first pass, mark toc entries involved
- with tls relocs, and check that tls relocs involved in
- setting up a tls_get_addr call are indeed followed by such a
- call. If they are not, exclude them from the optimizations
- done on the second pass. */
- for (pass = 0; pass < 2; ++pass)
+ /* Make two passes over the relocs. On the first pass, mark toc
+ entries involved with tls relocs, and check that tls relocs
+ involved in setting up a tls_get_addr call are indeed followed by
+ such a call. If they are not, we can't do any tls optimization.
+ On the second pass twiddle tls_mask flags to notify
+ relocate_section that optimization can be done, and adjust got
+ and plt refcounts. */
+ toc_ref = NULL;
+ for (pass = 0; pass < 2; ++pass)
+ for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
+ {
+ Elf_Internal_Sym *locsyms = NULL;
+ asection *toc = bfd_get_section_by_name (ibfd, ".toc");
+
for (sec = ibfd->sections; sec != NULL; sec = sec->next)
if (sec->has_tls_reloc && !bfd_is_abs_section (sec->output_section))
{
Elf_Internal_Rela *relstart, *rel, *relend;
+ bfd_boolean found_tls_get_addr_arg = 0;
/* Read the relocations. */
relstart = _bfd_elf_link_read_relocs (ibfd, sec, NULL, NULL,
bfd_boolean ok_tprel, is_local;
long toc_ref_index = 0;
int expecting_tls_get_addr = 0;
+ bfd_boolean ret = FALSE;
r_symndx = ELF64_R_SYM (rel->r_info);
if (!get_sym_h (&h, &sym, &sym_sec, &tls_mask, &locsyms,
&& (elf_symtab_hdr (ibfd).contents
!= (unsigned char *) locsyms))
free (locsyms);
- return FALSE;
+ return ret;
}
if (h != NULL)
else if (h->root.type == bfd_link_hash_undefweak)
value = 0;
else
- continue;
+ {
+ found_tls_get_addr_arg = 0;
+ continue;
+ }
}
else
/* Symbols referenced by TLS relocs must be of type
}
r_type = ELF64_R_TYPE (rel->r_info);
+ /* If this section has old-style __tls_get_addr calls
+ without marker relocs, then check that each
+ __tls_get_addr call reloc is preceded by a reloc
+ that conceivably belongs to the __tls_get_addr arg
+ setup insn. If we don't find matching arg setup
+ relocs, don't do any tls optimization. */
+ if (pass == 0
+ && sec->has_tls_get_addr_call
+ && h != NULL
+ && (h == &htab->tls_get_addr->elf
+ || h == &htab->tls_get_addr_fd->elf)
+ && !found_tls_get_addr_arg
+ && is_branch_reloc (r_type))
+ {
+ info->callbacks->minfo (_("%H __tls_get_addr lost arg, "
+ "TLS optimization disabled\n"),
+ ibfd, sec, rel->r_offset);
+ ret = TRUE;
+ goto err_free_rel;
+ }
+
+ found_tls_get_addr_arg = 0;
switch (r_type)
{
case R_PPC64_GOT_TLSLD16:
case R_PPC64_GOT_TLSLD16_LO:
expecting_tls_get_addr = 1;
+ found_tls_get_addr_arg = 1;
/* Fall thru */
case R_PPC64_GOT_TLSLD16_HI:
case R_PPC64_GOT_TLSGD16:
case R_PPC64_GOT_TLSGD16_LO:
expecting_tls_get_addr = 1;
+ found_tls_get_addr_arg = 1;
/* Fall thru */
case R_PPC64_GOT_TLSGD16_HI:
}
continue;
- case R_PPC64_TOC16:
- case R_PPC64_TOC16_LO:
- case R_PPC64_TLS:
case R_PPC64_TLSGD:
case R_PPC64_TLSLD:
+ found_tls_get_addr_arg = 1;
+ /* Fall thru */
+
+ case R_PPC64_TLS:
+ case R_PPC64_TOC16:
+ case R_PPC64_TOC16_LO:
if (sym_sec == NULL || sym_sec != toc)
continue;
case of R_PPC64_TLS, and after checking for
tls_get_addr for the TOC16 relocs. */
if (toc_ref == NULL)
- {
- toc_ref = bfd_zmalloc (toc->size / 8);
- if (toc_ref == NULL)
- goto err_free_rel;
- }
+ toc_ref = bfd_zmalloc (toc->output_section->rawsize / 8);
+ if (toc_ref == NULL)
+ goto err_free_rel;
+
if (h != NULL)
value = h->root.u.def.value;
else
value = sym->st_value;
value += rel->r_addend;
BFD_ASSERT (value < toc->size && value % 8 == 0);
- toc_ref_index = value / 8;
+ toc_ref_index = (value + toc->output_offset) / 8;
if (r_type == R_PPC64_TLS
|| r_type == R_PPC64_TLSGD
|| r_type == R_PPC64_TLSLD)
if (pass == 0
|| sec != toc
|| toc_ref == NULL
- || !toc_ref[rel->r_offset / 8])
+ || !toc_ref[(rel->r_offset + toc->output_offset) / 8])
continue;
if (ok_tprel)
{
if (pass == 0
|| sec != toc
|| toc_ref == NULL
- || !toc_ref[rel->r_offset / 8])
+ || !toc_ref[(rel->r_offset + toc->output_offset) / 8])
continue;
if (rel + 1 < relend
&& (rel[1].r_info
rel, ibfd);
if (retval == 0)
goto err_free_rel;
- if (retval > 1 && toc_tls != NULL)
- toc_ref[toc_ref_index] = 1;
+ if (toc_tls != NULL)
+ {
+ if ((*toc_tls & (TLS_GD | TLS_LD)) != 0)
+ found_tls_get_addr_arg = 1;
+ if (retval > 1)
+ toc_ref[toc_ref_index] = 1;
+ }
}
continue;
}
/* Uh oh, we didn't find the expected call. We
could just mark this symbol to exclude it
from tls optimization but it's safer to skip
- the entire section. */
- sec->has_tls_reloc = 0;
- break;
+ the entire optimization. */
+ info->callbacks->minfo (_("%H arg lost __tls_get_addr, "
+ "TLS optimization disabled\n"),
+ ibfd, sec, rel->r_offset);
+ ret = TRUE;
+ goto err_free_rel;
}
if (expecting_tls_get_addr && htab->tls_get_addr != NULL)
free (relstart);
}
- if (toc_ref != NULL)
- free (toc_ref);
+ if (locsyms != NULL
+ && (elf_symtab_hdr (ibfd).contents != (unsigned char *) locsyms))
+ {
+ if (!info->keep_memory)
+ free (locsyms);
+ else
+ elf_symtab_hdr (ibfd).contents = (unsigned char *) locsyms;
+ }
+ }
- if (locsyms != NULL
- && (elf_symtab_hdr (ibfd).contents != (unsigned char *) locsyms))
- {
- if (!info->keep_memory)
- free (locsyms);
- else
- elf_symtab_hdr (ibfd).contents = (unsigned char *) locsyms;
- }
- }
+ if (toc_ref != NULL)
+ free (toc_ref);
return TRUE;
}
struct adjust_toc_info *toc_inf = (struct adjust_toc_info *) inf;
unsigned long i;
- if (h->root.type == bfd_link_hash_indirect)
- return TRUE;
-
- if (h->root.type == bfd_link_hash_warning)
- h = (struct elf_link_hash_entry *) h->root.u.i.link;
-
if (h->root.type != bfd_link_hash_defined
&& h->root.type != bfd_link_hash_defweak)
return TRUE;
return TRUE;
}
+/* Return TRUE iff INSN is one we expect on a _LO variety toc/got reloc.  */
+
+static bfd_boolean
+ok_lo_toc_insn (unsigned int insn)
+{
+  /* Dispatch on the primary opcode in the top six bits.  */
+  switch ((insn >> 26) & 0x3f)
+    {
+    case 12:	/* addic */
+    case 14:	/* addi */
+    case 32:	/* lwz */
+    case 34:	/* lbz */
+    case 36:	/* stw */
+    case 38:	/* stb */
+    case 40:	/* lhz */
+    case 42:	/* lha */
+    case 44:	/* sth */
+    case 46:	/* lmw */
+    case 47:	/* stmw */
+    case 48:	/* lfs */
+    case 50:	/* lfd */
+    case 52:	/* stfs */
+    case 54:	/* stfd */
+      return TRUE;
+    case 58:	/* lwa, ld, lmd -- but not the XO==1 form.  */
+      return (insn & 3) != 1;
+    case 62:	/* std, stmd -- only the XO==0 or XO==3 forms.  */
+      return (insn & 3) == 0 || (insn & 3) == 3;
+    default:
+      return FALSE;
+    }
+}
+
/* Examine all relocs referencing .toc sections in order to remove
unused .toc entries. */
asection *toc, *sec;
Elf_Internal_Shdr *symtab_hdr;
Elf_Internal_Sym *local_syms;
- Elf_Internal_Rela *relstart, *rel;
+ Elf_Internal_Rela *relstart, *rel, *toc_relocs;
unsigned long *skip, *drop;
unsigned char *used;
unsigned char *keep, last, some_unused;
|| elf_discarded_section (toc))
continue;
+ toc_relocs = NULL;
local_syms = NULL;
symtab_hdr = &elf_symtab_hdr (ibfd);
&& toc->reloc_count != 0)
{
/* Read toc relocs. */
- relstart = _bfd_elf_link_read_relocs (ibfd, toc, NULL, NULL,
- info->keep_memory);
- if (relstart == NULL)
+ toc_relocs = _bfd_elf_link_read_relocs (ibfd, toc, NULL, NULL,
+ info->keep_memory);
+ if (toc_relocs == NULL)
goto error_ret;
- for (rel = relstart; rel < relstart + toc->reloc_count; ++rel)
+ for (rel = toc_relocs; rel < toc_relocs + toc->reloc_count; ++rel)
{
enum elf_ppc64_reloc_type r_type;
unsigned long r_symndx;
r_symndx, ibfd))
goto error_ret;
+ if (sym_sec == NULL
+ || elf_discarded_section (sym_sec))
+ continue;
+
if (!SYMBOL_CALLS_LOCAL (info, h))
continue;
}
skip[rel->r_offset >> 3]
- |= can_optimize | ((rel - relstart) << 2);
+ |= can_optimize | ((rel - toc_relocs) << 2);
}
-
- if (elf_section_data (toc)->relocs != relstart)
- free (relstart);
}
if (skip == NULL)
&& relstart != NULL
&& elf_section_data (sec)->relocs != relstart)
free (relstart);
+ if (toc_relocs != NULL
+ && elf_section_data (toc)->relocs != toc_relocs)
+ free (toc_relocs);
if (skip != NULL)
free (skip);
return FALSE;
struct elf_link_hash_entry *h;
Elf_Internal_Sym *sym;
bfd_vma val;
+ enum {no_check, check_lo, check_ha} insn_check;
r_type = ELF64_R_TYPE (rel->r_info);
+ switch (r_type)
+ {
+ default:
+ insn_check = no_check;
+ break;
+
+ case R_PPC64_GOT_TLSLD16_HA:
+ case R_PPC64_GOT_TLSGD16_HA:
+ case R_PPC64_GOT_TPREL16_HA:
+ case R_PPC64_GOT_DTPREL16_HA:
+ case R_PPC64_GOT16_HA:
+ case R_PPC64_TOC16_HA:
+ insn_check = check_ha;
+ break;
+
+ case R_PPC64_GOT_TLSLD16_LO:
+ case R_PPC64_GOT_TLSGD16_LO:
+ case R_PPC64_GOT_TPREL16_LO_DS:
+ case R_PPC64_GOT_DTPREL16_LO_DS:
+ case R_PPC64_GOT16_LO:
+ case R_PPC64_GOT16_LO_DS:
+ case R_PPC64_TOC16_LO:
+ case R_PPC64_TOC16_LO_DS:
+ insn_check = check_lo;
+ break;
+ }
+
+ if (insn_check != no_check)
+ {
+ bfd_vma off = rel->r_offset & ~3;
+ unsigned char buf[4];
+ unsigned int insn;
+
+ if (!bfd_get_section_contents (ibfd, sec, buf, off, 4))
+ {
+ free (used);
+ goto error_ret;
+ }
+ insn = bfd_get_32 (ibfd, buf);
+ if (insn_check == check_lo
+ ? !ok_lo_toc_insn (insn)
+ : ((insn & ((0x3f << 26) | 0x1f << 16))
+ != ((15u << 26) | (2 << 16)) /* addis rt,2,imm */))
+ {
+ char str[12];
+
+ ppc64_elf_tdata (ibfd)->unexpected_toc_insn = 1;
+ sprintf (str, "%#08x", insn);
+ info->callbacks->einfo
+ (_("%P: %H: toc optimization is not supported for"
+ " %s instruction.\n"),
+ ibfd, sec, rel->r_offset & ~3, str);
+ }
+ }
+
switch (r_type)
{
case R_PPC64_TOC16:
case R_PPC64_TOC16_LO_DS:
off = rel->r_offset + (bfd_big_endian (ibfd) ? -2 : 3);
if (!bfd_get_section_contents (ibfd, sec, &opc, off, 1))
- return FALSE;
+ {
+ free (used);
+ goto error_ret;
+ }
if ((opc & (0x3f << 2)) == (58u << 2))
break;
/* Fall thru */
some_unused = 1;
last = 0;
}
- else if (*drop)
+ else if ((*drop & ref_from_discarded) != 0)
{
some_unused = 1;
last = ref_from_discarded;
else if ((skip[val >> 3] & can_optimize) != 0)
{
Elf_Internal_Rela *tocrel
- = elf_section_data (toc)->relocs + (skip[val >> 3] >> 2);
+ = toc_relocs + (skip[val >> 3] >> 2);
unsigned long tsym = ELF64_R_SYM (tocrel->r_info);
switch (r_type)
break;
default:
- abort ();
+ if (!ppc64_elf_howto_table[R_PPC64_ADDR32])
+ ppc_howto_init ();
+ info->callbacks->einfo
+ (_("%P: %H: %s relocation references "
+ "optimized away TOC entry\n"),
+ ibfd, sec, rel->r_offset,
+ ppc64_elf_howto_table[r_type]->name);
+ bfd_set_error (bfd_error_bad_value);
+ goto error_ret;
}
rel->r_addend = tocrel->r_addend;
elf_section_data (sec)->relocs = relstart;
/* We shouldn't have local or global symbols defined in the TOC,
but handle them anyway. */
- for (sym = local_syms;
- sym < local_syms + symtab_hdr->sh_info;
- ++sym)
- if (sym->st_value != 0
- && bfd_section_from_elf_index (ibfd, sym->st_shndx) == toc)
- {
- unsigned long i;
+ if (local_syms != NULL)
+ for (sym = local_syms;
+ sym < local_syms + symtab_hdr->sh_info;
+ ++sym)
+ if (sym->st_value != 0
+ && bfd_section_from_elf_index (ibfd, sym->st_shndx) == toc)
+ {
+ unsigned long i;
- if (sym->st_value > toc->rawsize)
- i = toc->rawsize >> 3;
- else
- i = sym->st_value >> 3;
+ if (sym->st_value > toc->rawsize)
+ i = toc->rawsize >> 3;
+ else
+ i = sym->st_value >> 3;
- if ((skip[i] & (ref_from_discarded | can_optimize)) != 0)
- {
- if (local_toc_syms)
- (*_bfd_error_handler)
- (_("%s defined on removed toc entry"),
- bfd_elf_sym_name (ibfd, symtab_hdr, sym, NULL));
- do
- ++i;
- while ((skip[i] & (ref_from_discarded | can_optimize)));
- sym->st_value = (bfd_vma) i << 3;
- }
+ if ((skip[i] & (ref_from_discarded | can_optimize)) != 0)
+ {
+ if (local_toc_syms)
+ (*_bfd_error_handler)
+ (_("%s defined on removed toc entry"),
+ bfd_elf_sym_name (ibfd, symtab_hdr, sym, NULL));
+ do
+ ++i;
+ while ((skip[i] & (ref_from_discarded | can_optimize)));
+ sym->st_value = (bfd_vma) i << 3;
+ }
- sym->st_value -= skip[i];
- symtab_hdr->contents = (unsigned char *) local_syms;
- }
+ sym->st_value -= skip[i];
+ symtab_hdr->contents = (unsigned char *) local_syms;
+ }
/* Adjust any global syms defined in this toc input section. */
if (toc_inf.global_toc_syms)
if (toc->reloc_count != 0)
{
+ Elf_Internal_Shdr *rel_hdr;
Elf_Internal_Rela *wrel;
bfd_size_type sz;
- /* Read toc relocs. */
- relstart = _bfd_elf_link_read_relocs (ibfd, toc, NULL, NULL,
- TRUE);
- if (relstart == NULL)
+ /* Remove unused toc relocs, and adjust those we keep. */
+ if (toc_relocs == NULL)
+ toc_relocs = _bfd_elf_link_read_relocs (ibfd, toc, NULL, NULL,
+ info->keep_memory);
+ if (toc_relocs == NULL)
goto error_ret;
- /* Remove unused toc relocs, and adjust those we keep. */
- wrel = relstart;
- for (rel = relstart; rel < relstart + toc->reloc_count; ++rel)
+ wrel = toc_relocs;
+ for (rel = toc_relocs; rel < toc_relocs + toc->reloc_count; ++rel)
if ((skip[rel->r_offset >> 3]
& (ref_from_discarded | can_optimize)) == 0)
{
&local_syms, NULL, NULL))
goto error_ret;
- toc->reloc_count = wrel - relstart;
- sz = elf_section_data (toc)->rel_hdr.sh_entsize;
- elf_section_data (toc)->rel_hdr.sh_size = toc->reloc_count * sz;
- BFD_ASSERT (elf_section_data (toc)->rel_hdr2 == NULL);
+ elf_section_data (toc)->relocs = toc_relocs;
+ toc->reloc_count = wrel - toc_relocs;
+ rel_hdr = _bfd_elf_single_rel_hdr (toc);
+ sz = rel_hdr->sh_entsize;
+ rel_hdr->sh_size = toc->reloc_count * sz;
}
}
+ else if (toc_relocs != NULL
+ && elf_section_data (toc)->relocs != toc_relocs)
+ free (toc_relocs);
if (local_syms != NULL
&& symtab_hdr->contents != (unsigned char *) local_syms)
struct ppc_link_hash_table *htab;
asection *s;
struct ppc_link_hash_entry *eh;
- struct ppc_dyn_relocs *p;
+ struct elf_dyn_relocs *p;
struct got_entry **pgent, *gent;
if (h->root.type == bfd_link_hash_indirect)
return TRUE;
- if (h->root.type == bfd_link_hash_warning)
- h = (struct elf_link_hash_entry *) h->root.u.i.link;
-
info = (struct bfd_link_info *) inf;
htab = ppc_hash_table (info);
if (htab == NULL)
then they should avoid writing weird assembly. */
if (SYMBOL_CALLS_LOCAL (info, h))
{
- struct ppc_dyn_relocs **pp;
+ struct elf_dyn_relocs **pp;
for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
{
readonly_dynrelocs (struct elf_link_hash_entry *h, void *inf)
{
struct ppc_link_hash_entry *eh;
- struct ppc_dyn_relocs *p;
-
- if (h->root.type == bfd_link_hash_warning)
- h = (struct elf_link_hash_entry *) h->root.u.i.link;
+ struct elf_dyn_relocs *p;
eh = (struct ppc_link_hash_entry *) h;
for (p = eh->dyn_relocs; p != NULL; p = p->next)
for (s = ibfd->sections; s != NULL; s = s->next)
{
- struct ppc_dyn_relocs *p;
+ struct elf_dyn_relocs *p;
for (p = elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
{
/* Strip this section if we don't need it; see the
comment below. */
}
+ else if (s == htab->glink_eh_frame)
+ {
+ if (!bfd_is_abs_section (s->output_section))
+ /* Not sized yet. */
+ continue;
+ }
else if (CONST_STRNEQ (s->name, ".rela"))
{
if (s->size != 0)
return ppc_stub_none;
}
-/* Build a .plt call stub. */
+/* With power7 weakly ordered memory model, it is possible for ld.so
+ to update a plt entry in one thread and have another thread see a
+ stale zero toc entry. To avoid this we need some sort of acquire
+ barrier in the call stub. One solution is to make the load of the
+ toc word seem to depend on the load of the function entry
+ word. Another solution is to test for r2 being zero, and branch to
+ the appropriate glink entry if so.
+
+ . fake dep barrier compare
+ . ld 11,xxx(2) ld 11,xxx(2)
+ . mtctr 11 mtctr 11
+ . xor 11,11,11 ld 2,xxx+8(2)
+ . add 2,2,11 cmpldi 2,0
+ . ld 2,xxx+8(2) bnectr+
+ . bctr b <glink_entry>
+
+ The solution involving the compare turns out to be faster, so
+ that's what we use unless the branch won't reach. */
+
+#define ALWAYS_USE_FAKE_DEP 0
+#define ALWAYS_EMIT_R2SAVE 0
-static inline bfd_byte *
-build_plt_stub (bfd *obfd, bfd_byte *p, int offset, Elf_Internal_Rela *r)
-{
#define PPC_LO(v) ((v) & 0xffff)
#define PPC_HI(v) (((v) >> 16) & 0xffff)
#define PPC_HA(v) PPC_HI ((v) + 0x8000)
+/* Return the size in bytes of the plt call stub that build_plt_stub
+   will emit for STUB_ENTRY, given the toc-relative plt entry offset
+   OFF.  Starts from the maximal PLT_CALL_STUB_SIZE and adjusts for
+   the optional pieces of the stub.  */
+static inline unsigned int
+plt_stub_size (struct ppc_link_hash_table *htab,
+ struct ppc_stub_hash_entry *stub_entry,
+ bfd_vma off)
+{
+ unsigned size = PLT_CALL_STUB_SIZE;
+
+ /* No "std 2,40(1)" r2-save insn unless this stub must save r2.  */
+ if (!(ALWAYS_EMIT_R2SAVE
+ || stub_entry->stub_type == ppc_stub_plt_call_r2save))
+ size -= 4;
+ /* No load of the static chain pointer.  */
+ if (!htab->plt_static_chain)
+ size -= 4;
+ /* Thread-safe stubs cost two extra insns: either the xor/add fake
+    dependency, or the cmpldi/bnectr/branch sequence replacing the
+    single bctr (see build_plt_stub).  */
+ if (htab->plt_thread_safe)
+ size += 8;
+ /* The addis of the high part is omitted when it would be zero.  */
+ if (PPC_HA (off) == 0)
+ size -= 4;
+ /* An extra addi is needed when the toc entries used by the stub
+    span a 0x10000 boundary.  */
+ if (PPC_HA (off + 8 + 8 * htab->plt_static_chain) != PPC_HA (off))
+ size += 4;
+ /* The __tls_get_addr optimization wraps the call stub with 13 more
+    instruction words; see build_tls_get_addr_stub.  */
+ if (stub_entry->h != NULL
+ && (stub_entry->h == htab->tls_get_addr_fd
+ || stub_entry->h == htab->tls_get_addr)
+ && !htab->no_tls_get_addr_opt)
+ size += 13 * 4;
+ return size;
+}
+
+/* Return the padding needed to align the next stub to a
+   2**plt_stub_align boundary, but only if aligning makes the stub
+   cross fewer such boundaries; otherwise return 0.  */
+static inline unsigned int
+plt_stub_pad (struct ppc_link_hash_table *htab,
+ struct ppc_stub_hash_entry *stub_entry,
+ bfd_vma plt_off)
+{
+ int stub_align = 1 << htab->plt_stub_align;
+ unsigned stub_size = plt_stub_size (htab, stub_entry, plt_off);
+ /* The current end of the stub section is where this stub starts.  */
+ bfd_vma stub_off = stub_entry->stub_sec->size;
+
+ /* Boundaries crossed at the current offset exceed the minimum a
+    stub of this size must cross, so pad up to the next boundary.  */
+ if (((stub_off + stub_size - 1) & -stub_align) - (stub_off & -stub_align)
+ > (stub_size & -stub_align))
+ return stub_align - (stub_off & (stub_align - 1));
+ return 0;
+}
+
+/* Build a .plt call stub. */
+
+static inline bfd_byte *
+build_plt_stub (struct ppc_link_hash_table *htab,
+ struct ppc_stub_hash_entry *stub_entry,
+ bfd_byte *p, bfd_vma offset, Elf_Internal_Rela *r)
+{
+ bfd *obfd = htab->stub_bfd;
+ bfd_boolean plt_static_chain = htab->plt_static_chain;
+ bfd_boolean plt_thread_safe = htab->plt_thread_safe;
+ bfd_boolean use_fake_dep = plt_thread_safe;
+ bfd_vma cmp_branch_off = 0;
+
+ if (!ALWAYS_USE_FAKE_DEP
+ && plt_thread_safe
+ && !(stub_entry->h != NULL
+ && (stub_entry->h == htab->tls_get_addr_fd
+ || stub_entry->h == htab->tls_get_addr)
+ && !htab->no_tls_get_addr_opt))
+ {
+ bfd_vma pltoff = stub_entry->plt_ent->plt.offset & ~1;
+ bfd_vma pltindex = (pltoff - PLT_INITIAL_ENTRY_SIZE) / PLT_ENTRY_SIZE;
+ bfd_vma glinkoff = GLINK_CALL_STUB_SIZE + pltindex * 8;
+ bfd_vma to, from;
+
+ if (pltindex > 32767)
+ glinkoff += (pltindex - 32767) * 4;
+ to = (glinkoff
+ + htab->glink->output_offset
+ + htab->glink->output_section->vma);
+ from = (p - stub_entry->stub_sec->contents
+ + 4 * (ALWAYS_EMIT_R2SAVE
+ || stub_entry->stub_type == ppc_stub_plt_call_r2save)
+ + 4 * (PPC_HA (offset) != 0)
+ + 4 * (PPC_HA (offset + 8 + 8 * plt_static_chain)
+ != PPC_HA (offset))
+ + 4 * (plt_static_chain != 0)
+ + 20
+ + stub_entry->stub_sec->output_offset
+ + stub_entry->stub_sec->output_section->vma);
+ cmp_branch_off = to - from;
+ use_fake_dep = cmp_branch_off + (1 << 25) >= (1 << 26);
+ }
+
if (PPC_HA (offset) != 0)
{
if (r != NULL)
{
+ if (ALWAYS_EMIT_R2SAVE
+ || stub_entry->stub_type == ppc_stub_plt_call_r2save)
+ r[0].r_offset += 4;
r[0].r_info = ELF64_R_INFO (0, R_PPC64_TOC16_HA);
- r[1].r_offset = r[0].r_offset + 8;
+ r[1].r_offset = r[0].r_offset + 4;
r[1].r_info = ELF64_R_INFO (0, R_PPC64_TOC16_LO_DS);
r[1].r_addend = r[0].r_addend;
- if (PPC_HA (offset + 16) != PPC_HA (offset))
+ if (PPC_HA (offset + 8 + 8 * plt_static_chain) != PPC_HA (offset))
{
r[2].r_offset = r[1].r_offset + 4;
r[2].r_info = ELF64_R_INFO (0, R_PPC64_TOC16_LO);
}
else
{
- r[2].r_offset = r[1].r_offset + 8;
+ r[2].r_offset = r[1].r_offset + 8 + 8 * use_fake_dep;
r[2].r_info = ELF64_R_INFO (0, R_PPC64_TOC16_LO_DS);
r[2].r_addend = r[0].r_addend + 8;
- r[3].r_offset = r[2].r_offset + 4;
- r[3].r_info = ELF64_R_INFO (0, R_PPC64_TOC16_LO_DS);
- r[3].r_addend = r[0].r_addend + 16;
+ if (plt_static_chain)
+ {
+ r[3].r_offset = r[2].r_offset + 4;
+ r[3].r_info = ELF64_R_INFO (0, R_PPC64_TOC16_LO_DS);
+ r[3].r_addend = r[0].r_addend + 16;
+ }
}
}
+ if (ALWAYS_EMIT_R2SAVE
+ || stub_entry->stub_type == ppc_stub_plt_call_r2save)
+ bfd_put_32 (obfd, STD_R2_40R1, p), p += 4;
bfd_put_32 (obfd, ADDIS_R12_R2 | PPC_HA (offset), p), p += 4;
- bfd_put_32 (obfd, STD_R2_40R1, p), p += 4;
bfd_put_32 (obfd, LD_R11_0R12 | PPC_LO (offset), p), p += 4;
- if (PPC_HA (offset + 16) != PPC_HA (offset))
+ if (PPC_HA (offset + 8 + 8 * plt_static_chain) != PPC_HA (offset))
{
bfd_put_32 (obfd, ADDI_R12_R12 | PPC_LO (offset), p), p += 4;
offset = 0;
}
bfd_put_32 (obfd, MTCTR_R11, p), p += 4;
+ if (use_fake_dep)
+ {
+ bfd_put_32 (obfd, XOR_R11_R11_R11, p), p += 4;
+ bfd_put_32 (obfd, ADD_R12_R12_R11, p), p += 4;
+ }
bfd_put_32 (obfd, LD_R2_0R12 | PPC_LO (offset + 8), p), p += 4;
- bfd_put_32 (obfd, LD_R11_0R12 | PPC_LO (offset + 16), p), p += 4;
- bfd_put_32 (obfd, BCTR, p), p += 4;
+ if (plt_static_chain)
+ bfd_put_32 (obfd, LD_R11_0R12 | PPC_LO (offset + 16), p), p += 4;
}
else
{
if (r != NULL)
{
- r[0].r_offset += 4;
+ if (ALWAYS_EMIT_R2SAVE
+ || stub_entry->stub_type == ppc_stub_plt_call_r2save)
+ r[0].r_offset += 4;
r[0].r_info = ELF64_R_INFO (0, R_PPC64_TOC16_DS);
- if (PPC_HA (offset + 16) != PPC_HA (offset))
+ if (PPC_HA (offset + 8 + 8 * plt_static_chain) != PPC_HA (offset))
{
r[1].r_offset = r[0].r_offset + 4;
r[1].r_info = ELF64_R_INFO (0, R_PPC64_TOC16);
}
else
{
- r[1].r_offset = r[0].r_offset + 8;
+ r[1].r_offset = r[0].r_offset + 8 + 8 * use_fake_dep;
r[1].r_info = ELF64_R_INFO (0, R_PPC64_TOC16_DS);
- r[1].r_addend = r[0].r_addend + 16;
- r[2].r_offset = r[1].r_offset + 4;
- r[2].r_info = ELF64_R_INFO (0, R_PPC64_TOC16_DS);
- r[2].r_addend = r[0].r_addend + 8;
+ r[1].r_addend = r[0].r_addend + 8 + 8 * plt_static_chain;
+ if (plt_static_chain)
+ {
+ r[2].r_offset = r[1].r_offset + 4;
+ r[2].r_info = ELF64_R_INFO (0, R_PPC64_TOC16_DS);
+ r[2].r_addend = r[0].r_addend + 8;
+ }
}
}
- bfd_put_32 (obfd, STD_R2_40R1, p), p += 4;
+ if (ALWAYS_EMIT_R2SAVE
+ || stub_entry->stub_type == ppc_stub_plt_call_r2save)
+ bfd_put_32 (obfd, STD_R2_40R1, p), p += 4;
bfd_put_32 (obfd, LD_R11_0R2 | PPC_LO (offset), p), p += 4;
- if (PPC_HA (offset + 16) != PPC_HA (offset))
+ if (PPC_HA (offset + 8 + 8 * plt_static_chain) != PPC_HA (offset))
{
bfd_put_32 (obfd, ADDI_R2_R2 | PPC_LO (offset), p), p += 4;
offset = 0;
}
bfd_put_32 (obfd, MTCTR_R11, p), p += 4;
- bfd_put_32 (obfd, LD_R11_0R2 | PPC_LO (offset + 16), p), p += 4;
+ if (use_fake_dep)
+ {
+ bfd_put_32 (obfd, XOR_R11_R11_R11, p), p += 4;
+ bfd_put_32 (obfd, ADD_R2_R2_R11, p), p += 4;
+ }
+ if (plt_static_chain)
+ bfd_put_32 (obfd, LD_R11_0R2 | PPC_LO (offset + 16), p), p += 4;
bfd_put_32 (obfd, LD_R2_0R2 | PPC_LO (offset + 8), p), p += 4;
- bfd_put_32 (obfd, BCTR, p), p += 4;
}
+ if (plt_thread_safe && !use_fake_dep)
+ {
+ bfd_put_32 (obfd, CMPLDI_R2_0, p), p += 4;
+ bfd_put_32 (obfd, BNECTR_P4, p), p += 4;
+ bfd_put_32 (obfd, B_DOT + cmp_branch_off, p), p += 4;
+ }
+ else
+ bfd_put_32 (obfd, BCTR, p), p += 4;
return p;
}
#define MTLR_R11 0x7d6803a6
static inline bfd_byte *
-build_tls_get_addr_stub (bfd *obfd, bfd_byte *p, int offset,
- Elf_Internal_Rela *r)
+build_tls_get_addr_stub (struct ppc_link_hash_table *htab,
+ struct ppc_stub_hash_entry *stub_entry,
+ bfd_byte *p, bfd_vma offset, Elf_Internal_Rela *r)
{
+ bfd *obfd = htab->stub_bfd;
+
bfd_put_32 (obfd, LD_R11_0R3 + 0, p), p += 4;
bfd_put_32 (obfd, LD_R12_0R3 + 8, p), p += 4;
bfd_put_32 (obfd, MR_R0_R3, p), p += 4;
if (r != NULL)
r[0].r_offset += 9 * 4;
- p = build_plt_stub (obfd, p, offset, r);
+ p = build_plt_stub (htab, stub_entry, p, offset, r);
bfd_put_32 (obfd, BCTRL, p - 4);
bfd_put_32 (obfd, LD_R11_0R1 + 32, p), p += 4;
if (relocs == NULL)
return NULL;
elfsec_data->relocs = relocs;
- elfsec_data->rel_hdr.sh_size = (sec->reloc_count
- * sizeof (Elf64_External_Rela));
- elfsec_data->rel_hdr.sh_entsize = sizeof (Elf64_External_Rela);
+ elfsec_data->rela.hdr = bfd_zalloc (sec->owner,
+ sizeof (Elf_Internal_Shdr));
+ if (elfsec_data->rela.hdr == NULL)
+ return NULL;
+ elfsec_data->rela.hdr->sh_size = (sec->reloc_count
+ * sizeof (Elf64_External_Rela));
+ elfsec_data->rela.hdr->sh_entsize = sizeof (Elf64_External_Rela);
sec->reloc_count = 0;
}
relocs += sec->reloc_count;
return relocs;
}
+/* Return the adjustment that a stub for STUB_ENTRY must apply to r2,
+   ie. the target section's toc base minus the stub group's toc base.
+   Returns 0 on error after reporting via the einfo callback or
+   setting a bfd error.  NOTE(review): a genuine zero delta is
+   indistinguishable from failure; callers treat 0 as error.  */
+static bfd_vma
+get_r2off (struct bfd_link_info *info,
+ struct ppc_stub_hash_entry *stub_entry)
+{
+ struct ppc_link_hash_table *htab = ppc_hash_table (info);
+ bfd_vma r2off = htab->stub_group[stub_entry->target_section->id].toc_off;
+
+ if (r2off == 0)
+ {
+ /* Support linking -R objects. Get the toc pointer from the
+ opd entry. */
+ char buf[8];
+ asection *opd = stub_entry->h->elf.root.u.def.section;
+ bfd_vma opd_off = stub_entry->h->elf.root.u.def.value;
+
+ if (strcmp (opd->name, ".opd") != 0
+ || opd->reloc_count != 0)
+ {
+ info->callbacks->einfo (_("%P: cannot find opd entry toc for %s\n"),
+ stub_entry->h->elf.root.root.string);
+ bfd_set_error (bfd_error_bad_value);
+ return 0;
+ }
+ /* The second doubleword of the opd entry holds the function's
+    toc pointer.  */
+ if (!bfd_get_section_contents (opd->owner, opd, buf, opd_off + 8, 8))
+ return 0;
+ r2off = bfd_get_64 (opd->owner, buf);
+ r2off -= elf_gp (info->output_bfd);
+ }
+ r2off -= htab->stub_group[stub_entry->id_sec->id].toc_off;
+ return r2off;
+}
+
static bfd_boolean
ppc_build_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
{
size = 4;
if (stub_entry->stub_type == ppc_stub_long_branch_r2off)
{
- bfd_vma r2off;
+ bfd_vma r2off = get_r2off (info, stub_entry);
- r2off = (htab->stub_group[stub_entry->target_section->id].toc_off
- - htab->stub_group[stub_entry->id_sec->id].toc_off);
+ if (r2off == 0)
+ {
+ htab->stub_error = TRUE;
+ return FALSE;
+ }
bfd_put_32 (htab->stub_bfd, STD_R2_40R1, loc);
loc += 4;
size = 12;
if (off + (1 << 25) >= (bfd_vma) (1 << 26))
{
- (*_bfd_error_handler) (_("long branch stub `%s' offset overflow"),
- stub_entry->root.string);
+ info->callbacks->einfo (_("%P: long branch stub `%s' offset overflow\n"),
+ stub_entry->root.string);
htab->stub_error = TRUE;
return FALSE;
}
FALSE, FALSE);
if (br_entry == NULL)
{
- (*_bfd_error_handler) (_("can't find branch stub `%s'"),
- stub_entry->root.string);
+ info->callbacks->einfo (_("%P: can't find branch stub `%s'\n"),
+ stub_entry->root.string);
htab->stub_error = TRUE;
return FALSE;
}
if (off + 0x80008000 > 0xffffffff || (off & 7) != 0)
{
- (*_bfd_error_handler)
- (_("linkage table error against `%s'"),
+ info->callbacks->einfo
+ (_("%P: linkage table error against `%s'\n"),
stub_entry->root.string);
bfd_set_error (bfd_error_bad_value);
htab->stub_error = TRUE;
}
else
{
- bfd_vma r2off;
+ bfd_vma r2off = get_r2off (info, stub_entry);
+
+ if (r2off == 0)
+ {
+ htab->stub_error = TRUE;
+ return FALSE;
+ }
- r2off = (htab->stub_group[stub_entry->target_section->id].toc_off
- - htab->stub_group[stub_entry->id_sec->id].toc_off);
bfd_put_32 (htab->stub_bfd, STD_R2_40R1, loc);
loc += 4;
size = 20;
break;
case ppc_stub_plt_call:
+ case ppc_stub_plt_call_r2save:
if (stub_entry->h != NULL
&& stub_entry->h->is_func_descriptor
&& stub_entry->h->oh != NULL)
these checks could now disappear. */
if (fh->elf.root.type == bfd_link_hash_undefined)
fh->elf.root.type = bfd_link_hash_undefweak;
+ /* Stop undo_symbol_twiddle changing it back to undefined. */
+ fh->was_undefined = 0;
}
/* Now build the stub. */
if (off + 0x80008000 > 0xffffffff || (off & 7) != 0)
{
- (*_bfd_error_handler)
- (_("linkage table error against `%s'"),
+ info->callbacks->einfo
+ (_("%P: linkage table error against `%s'\n"),
stub_entry->h != NULL
? stub_entry->h->elf.root.root.string
: "<local sym>");
return FALSE;
}
+ if (htab->plt_stub_align != 0)
+ {
+ unsigned pad = plt_stub_pad (htab, stub_entry, off);
+
+ stub_entry->stub_sec->size += pad;
+ stub_entry->stub_offset = stub_entry->stub_sec->size;
+ loc += pad;
+ }
+
r = NULL;
if (info->emitrelocations)
{
r = get_relocs (stub_entry->stub_sec,
- (2 + (PPC_HA (off) != 0)
- + (PPC_HA (off + 16) == PPC_HA (off))));
+ (2
+ + (PPC_HA (off) != 0)
+ + (htab->plt_static_chain
+ && PPC_HA (off + 16) == PPC_HA (off))));
if (r == NULL)
return FALSE;
r[0].r_offset = loc - stub_entry->stub_sec->contents;
&& (stub_entry->h == htab->tls_get_addr_fd
|| stub_entry->h == htab->tls_get_addr)
&& !htab->no_tls_get_addr_opt)
- p = build_tls_get_addr_stub (htab->stub_bfd, loc, off, r);
+ p = build_tls_get_addr_stub (htab, stub_entry, loc, off, r);
else
- p = build_plt_stub (htab->stub_bfd, loc, off, r);
+ p = build_plt_stub (htab, stub_entry, loc, off, r);
size = p - loc;
break;
"long_branch_r2off",
"plt_branch",
"plt_branch_r2off",
+ "plt_call",
"plt_call" };
len1 = strlen (stub_str[stub_entry->stub_type - 1]);
if (htab == NULL)
return FALSE;
- if (stub_entry->stub_type == ppc_stub_plt_call)
+ if (stub_entry->stub_type == ppc_stub_plt_call
+ || stub_entry->stub_type == ppc_stub_plt_call_r2save)
{
asection *plt;
off = stub_entry->plt_ent->plt.offset & ~(bfd_vma) 1;
- elf_gp (plt->output_section->owner)
- htab->stub_group[stub_entry->id_sec->id].toc_off);
- size = PLT_CALL_STUB_SIZE;
- if (PPC_HA (off) == 0)
- size -= 4;
- if (PPC_HA (off + 16) != PPC_HA (off))
- size += 4;
- if (stub_entry->h != NULL
- && (stub_entry->h == htab->tls_get_addr_fd
- || stub_entry->h == htab->tls_get_addr)
- && !htab->no_tls_get_addr_opt)
- size += 13 * 4;
+ size = plt_stub_size (htab, stub_entry, off);
+ if (htab->plt_stub_align)
+ size += plt_stub_pad (htab, stub_entry, off);
if (info->emitrelocations)
{
stub_entry->stub_sec->reloc_count
- += 2 + (PPC_HA (off) != 0) + (PPC_HA (off + 16) == PPC_HA (off));
+ += (2
+ + (PPC_HA (off) != 0)
+ + (htab->plt_static_chain
+ && PPC_HA (off + 16) == PPC_HA (off)));
stub_entry->stub_sec->flags |= SEC_RELOC;
}
}
size = 4;
if (stub_entry->stub_type == ppc_stub_long_branch_r2off)
{
- r2off = (htab->stub_group[stub_entry->target_section->id].toc_off
- - htab->stub_group[stub_entry->id_sec->id].toc_off);
+ r2off = get_r2off (info, stub_entry);
+ if (r2off == 0)
+ {
+ htab->stub_error = TRUE;
+ return FALSE;
+ }
size = 12;
if (PPC_HA (r2off) != 0)
size = 16;
TRUE, FALSE);
if (br_entry == NULL)
{
- (*_bfd_error_handler) (_("can't build branch stub `%s'"),
- stub_entry->root.string);
+ info->callbacks->einfo (_("%P: can't build branch stub `%s'\n"),
+ stub_entry->root.string);
htab->stub_error = TRUE;
return FALSE;
}
if (h->root.type == bfd_link_hash_indirect)
return TRUE;
- if (h->root.type == bfd_link_hash_warning)
- h = (struct elf_link_hash_entry *) h->root.u.i.link;
-
merge_got_entries (&h->got.glist);
return TRUE;
if (h->root.type == bfd_link_hash_indirect)
return TRUE;
- if (h->root.type == bfd_link_hash_warning)
- h = (struct elf_link_hash_entry *) h->root.u.i.link;
-
for (gent = h->got.glist; gent != NULL; gent = gent->next)
if (!gent->is_indirect)
allocate_got (h, (struct bfd_link_info *) inf, gent);
if (elf_gp (isec->owner) != 0)
htab->toc_curr = elf_gp (isec->owner);
}
- else if (!isec->call_check_done
- && toc_adjusting_stub_needed (info, isec) < 0)
- return FALSE;
+ else
+ {
+ if (!isec->call_check_done
+ && toc_adjusting_stub_needed (info, isec) < 0)
+ return FALSE;
+ /* If we make a local call from this section, ie. a branch
+ without a following nop, then we have no place to put a
+ toc restoring insn. We must use the same toc group as
+ the callee.
+ Testing makes_toc_func_call actually tests for *any*
+ calls to functions that need a good toc pointer. A more
+ precise test would be better, as this one will set
+ incorrect values for pasted .init/.fini fragments.
+ (Fixed later in check_pasted_section.) */
+ if (isec->makes_toc_func_call
+ && elf_gp (isec->owner) != 0)
+ htab->toc_curr = elf_gp (isec->owner);
+ }
}
/* Functions that don't use the TOC can belong in any TOC group.
- Use the last TOC base. This happens to make _init and _fini
- pasting work, because the fragments generally don't use the TOC. */
+ Use the last TOC base. */
htab->stub_group[isec->id].toc_off = htab->toc_curr;
return TRUE;
}
else if (toc_off != htab->stub_group[i->id].toc_off)
return FALSE;
}
+
+ if (toc_off == 0)
+ for (i = o->map_head.s; i != NULL; i = i->map_head.s)
+ if (i->makes_toc_func_call)
+ {
+ toc_off = htab->stub_group[i->id].toc_off;
+ break;
+ }
+
/* Make sure the whole pasted function uses the same toc offset. */
if (toc_off != 0)
for (i = o->map_head.s; i != NULL; i = i->map_head.s)
#undef PREV_SEC
}
+/* Template CIE for the .eh_frame FDEs covering the linker-generated
+   glink and stub code.  The length word is rewritten for target
+   endianness when the frame data is emitted.  */
+static const unsigned char glink_eh_frame_cie[] =
+{
+ 0, 0, 0, 16, /* length. */
+ 0, 0, 0, 0, /* id. */
+ 1, /* CIE version. */
+ 'z', 'R', 0, /* Augmentation string. */
+ 4, /* Code alignment. */
+ 0x78, /* Data alignment: -8 as SLEB128. */
+ 65, /* RA reg. */
+ 1, /* Augmentation size. */
+ DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding. */
+ DW_CFA_def_cfa, 1, 0 /* def_cfa: r1 offset 0. */
+};
+
+/* Stripping output sections is normally done before dynamic section
+ symbols have been allocated. This function is called later, and
+ handles cases like htab->brlt which is mapped to its own output
+ section. Strips ISEC's output section from the output if empty. */
+
+static void
+maybe_strip_output (struct bfd_link_info *info, asection *isec)
+{
+ /* Only strip when ISEC and its whole output section are empty, the
+    output section is still on the output section list, and no
+    dynamic symbol has been allocated for it.  */
+ if (isec->size == 0
+ && isec->output_section->size == 0
+ && !bfd_section_removed_from_list (info->output_bfd,
+ isec->output_section)
+ && elf_section_data (isec->output_section)->dynindx == 0)
+ {
+ isec->output_section->flags |= SEC_EXCLUDE;
+ bfd_section_list_remove (info->output_bfd, isec->output_section);
+ info->output_bfd->section_count--;
+ }
+}
+
/* Determine and set the size of the stub section for a final link.
The basic idea here is to examine all the relocations looking for
instruction. */
bfd_boolean
-ppc64_elf_size_stubs (struct bfd_link_info *info, bfd_signed_vma group_size)
+ppc64_elf_size_stubs (struct bfd_link_info *info, bfd_signed_vma group_size,
+ bfd_boolean plt_static_chain, int plt_thread_safe,
+ int plt_stub_align)
{
bfd_size_type stub_group_size;
bfd_boolean stubs_always_before_branch;
if (htab == NULL)
return FALSE;
+ htab->plt_static_chain = plt_static_chain;
+ htab->plt_stub_align = plt_stub_align;
+ if (plt_thread_safe == -1)
+ {
+ const char *const thread_starter[] =
+ {
+ "pthread_create",
+ /* libstdc++ */
+ "_ZNSt6thread15_M_start_threadESt10shared_ptrINS_10_Impl_baseEE",
+ /* librt */
+ "aio_init", "aio_read", "aio_write", "aio_fsync", "lio_listio",
+ "mq_notify", "create_timer",
+ /* libanl */
+ "getaddrinfo_a",
+ /* libgomp */
+ "GOMP_parallel_start",
+ "GOMP_parallel_loop_static_start",
+ "GOMP_parallel_loop_dynamic_start",
+ "GOMP_parallel_loop_guided_start",
+ "GOMP_parallel_loop_runtime_start",
+ "GOMP_parallel_sections_start",
+ };
+ unsigned i;
+
+ for (i = 0; i < sizeof (thread_starter)/ sizeof (thread_starter[0]); i++)
+ {
+ struct elf_link_hash_entry *h;
+ h = elf_link_hash_lookup (&htab->elf, thread_starter[i],
+ FALSE, FALSE, TRUE);
+ plt_thread_safe = h != NULL && h->ref_regular;
+ if (plt_thread_safe)
+ break;
+ }
+ }
+ htab->plt_thread_safe = plt_thread_safe;
stubs_always_before_branch = group_size < 0;
if (group_size < 0)
stub_group_size = -group_size;
continue;
}
+ if (stub_type == ppc_stub_plt_call
+ && irela + 1 < irelaend
+ && irela[1].r_offset == irela->r_offset + 4
+ && ELF64_R_TYPE (irela[1].r_info) == R_PPC64_TOCSAVE)
+ {
+ if (!tocsave_find (htab, INSERT,
+ &local_syms, irela + 1, input_bfd))
+ goto error_ret_free_internal;
+ }
+ else if (stub_type == ppc_stub_plt_call)
+ stub_type = ppc_stub_plt_call_r2save;
+
/* Support for grouping stub sections. */
id_sec = htab->stub_group[section->id].link_sec;
{
/* The proper stub has already been created. */
free (stub_name);
+ if (stub_type == ppc_stub_plt_call_r2save)
+ stub_entry->stub_type = stub_type;
continue;
}
- stub_entry = ppc_add_stub (stub_name, section, htab);
+ stub_entry = ppc_add_stub (stub_name, section, info);
if (stub_entry == NULL)
{
free (stub_name);
}
stub_entry->stub_type = stub_type;
- if (stub_type != ppc_stub_plt_call)
+ if (stub_type != ppc_stub_plt_call
+ && stub_type != ppc_stub_plt_call_r2save)
{
stub_entry->target_value = code_value;
stub_entry->target_section = code_sec;
htab->glink->flags |= SEC_RELOC;
}
+ if (htab->glink_eh_frame != NULL
+ && !bfd_is_abs_section (htab->glink_eh_frame->output_section)
+ && (htab->glink_eh_frame->flags & SEC_EXCLUDE) == 0)
+ {
+ bfd_size_type size = 0;
+
+ for (stub_sec = htab->stub_bfd->sections;
+ stub_sec != NULL;
+ stub_sec = stub_sec->next)
+ if ((stub_sec->flags & SEC_LINKER_CREATED) == 0)
+ size += 20;
+ if (htab->glink != NULL && htab->glink->size != 0)
+ size += 24;
+ if (size != 0)
+ size += sizeof (glink_eh_frame_cie);
+ htab->glink_eh_frame->rawsize = htab->glink_eh_frame->size;
+ htab->glink_eh_frame->size = size;
+ }
+
+ if (htab->plt_stub_align != 0)
+ for (stub_sec = htab->stub_bfd->sections;
+ stub_sec != NULL;
+ stub_sec = stub_sec->next)
+ if ((stub_sec->flags & SEC_LINKER_CREATED) == 0)
+ stub_sec->size = ((stub_sec->size + (1 << htab->plt_stub_align) - 1)
+ & (-1 << htab->plt_stub_align));
+
for (stub_sec = htab->stub_bfd->sections;
stub_sec != NULL;
stub_sec = stub_sec->next)
/* Exit from this loop when no stubs have been added, and no stubs
have changed size. */
- if (stub_sec == NULL)
+ if (stub_sec == NULL
+ && (htab->glink_eh_frame == NULL
+ || htab->glink_eh_frame->rawsize == htab->glink_eh_frame->size))
break;
/* Ask the linker to do its stuff. */
(*htab->layout_sections_again) ();
}
- /* It would be nice to strip htab->brlt from the output if the
- section is empty, but it's too late. If we strip sections here,
- the dynamic symbol table is corrupted since the section symbol
- for the stripped section isn't written. */
+ maybe_strip_output (info, htab->brlt);
+ if (htab->glink_eh_frame != NULL)
+ maybe_strip_output (info, htab->glink_eh_frame);
return TRUE;
}
return FALSE;
}
+ if (htab->glink_eh_frame != NULL
+ && htab->glink_eh_frame->size != 0)
+ {
+ bfd_vma val;
+
+ p = bfd_zalloc (htab->glink_eh_frame->owner, htab->glink_eh_frame->size);
+ if (p == NULL)
+ return FALSE;
+ htab->glink_eh_frame->contents = p;
+
+ htab->glink_eh_frame->rawsize = htab->glink_eh_frame->size;
+
+ memcpy (p, glink_eh_frame_cie, sizeof (glink_eh_frame_cie));
+ /* CIE length (rewrite in case little-endian). */
+ bfd_put_32 (htab->elf.dynobj, sizeof (glink_eh_frame_cie) - 4, p);
+ p += sizeof (glink_eh_frame_cie);
+
+ for (stub_sec = htab->stub_bfd->sections;
+ stub_sec != NULL;
+ stub_sec = stub_sec->next)
+ if ((stub_sec->flags & SEC_LINKER_CREATED) == 0)
+ {
+ /* FDE length. */
+ bfd_put_32 (htab->elf.dynobj, 16, p);
+ p += 4;
+ /* CIE pointer. */
+ val = p - htab->glink_eh_frame->contents;
+ bfd_put_32 (htab->elf.dynobj, val, p);
+ p += 4;
+ /* Offset to stub section. */
+ val = (stub_sec->output_section->vma
+ + stub_sec->output_offset);
+ val -= (htab->glink_eh_frame->output_section->vma
+ + htab->glink_eh_frame->output_offset);
+ val -= p - htab->glink_eh_frame->contents;
+ if (val + 0x80000000 > 0xffffffff)
+ {
+ info->callbacks->einfo
+ (_("%P: %s offset too large for .eh_frame sdata4 encoding"),
+ stub_sec->name);
+ return FALSE;
+ }
+ bfd_put_32 (htab->elf.dynobj, val, p);
+ p += 4;
+ /* stub section size. */
+ bfd_put_32 (htab->elf.dynobj, stub_sec->rawsize, p);
+ p += 4;
+ /* Augmentation. */
+ p += 1;
+ /* Pad. */
+ p += 3;
+ }
+ if (htab->glink != NULL && htab->glink->size != 0)
+ {
+ /* FDE length. */
+ bfd_put_32 (htab->elf.dynobj, 20, p);
+ p += 4;
+ /* CIE pointer. */
+ val = p - htab->glink_eh_frame->contents;
+ bfd_put_32 (htab->elf.dynobj, val, p);
+ p += 4;
+ /* Offset to .glink. */
+ val = (htab->glink->output_section->vma
+ + htab->glink->output_offset
+ + 8);
+ val -= (htab->glink_eh_frame->output_section->vma
+ + htab->glink_eh_frame->output_offset);
+ val -= p - htab->glink_eh_frame->contents;
+ if (val + 0x80000000 > 0xffffffff)
+ {
+ info->callbacks->einfo
+ (_("%P: %s offset too large for .eh_frame sdata4 encoding"),
+ htab->glink->name);
+ return FALSE;
+ }
+ bfd_put_32 (htab->elf.dynobj, val, p);
+ p += 4;
+ /* .glink size. */
+ bfd_put_32 (htab->elf.dynobj, htab->glink->rawsize - 8, p);
+ p += 4;
+ /* Augmentation. */
+ p += 1;
+
+ *p++ = DW_CFA_advance_loc + 1;
+ *p++ = DW_CFA_register;
+ *p++ = 65;
+ *p++ = 12;
+ *p++ = DW_CFA_advance_loc + 4;
+ *p++ = DW_CFA_restore_extended;
+ *p++ = 65;
+ }
+ htab->glink_eh_frame->size = p - htab->glink_eh_frame->contents;
+ }
+
/* Build the stubs as directed by the stub hash table. */
bfd_hash_traverse (&htab->stub_hash_table, ppc_build_one_stub, info);
if (htab->relbrlt != NULL)
htab->relbrlt->reloc_count = 0;
+ if (htab->plt_stub_align != 0)
+ for (stub_sec = htab->stub_bfd->sections;
+ stub_sec != NULL;
+ stub_sec = stub_sec->next)
+ if ((stub_sec->flags & SEC_LINKER_CREATED) == 0)
+ stub_sec->size = ((stub_sec->size + (1 << htab->plt_stub_align) - 1)
+ & (-1 << htab->plt_stub_align));
+
for (stub_sec = htab->stub_bfd->sections;
stub_sec != NULL;
stub_sec = stub_sec->next)
}
if (stub_sec != NULL
- || htab->glink->rawsize != htab->glink->size)
+ || htab->glink->rawsize != htab->glink->size
+ || (htab->glink_eh_frame != NULL
+ && htab->glink_eh_frame->rawsize != htab->glink_eh_frame->size))
{
htab->stub_error = TRUE;
- (*_bfd_error_handler) (_("stubs don't match calculated size"));
+ info->callbacks->einfo (_("%P: stubs don't match calculated size\n"));
}
if (htab->stub_error)
" toc adjust %lu\n"
" long branch %lu\n"
" long toc adj %lu\n"
- " plt call %lu"),
+ " plt call %lu\n"
+ " plt call toc %lu"),
stub_sec_count,
stub_sec_count == 1 ? "" : "s",
htab->stub_count[ppc_stub_long_branch - 1],
htab->stub_count[ppc_stub_long_branch_r2off - 1],
htab->stub_count[ppc_stub_plt_branch - 1],
htab->stub_count[ppc_stub_plt_branch_r2off - 1],
- htab->stub_count[ppc_stub_plt_call - 1]);
+ htab->stub_count[ppc_stub_plt_call - 1],
+ htab->stub_count[ppc_stub_plt_call_r2save - 1]);
}
return TRUE;
}
if (h->root.type == bfd_link_hash_indirect)
return TRUE;
- if (h->root.type == bfd_link_hash_warning)
- h = (struct elf_link_hash_entry *) h->root.u.i.link;
-
eh = (struct ppc_link_hash_entry *) h;
if (eh->elf.root.type != bfd_link_hash_undefweak || !eh->was_undefined)
return TRUE;
return _bfd_elf_default_action_discarded (sec);
}
-/* REL points to a low-part reloc on a largetoc instruction sequence.
- Find the matching high-part reloc instruction and verify that it
- is addis REG,x,imm. If so, set *REG to x and return a pointer to
- the high-part reloc. */
-
-static const Elf_Internal_Rela *
-ha_reloc_match (const Elf_Internal_Rela *relocs,
- const Elf_Internal_Rela *rel,
- unsigned int *reg,
- bfd_boolean match_addend,
- const bfd *input_bfd,
- const bfd_byte *contents)
-{
- enum elf_ppc64_reloc_type r_type, r_type_ha;
- bfd_vma r_info_ha, r_addend;
-
- r_type = ELF64_R_TYPE (rel->r_info);
- switch (r_type)
- {
- case R_PPC64_GOT_TLSLD16_LO:
- case R_PPC64_GOT_TLSGD16_LO:
- case R_PPC64_GOT_TPREL16_LO_DS:
- case R_PPC64_GOT_DTPREL16_LO_DS:
- case R_PPC64_GOT16_LO:
- case R_PPC64_TOC16_LO:
- r_type_ha = r_type + 2;
- break;
- case R_PPC64_GOT16_LO_DS:
- r_type_ha = R_PPC64_GOT16_HA;
- break;
- case R_PPC64_TOC16_LO_DS:
- r_type_ha = R_PPC64_TOC16_HA;
- break;
- default:
- abort ();
- }
- r_info_ha = ELF64_R_INFO (ELF64_R_SYM (rel->r_info), r_type_ha);
- r_addend = rel->r_addend;
-
- while (--rel >= relocs)
- if (rel->r_info == r_info_ha
- && (!match_addend
- || rel->r_addend == r_addend))
- {
- const bfd_byte *p = contents + (rel->r_offset & ~3);
- unsigned int insn = bfd_get_32 (input_bfd, p);
- if ((insn & (0x3f << 26)) == (15u << 26) /* addis rt,x,imm */
- && (insn & (0x1f << 21)) == (*reg << 21))
- {
- *reg = (insn >> 16) & 0x1f;
- return rel;
- }
- break;
- }
- return NULL;
-}
-
/* The RELOCATE_SECTION function is called by the ELF backend linker
to handle the relocations for a section.
Elf_Internal_Rela outrel;
bfd_byte *loc;
struct got_entry **local_got_ents;
- unsigned char *ha_opt;
bfd_vma TOCstart;
- bfd_boolean no_ha_opt;
bfd_boolean ret = TRUE;
bfd_boolean is_opd;
- /* Disabled until we sort out how ld should choose 'y' vs 'at'. */
- bfd_boolean is_power4 = FALSE;
+ /* Assume ISA v2 'at' branch hints rather than the old Power4 'y' bit. */
+ bfd_boolean is_isa_v2 = TRUE;
bfd_vma d_offset = (bfd_big_endian (output_bfd) ? 2 : 0);
/* Initialize howto table if needed. */
symtab_hdr = &elf_symtab_hdr (input_bfd);
sym_hashes = elf_sym_hashes (input_bfd);
is_opd = ppc64_elf_section_data (input_section)->sec_type == sec_opd;
- ha_opt = NULL;
- no_ha_opt = FALSE;
rel = relocs;
relend = relocs + input_section->reloc_count;
bfd_boolean unresolved_reloc;
bfd_boolean warned;
unsigned int insn;
- bfd_vma mask;
+ unsigned int mask;
struct ppc_stub_hash_entry *stub_entry;
bfd_vma max_br_offset;
bfd_vma from;
h = (struct ppc_link_hash_entry *) h_elf;
if (sec != NULL && elf_discarded_section (sec))
- {
- /* For relocs against symbols from removed linkonce sections,
- or sections discarded by a linker script, we just want the
- section contents zeroed. Avoid any special processing. */
- _bfd_clear_contents (ppc64_elf_howto_table[r_type], input_bfd,
- contents + rel->r_offset);
- rel->r_info = 0;
- rel->r_addend = 0;
- continue;
- }
+ RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
+ rel, relend,
+ ppc64_elf_howto_table[r_type],
+ contents);
if (info->relocatable)
continue;
/* Check that tls relocs are used with tls syms, and non-tls
relocs are used with non-tls syms. */
- if (r_symndx != 0
+ if (r_symndx != STN_UNDEF
&& r_type != R_PPC64_NONE
&& (h == NULL
|| h->elf.root.type == bfd_link_hash_defined
/* R_PPC64_TLS is OK against a symbol in the TOC. */
;
else
- (*_bfd_error_handler)
+ info->callbacks->einfo
(!IS_PPC64_TLS_RELOC (r_type)
- ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
- : _("%B(%A+0x%lx): %s used with non-TLS symbol %s"),
- input_bfd,
- input_section,
- (long) rel->r_offset,
+ ? _("%P: %H: %s used with TLS symbol %s\n")
+ : _("%P: %H: %s used with non-TLS symbol %s\n"),
+ input_bfd, input_section, rel->r_offset,
ppc64_elf_howto_table[r_type]->name,
sym_name);
}
if (local_sections[r_symndx] == sec)
break;
if (r_symndx >= symtab_hdr->sh_info)
- r_symndx = 0;
+ r_symndx = STN_UNDEF;
rel->r_addend = htab->elf.tls_sec->vma + DTP_OFFSET;
- if (r_symndx != 0)
+ if (r_symndx != STN_UNDEF)
rel->r_addend -= (local_syms[r_symndx].st_value
+ sec->output_offset
+ sec->output_section->vma);
if (local_sections[r_symndx] == sec)
break;
if (r_symndx >= symtab_hdr->sh_info)
- r_symndx = 0;
+ r_symndx = STN_UNDEF;
rel->r_addend = htab->elf.tls_sec->vma + DTP_OFFSET;
- if (r_symndx != 0)
+ if (r_symndx != STN_UNDEF)
rel->r_addend -= (local_syms[r_symndx].st_value
+ sec->output_offset
+ sec->output_section->vma);
default:
break;
+ case R_PPC64_TOCSAVE:
+ if (relocation + addend == (rel->r_offset
+ + input_section->output_offset
+ + input_section->output_section->vma)
+ && tocsave_find (htab, NO_INSERT,
+ &local_syms, rel, input_bfd))
+ {
+ insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
+ if (insn == NOP
+ || insn == CROR_151515 || insn == CROR_313131)
+ bfd_put_32 (input_bfd, STD_R2_40R1,
+ contents + rel->r_offset);
+ }
+ break;
+
/* Branch taken prediction relocations. */
case R_PPC64_ADDR14_BRTAKEN:
case R_PPC64_REL14_BRTAKEN:
stub_entry = ppc_get_stub_entry (input_section, sec, fdh, rel, htab);
if (stub_entry != NULL
&& (stub_entry->stub_type == ppc_stub_plt_call
+ || stub_entry->stub_type == ppc_stub_plt_call_r2save
|| stub_entry->stub_type == ppc_stub_plt_branch_r2off
|| stub_entry->stub_type == ppc_stub_long_branch_r2off))
{
if (!can_plt_call)
{
- if (stub_entry->stub_type == ppc_stub_plt_call)
+ if (stub_entry->stub_type == ppc_stub_plt_call
+ || stub_entry->stub_type == ppc_stub_plt_call_r2save)
{
/* If this is a plain branch rather than a branch
and link, don't require a nop. However, don't
".init") == 0
|| strcmp (input_section->output_section->name,
".fini") == 0)
- (*_bfd_error_handler)
- (_("%B(%A+0x%lx): automatic multiple TOCs "
+ info->callbacks->einfo
+ (_("%P: %H: automatic multiple TOCs "
"not supported using your crt files; "
- "recompile with -mminimal-toc or upgrade gcc"),
- input_bfd,
- input_section,
- (long) rel->r_offset);
+ "recompile with -mminimal-toc or upgrade gcc\n"),
+ input_bfd, input_section, rel->r_offset);
else
- (*_bfd_error_handler)
- (_("%B(%A+0x%lx): sibling call optimization to `%s' "
+ info->callbacks->einfo
+ (_("%P: %H: sibling call optimization to `%s' "
"does not allow automatic multiple TOCs; "
"recompile with -mminimal-toc or "
"-fno-optimize-sibling-calls, "
- "or make `%s' extern"),
- input_bfd,
- input_section,
- (long) rel->r_offset,
+ "or make `%s' extern\n"),
+ input_bfd, input_section, rel->r_offset,
sym_name,
sym_name);
bfd_set_error (bfd_error_bad_value);
}
if (can_plt_call
- && stub_entry->stub_type == ppc_stub_plt_call)
+ && (stub_entry->stub_type == ppc_stub_plt_call
+ || stub_entry->stub_type == ppc_stub_plt_call_r2save))
unresolved_reloc = FALSE;
}
+ stub_entry->stub_sec->output_offset
+ stub_entry->stub_sec->output_section->vma);
addend = 0;
+
+ if ((stub_entry->stub_type == ppc_stub_plt_call
+ || stub_entry->stub_type == ppc_stub_plt_call_r2save)
+ && (ALWAYS_EMIT_R2SAVE
+ || stub_entry->stub_type == ppc_stub_plt_call_r2save)
+ && rel + 1 < relend
+ && rel[1].r_offset == rel->r_offset + 4
+ && ELF64_R_TYPE (rel[1].r_info) == R_PPC64_TOCSAVE)
+ relocation += 4;
}
if (insn != 0)
{
- if (is_power4)
+ if (is_isa_v2)
{
/* Set 'a' bit. This is 0b00010 in BO field for branch
on CR(BI) insns (BO == 001at or 011at), and 0b01000
switch (r_type)
{
default:
- (*_bfd_error_handler)
- (_("%B: unknown relocation type %d for symbol %s"),
+ info->callbacks->einfo
+ (_("%P: %B: unknown relocation type %d for symbol %s\n"),
input_bfd, (int) r_type, sym_name);
bfd_set_error (bfd_error_bad_value);
case R_PPC64_TLS:
case R_PPC64_TLSGD:
case R_PPC64_TLSLD:
+ case R_PPC64_TOCSAVE:
case R_PPC64_GNU_VTINHERIT:
case R_PPC64_GNU_VTENTRY:
continue;
case R_PPC64_TOC:
/* Relocation value is TOC base. */
relocation = TOCstart;
- if (r_symndx == 0)
+ if (r_symndx == STN_UNDEF)
relocation += htab->stub_group[input_section->id].toc_off;
else if (unresolved_reloc)
;
? h->elf.type == STT_GNU_IFUNC
: ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
{
- (*_bfd_error_handler)
- (_("%B(%A+0x%lx): relocation %s for indirect "
- "function %s unsupported"),
- input_bfd,
- input_section,
- (long) rel->r_offset,
+ info->callbacks->einfo
+ (_("%P: %H: relocation %s for indirect "
+ "function %s unsupported\n"),
+ input_bfd, input_section, rel->r_offset,
ppc64_elf_howto_table[r_type]->name,
sym_name);
ret = FALSE;
}
- else if (r_symndx == 0 || bfd_is_abs_section (sec))
+ else if (r_symndx == STN_UNDEF || bfd_is_abs_section (sec))
;
else if (sec == NULL || sec->owner == NULL)
{
case R_PPC64_PLTREL64:
/* These ones haven't been implemented yet. */
- (*_bfd_error_handler)
- (_("%B: relocation %s is not supported for symbol %s."),
+ info->callbacks->einfo
+ (_("%P: %B: relocation %s is not supported for symbol %s\n"),
input_bfd,
ppc64_elf_howto_table[r_type]->name, sym_name);
case R_PPC64_GOT_DTPREL16_HA:
case R_PPC64_GOT16_HA:
case R_PPC64_TOC16_HA:
- /* nop is done later. */
+ if (htab->do_toc_opt && relocation + addend + 0x8000 < 0x10000
+ && !ppc64_elf_tdata (input_bfd)->unexpected_toc_insn)
+ {
+ bfd_byte *p = contents + (rel->r_offset & ~3);
+ bfd_put_32 (input_bfd, NOP, p);
+ }
break;
case R_PPC64_GOT_TLSLD16_LO:
case R_PPC64_GOT16_LO_DS:
case R_PPC64_TOC16_LO:
case R_PPC64_TOC16_LO_DS:
- if (htab->do_toc_opt && relocation + addend + 0x8000 < 0x10000)
+ if (htab->do_toc_opt && relocation + addend + 0x8000 < 0x10000
+ && !ppc64_elf_tdata (input_bfd)->unexpected_toc_insn)
{
bfd_byte *p = contents + (rel->r_offset & ~3);
insn = bfd_get_32 (input_bfd, p);
- if ((insn & (0x3f << 26)) == 14u << 26 /* addi */
- || (insn & (0x3f << 26)) == 32u << 26 /* lwz */
- || (insn & (0x3f << 26)) == 34u << 26 /* lbz */
- || (insn & (0x3f << 26)) == 36u << 26 /* stw */
- || (insn & (0x3f << 26)) == 38u << 26 /* stb */
- || (insn & (0x3f << 26)) == 40u << 26 /* lhz */
- || (insn & (0x3f << 26)) == 42u << 26 /* lha */
- || (insn & (0x3f << 26)) == 44u << 26 /* sth */
- || (insn & (0x3f << 26)) == 46u << 26 /* lmw */
- || (insn & (0x3f << 26)) == 47u << 26 /* stmw */
- || (insn & (0x3f << 26)) == 48u << 26 /* lfs */
- || (insn & (0x3f << 26)) == 50u << 26 /* lfd */
- || (insn & (0x3f << 26)) == 52u << 26 /* stfs */
- || (insn & (0x3f << 26)) == 54u << 26 /* stfd */
- || ((insn & (0x3f << 26)) == 58u << 26 /* lwa,ld,lmd */
- && (insn & 3) != 1)
- || ((insn & (0x3f << 26)) == 62u << 26 /* std, stmd */
- && ((insn & 3) == 0 || (insn & 3) == 3)))
+ if ((insn & (0x3f << 26)) == 12u << 26 /* addic */)
{
- unsigned int reg = (insn >> 16) & 0x1f;
- const Elf_Internal_Rela *ha;
- bfd_boolean match_addend;
-
- match_addend = (sym != NULL
- && ELF_ST_TYPE (sym->st_info) == STT_SECTION);
- ha = ha_reloc_match (relocs, rel, ®, match_addend,
- input_bfd, contents);
- if (ha != NULL)
- {
- insn &= ~(0x1f << 16);
- insn |= reg << 16;
- bfd_put_32 (input_bfd, insn, p);
- if (ha_opt == NULL)
- {
- ha_opt = bfd_zmalloc (input_section->reloc_count);
- if (ha_opt == NULL)
- return FALSE;
- }
- ha_opt[ha - relocs] = 1;
- }
- else
- /* If we don't find a matching high part insn,
- something is fishy. Refuse to nop any high
- part insn in this section. */
- no_ha_opt = TRUE;
+ /* Rewrite addic to addi when redirecting to the toc reg; addic would also set the carry bit. */
+ insn &= ~((0x3f << 26) | (0x1f << 16));
+ insn |= (14u << 26) | (2 << 16);
}
+ else
+ {
+ insn &= ~(0x1f << 16);
+ insn |= 2 << 16;
+ }
+ bfd_put_32 (input_bfd, insn, p);
}
break;
}
mask = 15;
if (((relocation + addend) & mask) != 0)
{
- (*_bfd_error_handler)
- (_("%B: error: relocation %s not a multiple of %d"),
- input_bfd,
+ info->callbacks->einfo
+ (_("%P: %H: error: %s not a multiple of %u\n"),
+ input_bfd, input_section, rel->r_offset,
ppc64_elf_howto_table[r_type]->name,
mask + 1);
bfd_set_error (bfd_error_bad_value);
not process them. */
if (unresolved_reloc
&& !((input_section->flags & SEC_DEBUGGING) != 0
- && h->elf.def_dynamic))
+ && h->elf.def_dynamic)
+ && _bfd_elf_section_offset (output_bfd, info, input_section,
+ rel->r_offset) != (bfd_vma) -1)
{
- (*_bfd_error_handler)
- (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
- input_bfd,
- input_section,
- (long) rel->r_offset,
+ info->callbacks->einfo
+ (_("%P: %H: unresolvable %s relocation against symbol `%s'\n"),
+ input_bfd, input_section, rel->r_offset,
ppc64_elf_howto_table[(int) r_type]->name,
h->elf.root.root.string);
ret = FALSE;
}
else
{
- (*_bfd_error_handler)
- (_("%B(%A+0x%lx): %s reloc against `%s': error %d"),
- input_bfd,
- input_section,
- (long) rel->r_offset,
+ info->callbacks->einfo
+ (_("%P: %H: %s reloc against `%s': error %d\n"),
+ input_bfd, input_section, rel->r_offset,
ppc64_elf_howto_table[r_type]->name,
sym_name,
(int) r);
}
}
- if (ha_opt != NULL)
- {
- if (!no_ha_opt)
- {
- unsigned char *opt = ha_opt;
- rel = relocs;
- relend = relocs + input_section->reloc_count;
- for (; rel < relend; opt++, rel++)
- if (*opt != 0)
- {
- bfd_byte *p = contents + (rel->r_offset & ~3);
- bfd_put_32 (input_bfd, NOP, p);
- }
- }
- free (ha_opt);
- }
-
/* If we're emitting relocations, then shortly after this function
returns, reloc offsets and addends for this section will be
adjusted. Worse, reloc symbol indices will be for the output
&& htab->brlt->reloc_count != 0
&& !_bfd_elf_link_output_relocs (output_bfd,
htab->brlt,
- &elf_section_data (htab->brlt)->rel_hdr,
+ elf_section_data (htab->brlt)->rela.hdr,
elf_section_data (htab->brlt)->relocs,
NULL))
return FALSE;
&& htab->glink->reloc_count != 0
&& !_bfd_elf_link_output_relocs (output_bfd,
htab->glink,
- &elf_section_data (htab->glink)->rel_hdr,
+ elf_section_data (htab->glink)->rela.hdr,
elf_section_data (htab->glink)->relocs,
NULL))
return FALSE;
+
+ if (htab->glink_eh_frame != NULL
+ && htab->glink_eh_frame->sec_info_type == ELF_INFO_TYPE_EH_FRAME
+ && !_bfd_elf_write_section_eh_frame (output_bfd, info,
+ htab->glink_eh_frame,
+ htab->glink_eh_frame->contents))
+ return FALSE;
+
/* We need to handle writing out multiple GOT sections ourselves,
since we didn't add them to DYNOBJ. We know dynobj is the first
bfd. */
}
#include "elf64-target.h"
+
+/* FreeBSD support: instantiate a second big-endian target vector with the FreeBSD OSABI. */
+
+#undef TARGET_LITTLE_SYM
+#undef TARGET_LITTLE_NAME
+
+#undef TARGET_BIG_SYM
+#define TARGET_BIG_SYM bfd_elf64_powerpc_freebsd_vec
+#undef TARGET_BIG_NAME
+#define TARGET_BIG_NAME "elf64-powerpc-freebsd"
+
+#undef ELF_OSABI
+#define ELF_OSABI ELFOSABI_FREEBSD
+
+#undef elf64_bed
+#define elf64_bed elf64_powerpc_fbsd_bed
+
+#include "elf64-target.h"
+