/* Xtensa-specific support for 32-bit ELF.
- Copyright 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
+ Copyright (C) 2003-2015 Free Software Foundation, Inc.
This file is part of BFD, the Binary File Descriptor library.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
- published by the Free Software Foundation; either version 2 of the
+ published by the Free Software Foundation; either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but
static bfd_boolean xtensa_is_proptable_section (asection *);
static int internal_reloc_compare (const void *, const void *);
static int internal_reloc_matches (const void *, const void *);
-extern asection *xtensa_get_property_section (asection *, const char *);
+static asection *xtensa_get_property_section (asection *, const char *);
+extern asection *xtensa_make_property_section (asection *, const char *);
static flagword xtensa_get_property_predef_flags (asection *);
/* Other functions called directly by the linker. */
int elf32xtensa_no_literal_movement = 1;
+/* Rename one of the generic section flags to better document how it
+ is used here. */
+/* Whether relocations have been processed. */
+#define reloc_done sec_flg0
\f
static reloc_howto_type elf_howto_table[] =
{
- HOWTO (R_XTENSA_NONE, 0, 0, 0, FALSE, 0, complain_overflow_dont,
+ HOWTO (R_XTENSA_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
bfd_elf_xtensa_reloc, "R_XTENSA_NONE",
FALSE, 0, 0, FALSE),
HOWTO (R_XTENSA_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
bfd_elf_xtensa_reloc, "R_XTENSA_ASM_SIMPLIFY", FALSE, 0, 0, TRUE),
EMPTY_HOWTO (13),
- EMPTY_HOWTO (14),
+
+ HOWTO (R_XTENSA_32_PCREL, 0, 2, 32, TRUE, 0, complain_overflow_bitfield,
+ bfd_elf_xtensa_reloc, "R_XTENSA_32_PCREL",
+ FALSE, 0, 0xffffffff, TRUE),
/* GNU extension to record C++ vtable hierarchy. */
HOWTO (R_XTENSA_GNU_VTINHERIT, 0, 2, 0, FALSE, 0, complain_overflow_dont,
FALSE, 0, 0, FALSE),
/* Relocations for supporting difference of symbols. */
- HOWTO (R_XTENSA_DIFF8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
+ HOWTO (R_XTENSA_DIFF8, 0, 0, 8, FALSE, 0, complain_overflow_signed,
bfd_elf_xtensa_reloc, "R_XTENSA_DIFF8", FALSE, 0, 0xff, FALSE),
- HOWTO (R_XTENSA_DIFF16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
+ HOWTO (R_XTENSA_DIFF16, 0, 1, 16, FALSE, 0, complain_overflow_signed,
bfd_elf_xtensa_reloc, "R_XTENSA_DIFF16", FALSE, 0, 0xffff, FALSE),
- HOWTO (R_XTENSA_DIFF32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
+ HOWTO (R_XTENSA_DIFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
bfd_elf_xtensa_reloc, "R_XTENSA_DIFF32", FALSE, 0, 0xffffffff, FALSE),
/* General immediate operand relocations. */
bfd_elf_xtensa_reloc, "R_XTENSA_SLOT13_ALT", FALSE, 0, 0, TRUE),
HOWTO (R_XTENSA_SLOT14_ALT, 0, 0, 0, TRUE, 0, complain_overflow_dont,
bfd_elf_xtensa_reloc, "R_XTENSA_SLOT14_ALT", FALSE, 0, 0, TRUE),
+
+ /* TLS relocations. */
+ HOWTO (R_XTENSA_TLSDESC_FN, 0, 2, 32, FALSE, 0, complain_overflow_dont,
+ bfd_elf_xtensa_reloc, "R_XTENSA_TLSDESC_FN",
+ FALSE, 0, 0xffffffff, FALSE),
+ HOWTO (R_XTENSA_TLSDESC_ARG, 0, 2, 32, FALSE, 0, complain_overflow_dont,
+ bfd_elf_xtensa_reloc, "R_XTENSA_TLSDESC_ARG",
+ FALSE, 0, 0xffffffff, FALSE),
+ HOWTO (R_XTENSA_TLS_DTPOFF, 0, 2, 32, FALSE, 0, complain_overflow_dont,
+ bfd_elf_xtensa_reloc, "R_XTENSA_TLS_DTPOFF",
+ FALSE, 0, 0xffffffff, FALSE),
+ HOWTO (R_XTENSA_TLS_TPOFF, 0, 2, 32, FALSE, 0, complain_overflow_dont,
+ bfd_elf_xtensa_reloc, "R_XTENSA_TLS_TPOFF",
+ FALSE, 0, 0xffffffff, FALSE),
+ HOWTO (R_XTENSA_TLS_FUNC, 0, 0, 0, FALSE, 0, complain_overflow_dont,
+ bfd_elf_xtensa_reloc, "R_XTENSA_TLS_FUNC",
+ FALSE, 0, 0, FALSE),
+ HOWTO (R_XTENSA_TLS_ARG, 0, 0, 0, FALSE, 0, complain_overflow_dont,
+ bfd_elf_xtensa_reloc, "R_XTENSA_TLS_ARG",
+ FALSE, 0, 0, FALSE),
+ HOWTO (R_XTENSA_TLS_CALL, 0, 0, 0, FALSE, 0, complain_overflow_dont,
+ bfd_elf_xtensa_reloc, "R_XTENSA_TLS_CALL",
+ FALSE, 0, 0, FALSE),
};
#if DEBUG_GEN_RELOC
TRACE ("BFD_RELOC_32");
return &elf_howto_table[(unsigned) R_XTENSA_32 ];
+ case BFD_RELOC_32_PCREL:
+ TRACE ("BFD_RELOC_32_PCREL");
+ return &elf_howto_table[(unsigned) R_XTENSA_32_PCREL ];
+
case BFD_RELOC_XTENSA_DIFF8:
TRACE ("BFD_RELOC_XTENSA_DIFF8");
return &elf_howto_table[(unsigned) R_XTENSA_DIFF8 ];
TRACE ("BFD_RELOC_VTABLE_ENTRY");
return &elf_howto_table[(unsigned) R_XTENSA_GNU_VTENTRY ];
+ case BFD_RELOC_XTENSA_TLSDESC_FN:
+ TRACE ("BFD_RELOC_XTENSA_TLSDESC_FN");
+ return &elf_howto_table[(unsigned) R_XTENSA_TLSDESC_FN ];
+
+ case BFD_RELOC_XTENSA_TLSDESC_ARG:
+ TRACE ("BFD_RELOC_XTENSA_TLSDESC_ARG");
+ return &elf_howto_table[(unsigned) R_XTENSA_TLSDESC_ARG ];
+
+ case BFD_RELOC_XTENSA_TLS_DTPOFF:
+ TRACE ("BFD_RELOC_XTENSA_TLS_DTPOFF");
+ return &elf_howto_table[(unsigned) R_XTENSA_TLS_DTPOFF ];
+
+ case BFD_RELOC_XTENSA_TLS_TPOFF:
+ TRACE ("BFD_RELOC_XTENSA_TLS_TPOFF");
+ return &elf_howto_table[(unsigned) R_XTENSA_TLS_TPOFF ];
+
+ case BFD_RELOC_XTENSA_TLS_FUNC:
+ TRACE ("BFD_RELOC_XTENSA_TLS_FUNC");
+ return &elf_howto_table[(unsigned) R_XTENSA_TLS_FUNC ];
+
+ case BFD_RELOC_XTENSA_TLS_ARG:
+ TRACE ("BFD_RELOC_XTENSA_TLS_ARG");
+ return &elf_howto_table[(unsigned) R_XTENSA_TLS_ARG ];
+
+ case BFD_RELOC_XTENSA_TLS_CALL:
+ TRACE ("BFD_RELOC_XTENSA_TLS_CALL");
+ return &elf_howto_table[(unsigned) R_XTENSA_TLS_CALL ];
+
default:
if (code >= BFD_RELOC_XTENSA_SLOT0_OP
&& code <= BFD_RELOC_XTENSA_SLOT14_OP)
{
unsigned int r_type = ELF32_R_TYPE (dst->r_info);
- BFD_ASSERT (r_type < (unsigned int) R_XTENSA_max);
+ if (r_type >= (unsigned int) R_XTENSA_max)
+ {
+ _bfd_error_handler (_("%B: invalid XTENSA reloc number: %d"), abfd, r_type);
+ r_type = 0;
+ }
cache_ptr->howto = &elf_howto_table[r_type];
}
0 /* unused */
};
+/* The size of the thread control block. */
+#define TCB_SIZE 8
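+/* (Per the tpoff () computation below, the TLS block is assumed to start
+ TCB_SIZE bytes, rounded up to the TLS segment alignment, above the
+ thread pointer.) */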
+
+struct elf_xtensa_link_hash_entry
+{
+ struct elf_link_hash_entry elf;
+
+ bfd_signed_vma tlsfunc_refcount;
+
+#define GOT_UNKNOWN 0
+#define GOT_NORMAL 1
+#define GOT_TLS_GD 2 /* global or local dynamic */
+#define GOT_TLS_IE 4 /* initial or local exec */
+#define GOT_TLS_ANY (GOT_TLS_GD | GOT_TLS_IE)
+ unsigned char tls_type;
+};
+
+#define elf_xtensa_hash_entry(ent) ((struct elf_xtensa_link_hash_entry *)(ent))
+
+struct elf_xtensa_obj_tdata
+{
+ struct elf_obj_tdata root;
+
+ /* tls_type for each local got entry. */
+ char *local_got_tls_type;
+
+ bfd_signed_vma *local_tlsfunc_refcounts;
+};
+
+#define elf_xtensa_tdata(abfd) \
+ ((struct elf_xtensa_obj_tdata *) (abfd)->tdata.any)
+
+#define elf_xtensa_local_got_tls_type(abfd) \
+ (elf_xtensa_tdata (abfd)->local_got_tls_type)
+
+#define elf_xtensa_local_tlsfunc_refcounts(abfd) \
+ (elf_xtensa_tdata (abfd)->local_tlsfunc_refcounts)
+
+#define is_xtensa_elf(bfd) \
+ (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
+ && elf_tdata (bfd) != NULL \
+ && elf_object_id (bfd) == XTENSA_ELF_DATA)
+
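+/* Set up the Xtensa-specific tdata declared above so that the per-bfd
+ local GOT tls_type and tlsfunc refcount arrays can be attached to each
+ input file. */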
+static bfd_boolean
+elf_xtensa_mkobject (bfd *abfd)
+{
+ return bfd_elf_allocate_object (abfd, sizeof (struct elf_xtensa_obj_tdata),
+ XTENSA_ELF_DATA);
+}
+
/* Xtensa ELF linker hash table. */
struct elf_xtensa_link_hash_table
needed. It is OK if this count is an overestimate, e.g., some
relocations may be removed by GC. */
int plt_reloc_count;
+
+ struct elf_xtensa_link_hash_entry *tlsbase;
};
/* Get the Xtensa ELF linker hash table from a link_info structure. */
#define elf_xtensa_hash_table(p) \
- ((struct elf_xtensa_link_hash_table *) ((p)->hash))
+ (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
+ == XTENSA_ELF_DATA ? ((struct elf_xtensa_link_hash_table *) ((p)->hash)) : NULL)
+
+/* Create an entry in an Xtensa ELF linker hash table. */
+
+static struct bfd_hash_entry *
+elf_xtensa_link_hash_newfunc (struct bfd_hash_entry *entry,
+ struct bfd_hash_table *table,
+ const char *string)
+{
+ /* Allocate the structure if it has not already been allocated by a
+ subclass. */
+ if (entry == NULL)
+ {
+ entry = bfd_hash_allocate (table,
+ sizeof (struct elf_xtensa_link_hash_entry));
+ if (entry == NULL)
+ return entry;
+ }
+
+ /* Call the allocation method of the superclass. */
+ entry = _bfd_elf_link_hash_newfunc (entry, table, string);
+ if (entry != NULL)
+ {
+ struct elf_xtensa_link_hash_entry *eh = elf_xtensa_hash_entry (entry);
+ eh->tlsfunc_refcount = 0;
+ eh->tls_type = GOT_UNKNOWN;
+ }
+
+ return entry;
+}
/* Create an Xtensa ELF linker hash table. */
static struct bfd_link_hash_table *
elf_xtensa_link_hash_table_create (bfd *abfd)
{
+ struct elf_link_hash_entry *tlsbase;
struct elf_xtensa_link_hash_table *ret;
bfd_size_type amt = sizeof (struct elf_xtensa_link_hash_table);
- ret = bfd_malloc (amt);
+ ret = bfd_zmalloc (amt);
if (ret == NULL)
return NULL;
if (!_bfd_elf_link_hash_table_init (&ret->elf, abfd,
- _bfd_elf_link_hash_newfunc,
- sizeof (struct elf_link_hash_entry)))
+ elf_xtensa_link_hash_newfunc,
+ sizeof (struct elf_xtensa_link_hash_entry),
+ XTENSA_ELF_DATA))
{
free (ret);
return NULL;
}
- ret->sgot = NULL;
- ret->sgotplt = NULL;
- ret->srelgot = NULL;
- ret->splt = NULL;
- ret->srelplt = NULL;
- ret->sgotloc = NULL;
- ret->spltlittbl = NULL;
-
- ret->plt_reloc_count = 0;
+ /* Create a hash entry for "_TLS_MODULE_BASE_" to speed up checking
+ for it later. */
+ tlsbase = elf_link_hash_lookup (&ret->elf, "_TLS_MODULE_BASE_",
+ TRUE, FALSE, FALSE);
+ tlsbase->root.type = bfd_link_hash_new;
+ tlsbase->root.u.undef.abfd = NULL;
+ tlsbase->non_elf = 0;
+ ret->tlsbase = elf_xtensa_hash_entry (tlsbase);
+ ret->tlsbase->tls_type = GOT_UNKNOWN;
return &ret->elf.root;
}
+/* Copy the extra info we tack onto an elf_link_hash_entry. */
+
+static void
+elf_xtensa_copy_indirect_symbol (struct bfd_link_info *info,
+ struct elf_link_hash_entry *dir,
+ struct elf_link_hash_entry *ind)
+{
+ struct elf_xtensa_link_hash_entry *edir, *eind;
+
+ edir = elf_xtensa_hash_entry (dir);
+ eind = elf_xtensa_hash_entry (ind);
+
+ if (ind->root.type == bfd_link_hash_indirect)
+ {
+ edir->tlsfunc_refcount += eind->tlsfunc_refcount;
+ eind->tlsfunc_refcount = 0;
+
+ if (dir->got.refcount <= 0)
+ {
+ edir->tls_type = eind->tls_type;
+ eind->tls_type = GOT_UNKNOWN;
+ }
+ }
+
+ _bfd_elf_link_hash_copy_indirect (info, dir, ind);
+}
+
static inline bfd_boolean
elf_xtensa_dynamic_symbol_p (struct elf_link_hash_entry *h,
struct bfd_link_info *info)
!= GET_XTENSA_PROP_ALIGNMENT (b->flags)))
return (GET_XTENSA_PROP_ALIGNMENT (a->flags)
- GET_XTENSA_PROP_ALIGNMENT (b->flags));
-
+
if ((a->flags & XTENSA_PROP_UNREACHABLE)
!= (b->flags & XTENSA_PROP_UNREACHABLE))
return ((b->flags & XTENSA_PROP_UNREACHABLE)
if (table_section)
table_size = table_section->size;
- if (table_size == 0)
+ if (table_size == 0)
{
*table_p = NULL;
return 0;
section_limit = bfd_get_section_limit (abfd, section);
rel_end = internal_relocs + table_section->reloc_count;
- for (off = 0; off < table_size; off += table_entry_size)
+ for (off = 0; off < table_size; off += table_entry_size)
{
bfd_vma address = bfd_get_32 (abfd, table_data + off);
const Elf_Internal_Rela *rel;
const Elf_Internal_Rela *rel_end;
- if (info->relocatable)
+ if (info->relocatable || (sec->flags & SEC_ALLOC) == 0)
return TRUE;
+ BFD_ASSERT (is_xtensa_elf (abfd));
+
htab = elf_xtensa_hash_table (info);
+ if (htab == NULL)
+ return FALSE;
+
symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
sym_hashes = elf_sym_hashes (abfd);
{
unsigned int r_type;
unsigned long r_symndx;
- struct elf_link_hash_entry *h;
+ struct elf_link_hash_entry *h = NULL;
+ struct elf_xtensa_link_hash_entry *eh;
+ int tls_type, old_tls_type;
+ bfd_boolean is_got = FALSE;
+ bfd_boolean is_plt = FALSE;
+ bfd_boolean is_tlsfunc = FALSE;
r_symndx = ELF32_R_SYM (rel->r_info);
r_type = ELF32_R_TYPE (rel->r_info);
return FALSE;
}
- if (r_symndx < symtab_hdr->sh_info)
- h = NULL;
- else
+ if (r_symndx >= symtab_hdr->sh_info)
{
h = sym_hashes[r_symndx - symtab_hdr->sh_info];
while (h->root.type == bfd_link_hash_indirect
|| h->root.type == bfd_link_hash_warning)
h = (struct elf_link_hash_entry *) h->root.u.i.link;
+
+ /* PR15323, ref flags aren't set for references in the same
+ object. */
+ h->root.non_ir_ref = 1;
}
+ eh = elf_xtensa_hash_entry (h);
switch (r_type)
{
- case R_XTENSA_32:
- if (h == NULL)
- goto local_literal;
+ case R_XTENSA_TLSDESC_FN:
+ if (info->shared)
+ {
+ tls_type = GOT_TLS_GD;
+ is_got = TRUE;
+ is_tlsfunc = TRUE;
+ }
+ else
+ tls_type = GOT_TLS_IE;
+ break;
- if ((sec->flags & SEC_ALLOC) != 0)
+ case R_XTENSA_TLSDESC_ARG:
+ if (info->shared)
{
- if (h->got.refcount <= 0)
- h->got.refcount = 1;
- else
- h->got.refcount += 1;
+ tls_type = GOT_TLS_GD;
+ is_got = TRUE;
}
+ else
+ {
+ tls_type = GOT_TLS_IE;
+ if (h && elf_xtensa_hash_entry (h) != htab->tlsbase)
+ is_got = TRUE;
+ }
+ break;
+
+ case R_XTENSA_TLS_DTPOFF:
+ if (info->shared)
+ tls_type = GOT_TLS_GD;
+ else
+ tls_type = GOT_TLS_IE;
+ break;
+
+ case R_XTENSA_TLS_TPOFF:
+ tls_type = GOT_TLS_IE;
+ if (info->shared)
+ info->flags |= DF_STATIC_TLS;
+ if (info->shared || h)
+ is_got = TRUE;
+ break;
+
+ case R_XTENSA_32:
+ tls_type = GOT_NORMAL;
+ is_got = TRUE;
break;
case R_XTENSA_PLT:
- /* If this relocation is against a local symbol, then it's
- exactly the same as a normal local GOT entry. */
- if (h == NULL)
- goto local_literal;
+ tls_type = GOT_NORMAL;
+ is_plt = TRUE;
+ break;
+
+ case R_XTENSA_GNU_VTINHERIT:
+ /* This relocation describes the C++ object vtable hierarchy.
+ Reconstruct it for later use during GC. */
+ if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
+ return FALSE;
+ continue;
- if ((sec->flags & SEC_ALLOC) != 0)
+ case R_XTENSA_GNU_VTENTRY:
+ /* This relocation describes which C++ vtable entries are actually
+ used. Record for later use during GC. */
+ BFD_ASSERT (h != NULL);
+ if (h != NULL
+ && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
+ return FALSE;
+ continue;
+
+ default:
+ /* Nothing to do for any other relocations. */
+ continue;
+ }
+
+ if (h)
+ {
+ if (is_plt)
{
if (h->plt.refcount <= 0)
{
return FALSE;
}
}
- break;
+ else if (is_got)
+ {
+ if (h->got.refcount <= 0)
+ h->got.refcount = 1;
+ else
+ h->got.refcount += 1;
+ }
- local_literal:
- if ((sec->flags & SEC_ALLOC) != 0)
+ if (is_tlsfunc)
+ eh->tlsfunc_refcount += 1;
+
+ old_tls_type = eh->tls_type;
+ }
+ else
+ {
+ /* Allocate storage the first time. */
+ if (elf_local_got_refcounts (abfd) == NULL)
{
- bfd_signed_vma *local_got_refcounts;
+ bfd_size_type size = symtab_hdr->sh_info;
+ void *mem;
- /* This is a global offset table entry for a local symbol. */
- local_got_refcounts = elf_local_got_refcounts (abfd);
- if (local_got_refcounts == NULL)
- {
- bfd_size_type size;
+ mem = bfd_zalloc (abfd, size * sizeof (bfd_signed_vma));
+ if (mem == NULL)
+ return FALSE;
+ elf_local_got_refcounts (abfd) = (bfd_signed_vma *) mem;
- size = symtab_hdr->sh_info;
- size *= sizeof (bfd_signed_vma);
- local_got_refcounts =
- (bfd_signed_vma *) bfd_zalloc (abfd, size);
- if (local_got_refcounts == NULL)
- return FALSE;
- elf_local_got_refcounts (abfd) = local_got_refcounts;
- }
- local_got_refcounts[r_symndx] += 1;
+ mem = bfd_zalloc (abfd, size);
+ if (mem == NULL)
+ return FALSE;
+ elf_xtensa_local_got_tls_type (abfd) = (char *) mem;
+
+ mem = bfd_zalloc (abfd, size * sizeof (bfd_signed_vma));
+ if (mem == NULL)
+ return FALSE;
+ elf_xtensa_local_tlsfunc_refcounts (abfd)
+ = (bfd_signed_vma *) mem;
}
- break;
- case R_XTENSA_OP0:
- case R_XTENSA_OP1:
- case R_XTENSA_OP2:
- case R_XTENSA_SLOT0_OP:
- case R_XTENSA_SLOT1_OP:
- case R_XTENSA_SLOT2_OP:
- case R_XTENSA_SLOT3_OP:
- case R_XTENSA_SLOT4_OP:
- case R_XTENSA_SLOT5_OP:
- case R_XTENSA_SLOT6_OP:
- case R_XTENSA_SLOT7_OP:
- case R_XTENSA_SLOT8_OP:
- case R_XTENSA_SLOT9_OP:
- case R_XTENSA_SLOT10_OP:
- case R_XTENSA_SLOT11_OP:
- case R_XTENSA_SLOT12_OP:
- case R_XTENSA_SLOT13_OP:
- case R_XTENSA_SLOT14_OP:
- case R_XTENSA_SLOT0_ALT:
- case R_XTENSA_SLOT1_ALT:
- case R_XTENSA_SLOT2_ALT:
- case R_XTENSA_SLOT3_ALT:
- case R_XTENSA_SLOT4_ALT:
- case R_XTENSA_SLOT5_ALT:
- case R_XTENSA_SLOT6_ALT:
- case R_XTENSA_SLOT7_ALT:
- case R_XTENSA_SLOT8_ALT:
- case R_XTENSA_SLOT9_ALT:
- case R_XTENSA_SLOT10_ALT:
- case R_XTENSA_SLOT11_ALT:
- case R_XTENSA_SLOT12_ALT:
- case R_XTENSA_SLOT13_ALT:
- case R_XTENSA_SLOT14_ALT:
- case R_XTENSA_ASM_EXPAND:
- case R_XTENSA_ASM_SIMPLIFY:
- case R_XTENSA_DIFF8:
- case R_XTENSA_DIFF16:
- case R_XTENSA_DIFF32:
- /* Nothing to do for these. */
- break;
+ /* This is a global offset table entry for a local symbol. */
+ if (is_got || is_plt)
+ elf_local_got_refcounts (abfd) [r_symndx] += 1;
- case R_XTENSA_GNU_VTINHERIT:
- /* This relocation describes the C++ object vtable hierarchy.
- Reconstruct it for later use during GC. */
- if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
- return FALSE;
- break;
+ if (is_tlsfunc)
+ elf_xtensa_local_tlsfunc_refcounts (abfd) [r_symndx] += 1;
- case R_XTENSA_GNU_VTENTRY:
- /* This relocation describes which C++ vtable entries are actually
- used. Record for later use during GC. */
- if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
- return FALSE;
- break;
+ old_tls_type = elf_xtensa_local_got_tls_type (abfd) [r_symndx];
+ }
- default:
- break;
+ if ((old_tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_IE))
+ tls_type |= old_tls_type;
+ /* If a TLS symbol is accessed using IE at least once,
+ there is no point in using a dynamic model for it. */
+ else if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
+ && ((old_tls_type & GOT_TLS_GD) == 0
+ || (tls_type & GOT_TLS_IE) == 0))
+ {
+ if ((old_tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GD))
+ tls_type = old_tls_type;
+ else if ((old_tls_type & GOT_TLS_GD) && (tls_type & GOT_TLS_GD))
+ tls_type |= old_tls_type;
+ else
+ {
+ (*_bfd_error_handler)
+ (_("%B: `%s' accessed both as normal and thread local symbol"),
+ abfd,
+ h ? h->root.root.string : "<local>");
+ return FALSE;
+ }
+ }
+
+ if (old_tls_type != tls_type)
+ {
+ if (eh)
+ eh->tls_type = tls_type;
+ else
+ elf_xtensa_local_got_tls_type (abfd) [r_symndx] = tls_type;
}
}
static bfd_boolean
elf_xtensa_gc_sweep_hook (bfd *abfd,
- struct bfd_link_info *info ATTRIBUTE_UNUSED,
+ struct bfd_link_info *info,
asection *sec,
const Elf_Internal_Rela *relocs)
{
Elf_Internal_Shdr *symtab_hdr;
struct elf_link_hash_entry **sym_hashes;
- bfd_signed_vma *local_got_refcounts;
const Elf_Internal_Rela *rel, *relend;
+ struct elf_xtensa_link_hash_table *htab;
+
+ htab = elf_xtensa_hash_table (info);
+ if (htab == NULL)
+ return FALSE;
+
+ if (info->relocatable)
+ return TRUE;
if ((sec->flags & SEC_ALLOC) == 0)
return TRUE;
symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
sym_hashes = elf_sym_hashes (abfd);
- local_got_refcounts = elf_local_got_refcounts (abfd);
relend = relocs + sec->reloc_count;
for (rel = relocs; rel < relend; rel++)
unsigned long r_symndx;
unsigned int r_type;
struct elf_link_hash_entry *h = NULL;
+ struct elf_xtensa_link_hash_entry *eh;
+ bfd_boolean is_got = FALSE;
+ bfd_boolean is_plt = FALSE;
+ bfd_boolean is_tlsfunc = FALSE;
r_symndx = ELF32_R_SYM (rel->r_info);
if (r_symndx >= symtab_hdr->sh_info)
|| h->root.type == bfd_link_hash_warning)
h = (struct elf_link_hash_entry *) h->root.u.i.link;
}
+ eh = elf_xtensa_hash_entry (h);
r_type = ELF32_R_TYPE (rel->r_info);
switch (r_type)
{
- case R_XTENSA_32:
- if (h == NULL)
- goto local_literal;
- if (h->got.refcount > 0)
- h->got.refcount--;
+ case R_XTENSA_TLSDESC_FN:
+ if (info->shared)
+ {
+ is_got = TRUE;
+ is_tlsfunc = TRUE;
+ }
break;
- case R_XTENSA_PLT:
- if (h == NULL)
- goto local_literal;
- if (h->plt.refcount > 0)
- h->plt.refcount--;
+ case R_XTENSA_TLSDESC_ARG:
+ if (info->shared)
+ is_got = TRUE;
+ else
+ {
+ if (h && elf_xtensa_hash_entry (h) != htab->tlsbase)
+ is_got = TRUE;
+ }
break;
- local_literal:
- if (local_got_refcounts[r_symndx] > 0)
- local_got_refcounts[r_symndx] -= 1;
+ case R_XTENSA_TLS_TPOFF:
+ if (info->shared || h)
+ is_got = TRUE;
break;
- default:
+ case R_XTENSA_32:
+ is_got = TRUE;
break;
+
+ case R_XTENSA_PLT:
+ is_plt = TRUE;
+ break;
+
+ default:
+ continue;
+ }
+
+ if (h)
+ {
+ if (is_plt)
+ {
+ if (h->plt.refcount > 0)
+ h->plt.refcount--;
+ }
+ else if (is_got)
+ {
+ if (h->got.refcount > 0)
+ h->got.refcount--;
+ }
+ if (is_tlsfunc)
+ {
+ if (eh->tlsfunc_refcount > 0)
+ eh->tlsfunc_refcount--;
+ }
+ }
+ else
+ {
+ if (is_got || is_plt)
+ {
+ bfd_signed_vma *got_refcount
+ = &elf_local_got_refcounts (abfd) [r_symndx];
+ if (*got_refcount > 0)
+ *got_refcount -= 1;
+ }
+ if (is_tlsfunc)
+ {
+ bfd_signed_vma *tlsfunc_refcount
+ = &elf_xtensa_local_tlsfunc_refcounts (abfd) [r_symndx];
+ if (*tlsfunc_refcount > 0)
+ *tlsfunc_refcount -= 1;
+ }
}
}
flagword flags, noalloc_flags;
htab = elf_xtensa_hash_table (info);
+ if (htab == NULL)
+ return FALSE;
/* First do all the standard stuff. */
if (! _bfd_elf_create_dynamic_sections (dynobj, info))
return FALSE;
- htab->splt = bfd_get_section_by_name (dynobj, ".plt");
- htab->srelplt = bfd_get_section_by_name (dynobj, ".rela.plt");
- htab->sgot = bfd_get_section_by_name (dynobj, ".got");
- htab->sgotplt = bfd_get_section_by_name (dynobj, ".got.plt");
+ htab->splt = bfd_get_linker_section (dynobj, ".plt");
+ htab->srelplt = bfd_get_linker_section (dynobj, ".rela.plt");
+ htab->sgot = bfd_get_linker_section (dynobj, ".got");
+ htab->sgotplt = bfd_get_linker_section (dynobj, ".got.plt");
+ htab->srelgot = bfd_get_linker_section (dynobj, ".rela.got");
/* Create any extra PLT sections in case check_relocs has already
been called on all the non-dynamic input files. */
|| ! bfd_set_section_flags (dynobj, htab->sgotplt, flags))
return FALSE;
- /* Create ".rela.got". */
- htab->srelgot = bfd_make_section_with_flags (dynobj, ".rela.got", flags);
- if (htab->srelgot == NULL
- || ! bfd_set_section_alignment (dynobj, htab->srelgot, 2))
- return FALSE;
-
/* Create ".got.loc" (literal tables for use by dynamic linker). */
- htab->sgotloc = bfd_make_section_with_flags (dynobj, ".got.loc", flags);
+ htab->sgotloc = bfd_make_section_anyway_with_flags (dynobj, ".got.loc",
+ flags);
if (htab->sgotloc == NULL
|| ! bfd_set_section_alignment (dynobj, htab->sgotloc, 2))
return FALSE;
/* Create ".xt.lit.plt" (literal table for ".got.plt*"). */
- htab->spltlittbl = bfd_make_section_with_flags (dynobj, ".xt.lit.plt",
- noalloc_flags);
+ htab->spltlittbl = bfd_make_section_anyway_with_flags (dynobj, ".xt.lit.plt",
+ noalloc_flags);
if (htab->spltlittbl == NULL
|| ! bfd_set_section_alignment (dynobj, htab->spltlittbl, 2))
return FALSE;
sname = (char *) bfd_malloc (10);
sprintf (sname, ".plt.%u", chunk);
- s = bfd_make_section_with_flags (dynobj, sname, flags | SEC_CODE);
+ s = bfd_make_section_anyway_with_flags (dynobj, sname, flags | SEC_CODE);
if (s == NULL
|| ! bfd_set_section_alignment (dynobj, s, 2))
return FALSE;
sname = (char *) bfd_malloc (14);
sprintf (sname, ".got.plt.%u", chunk);
- s = bfd_make_section_with_flags (dynobj, sname, flags);
+ s = bfd_make_section_anyway_with_flags (dynobj, sname, flags);
if (s == NULL
|| ! bfd_set_section_alignment (dynobj, s, 2))
return FALSE;
{
struct bfd_link_info *info;
struct elf_xtensa_link_hash_table *htab;
- bfd_boolean is_dynamic;
+ struct elf_xtensa_link_hash_entry *eh = elf_xtensa_hash_entry (h);
if (h->root.type == bfd_link_hash_indirect)
return TRUE;
- if (h->root.type == bfd_link_hash_warning)
- h = (struct elf_link_hash_entry *) h->root.u.i.link;
-
info = (struct bfd_link_info *) arg;
htab = elf_xtensa_hash_table (info);
+ if (htab == NULL)
+ return FALSE;
- is_dynamic = elf_xtensa_dynamic_symbol_p (h, info);
+ /* If we saw any use of an IE model for this symbol, we can then optimize
+ away GOT entries for any TLSDESC_FN relocs. */
+ if ((eh->tls_type & GOT_TLS_IE) != 0)
+ {
+ BFD_ASSERT (h->got.refcount >= eh->tlsfunc_refcount);
+ h->got.refcount -= eh->tlsfunc_refcount;
+ }
- if (! is_dynamic)
+ if (! elf_xtensa_dynamic_symbol_p (h, info))
elf_xtensa_make_sym_local (info, h);
if (h->plt.refcount > 0)
bfd *i;
htab = elf_xtensa_hash_table (info);
+ if (htab == NULL)
+ return;
- for (i = info->input_bfds; i; i = i->link_next)
+ for (i = info->input_bfds; i; i = i->link.next)
{
bfd_signed_vma *local_got_refcounts;
bfd_size_type j, cnt;
for (j = 0; j < cnt; ++j)
{
+ /* If we saw any use of an IE model for this symbol, we can
+ then optimize away GOT entries for any TLSDESC_FN relocs. */
+ if ((elf_xtensa_local_got_tls_type (i) [j] & GOT_TLS_IE) != 0)
+ {
+ bfd_signed_vma *tlsfunc_refcount
+ = &elf_xtensa_local_tlsfunc_refcounts (i) [j];
+ BFD_ASSERT (local_got_refcounts[j] >= *tlsfunc_refcount);
+ local_got_refcounts[j] -= *tlsfunc_refcount;
+ }
+
if (local_got_refcounts[j] > 0)
htab->srelgot->size += (local_got_refcounts[j]
* sizeof (Elf32_External_Rela));
plt_chunks = 0;
htab = elf_xtensa_hash_table (info);
+ if (htab == NULL)
+ return FALSE;
+
dynobj = elf_hash_table (info)->dynobj;
if (dynobj == NULL)
abort ();
/* Set the contents of the .interp section to the interpreter. */
if (info->executable)
{
- s = bfd_get_section_by_name (dynobj, ".interp");
+ s = bfd_get_linker_section (dynobj, ".interp");
if (s == NULL)
abort ();
s->size = sizeof ELF_DYNAMIC_INTERPRETER;
literal tables. */
sgotloc = htab->sgotloc;
sgotloc->size = spltlittbl->size;
- for (abfd = info->input_bfds; abfd != NULL; abfd = abfd->link_next)
+ for (abfd = info->input_bfds; abfd != NULL; abfd = abfd->link.next)
{
if (abfd->flags & DYNAMIC)
continue;
for (s = abfd->sections; s != NULL; s = s->next)
{
- if (! elf_discarded_section (s)
+ if (! discarded_section (s)
&& xtensa_is_littable_section (s)
&& s != spltlittbl)
sgotloc->size += s->size;
if (relplt)
{
- if (!add_dynamic_entry (DT_PLTGOT, 0)
- || !add_dynamic_entry (DT_PLTRELSZ, 0)
+ if (!add_dynamic_entry (DT_PLTRELSZ, 0)
|| !add_dynamic_entry (DT_PLTREL, DT_RELA)
|| !add_dynamic_entry (DT_JMPREL, 0))
return FALSE;
return FALSE;
}
- if (!add_dynamic_entry (DT_XTENSA_GOT_LOC_OFF, 0)
+ if (!add_dynamic_entry (DT_PLTGOT, 0)
+ || !add_dynamic_entry (DT_XTENSA_GOT_LOC_OFF, 0)
|| !add_dynamic_entry (DT_XTENSA_GOT_LOC_SZ, 0))
return FALSE;
}
return TRUE;
}
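+/* Give _TLS_MODULE_BASE_ a hidden STT_TLS definition at offset zero of
+ the TLS output segment once it has actually been referenced with TLS
+ relocations; references to it are what identify the local-dynamic model
+ elsewhere in this file (see the htab->tlsbase and is_ld_model checks). */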
+static bfd_boolean
+elf_xtensa_always_size_sections (bfd *output_bfd,
+ struct bfd_link_info *info)
+{
+ struct elf_xtensa_link_hash_table *htab;
+ asection *tls_sec;
+
+ htab = elf_xtensa_hash_table (info);
+ if (htab == NULL)
+ return FALSE;
+
+ tls_sec = htab->elf.tls_sec;
+
+ if (tls_sec && (htab->tlsbase->tls_type & GOT_TLS_ANY) != 0)
+ {
+ struct elf_link_hash_entry *tlsbase = &htab->tlsbase->elf;
+ struct bfd_link_hash_entry *bh = &tlsbase->root;
+ const struct elf_backend_data *bed = get_elf_backend_data (output_bfd);
+
+ tlsbase->type = STT_TLS;
+ if (!(_bfd_generic_link_add_one_symbol
+ (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
+ tls_sec, 0, NULL, FALSE,
+ bed->collect, &bh)))
+ return FALSE;
+ tlsbase->def_regular = 1;
+ tlsbase->other = STV_HIDDEN;
+ (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
+ }
+
+ return TRUE;
+}
+
\f
+/* Return the base VMA address which should be subtracted from real addresses
+ when resolving @dtpoff relocations.
+ This is the p_vaddr of the PT_TLS segment. */
+
+static bfd_vma
+dtpoff_base (struct bfd_link_info *info)
+{
+ /* If tls_sec is NULL, we should have signalled an error already. */
+ if (elf_hash_table (info)->tls_sec == NULL)
+ return 0;
+ return elf_hash_table (info)->tls_sec->vma;
+}
+
+/* Return the relocation value for a @tpoff relocation
+ when the STT_TLS symbol's virtual address is ADDRESS. */
+
+static bfd_vma
+tpoff (struct bfd_link_info *info, bfd_vma address)
+{
+ struct elf_link_hash_table *htab = elf_hash_table (info);
+ bfd_vma base;
+
+ /* If tls_sec is NULL, we should have signalled an error already. */
+ if (htab->tls_sec == NULL)
+ return 0;
+ base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
+ return address - htab->tls_sec->vma + base;
+}
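+/* For example, with an 8-byte TCB and a 4-byte-aligned TLS segment, a
+ symbol at tls_sec->vma + 0x10 gets a tpoff of 0x18; the relaxed IE/LE
+ sequences add that value to THREADPTR (see replace_tls_insn below). */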
+
/* Perform the specified relocation. The instruction at (contents + address)
is modified to set one operand to represent the value in "relocation". The
operand position is determined by the relocation type recorded in the
xtensa_isa isa = xtensa_default_isa;
static xtensa_insnbuf ibuff = NULL;
static xtensa_insnbuf sbuff = NULL;
- bfd_vma self_address = 0;
+ bfd_vma self_address;
bfd_size_type input_size;
int opnd, slot;
uint32 newval;
input_size = bfd_get_section_limit (abfd, input_section);
+ /* Calculate the PC address for this instruction. */
+ self_address = (input_section->output_section->vma
+ + input_section->output_offset
+ + address);
+
switch (howto->type)
{
case R_XTENSA_NONE:
case R_XTENSA_DIFF8:
case R_XTENSA_DIFF16:
case R_XTENSA_DIFF32:
+ case R_XTENSA_TLS_FUNC:
+ case R_XTENSA_TLS_ARG:
+ case R_XTENSA_TLS_CALL:
return bfd_reloc_ok;
case R_XTENSA_ASM_EXPAND:
if (!is_weak_undef)
{
/* Check for windowed CALL across a 1GB boundary. */
- xtensa_opcode opcode =
- get_expanded_call_opcode (contents + address,
- input_size - address, 0);
+ opcode = get_expanded_call_opcode (contents + address,
+ input_size - address, 0);
if (is_windowed_call_opcode (opcode))
{
- self_address = (input_section->output_section->vma
- + input_section->output_offset
- + address);
if ((self_address >> CALL_SEGMENT_BITS)
- != (relocation >> CALL_SEGMENT_BITS))
+ != (relocation >> CALL_SEGMENT_BITS))
{
*error_message = "windowed longcall crosses 1GB boundary; "
"return may fail";
/* The CALL needs to be relocated. Continue below for that part. */
address += 3;
+ self_address += 3;
howto = &elf_howto_table[(unsigned) R_XTENSA_SLOT0_OP ];
}
break;
case R_XTENSA_32:
- case R_XTENSA_PLT:
{
bfd_vma x;
x = bfd_get_32 (abfd, contents + address);
bfd_put_32 (abfd, x, contents + address);
}
return bfd_reloc_ok;
+
+ case R_XTENSA_32_PCREL:
+ bfd_put_32 (abfd, relocation - self_address, contents + address);
+ return bfd_reloc_ok;
+
+ case R_XTENSA_PLT:
+ case R_XTENSA_TLSDESC_FN:
+ case R_XTENSA_TLSDESC_ARG:
+ case R_XTENSA_TLS_DTPOFF:
+ case R_XTENSA_TLS_TPOFF:
+ bfd_put_32 (abfd, relocation, contents + address);
+ return bfd_reloc_ok;
}
/* Only instruction slot-specific relocations handled below.... */
return bfd_reloc_dangerous;
}
- /* Calculate the PC address for this instruction. */
- self_address = (input_section->output_section->vma
- + input_section->output_offset
- + address);
-
newval = relocation;
}
}
&& is_windowed_call_opcode (opcode))
{
if ((self_address >> CALL_SEGMENT_BITS)
- != (relocation >> CALL_SEGMENT_BITS))
+ != (relocation >> CALL_SEGMENT_BITS))
{
*error_message =
"windowed call crosses 1GB boundary; return may fail";
static char *message = NULL;
bfd_size_type orig_len, len = 0;
bfd_boolean is_append;
+ va_list ap;
+
+ va_start (ap, arglen);
- VA_OPEN (ap, arglen);
- VA_FIXEDARG (ap, const char *, origmsg);
-
- is_append = (origmsg == message);
+ is_append = (origmsg == message);
orig_len = strlen (origmsg);
len = orig_len + strlen (fmt) + arglen + 20;
if (len > alloc_size)
{
- message = (char *) bfd_realloc (message, len);
+ message = (char *) bfd_realloc_or_free (message, len);
alloc_size = len;
}
- if (!is_append)
- memcpy (message, origmsg, orig_len);
- vsprintf (message + orig_len, fmt, ap);
- VA_CLOSE (ap);
+ if (message != NULL)
+ {
+ if (!is_append)
+ memcpy (message, origmsg, orig_len);
+ vsprintf (message + orig_len, fmt, ap);
+ }
+ va_end (ap);
return message;
}
}
+static bfd_boolean get_indirect_call_dest_reg (xtensa_opcode, unsigned *);
+
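+/* Rewrite the instruction under a TLS_FUNC/TLS_ARG/TLS_CALL relocation
+ when the access is being relaxed to the IE or LE model. Roughly, as
+ inferred from the cases below: for local dynamic (is_ld_model) the
+ L32Rs become NOPs and the CALLX becomes RUR.THREADPTR; otherwise the
+ TLS_FUNC L32R becomes RUR.THREADPTR, the TLS_ARG L32R is kept (its
+ literal now holds a TPOFF value), and the CALLX becomes an ADD that
+ leaves THREADPTR plus that offset in the call's return-value register. */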
+static bfd_boolean
+replace_tls_insn (Elf_Internal_Rela *rel,
+ bfd *abfd,
+ asection *input_section,
+ bfd_byte *contents,
+ bfd_boolean is_ld_model,
+ char **error_message)
+{
+ static xtensa_insnbuf ibuff = NULL;
+ static xtensa_insnbuf sbuff = NULL;
+ xtensa_isa isa = xtensa_default_isa;
+ xtensa_format fmt;
+ xtensa_opcode old_op, new_op;
+ bfd_size_type input_size;
+ int r_type;
+ unsigned dest_reg, src_reg;
+
+ if (ibuff == NULL)
+ {
+ ibuff = xtensa_insnbuf_alloc (isa);
+ sbuff = xtensa_insnbuf_alloc (isa);
+ }
+
+ input_size = bfd_get_section_limit (abfd, input_section);
+
+ /* Read the instruction into a buffer and decode the opcode. */
+ xtensa_insnbuf_from_chars (isa, ibuff, contents + rel->r_offset,
+ input_size - rel->r_offset);
+ fmt = xtensa_format_decode (isa, ibuff);
+ if (fmt == XTENSA_UNDEFINED)
+ {
+ *error_message = "cannot decode instruction format";
+ return FALSE;
+ }
+
+ BFD_ASSERT (xtensa_format_num_slots (isa, fmt) == 1);
+ xtensa_format_get_slot (isa, fmt, 0, ibuff, sbuff);
+
+ old_op = xtensa_opcode_decode (isa, fmt, 0, sbuff);
+ if (old_op == XTENSA_UNDEFINED)
+ {
+ *error_message = "cannot decode instruction opcode";
+ return FALSE;
+ }
+
+ r_type = ELF32_R_TYPE (rel->r_info);
+ switch (r_type)
+ {
+ case R_XTENSA_TLS_FUNC:
+ case R_XTENSA_TLS_ARG:
+ if (old_op != get_l32r_opcode ()
+ || xtensa_operand_get_field (isa, old_op, 0, fmt, 0,
+ sbuff, &dest_reg) != 0)
+ {
+ *error_message = "cannot extract L32R destination for TLS access";
+ return FALSE;
+ }
+ break;
+
+ case R_XTENSA_TLS_CALL:
+ if (! get_indirect_call_dest_reg (old_op, &dest_reg)
+ || xtensa_operand_get_field (isa, old_op, 0, fmt, 0,
+ sbuff, &src_reg) != 0)
+ {
+ *error_message = "cannot extract CALLXn operands for TLS access";
+ return FALSE;
+ }
+ break;
+
+ default:
+ abort ();
+ }
+
+ if (is_ld_model)
+ {
+ switch (r_type)
+ {
+ case R_XTENSA_TLS_FUNC:
+ case R_XTENSA_TLS_ARG:
+ /* Change the instruction to a NOP (or "OR a1, a1, a1" for older
+ versions of Xtensa). */
+ new_op = xtensa_opcode_lookup (isa, "nop");
+ if (new_op == XTENSA_UNDEFINED)
+ {
+ new_op = xtensa_opcode_lookup (isa, "or");
+ if (new_op == XTENSA_UNDEFINED
+ || xtensa_opcode_encode (isa, fmt, 0, sbuff, new_op) != 0
+ || xtensa_operand_set_field (isa, new_op, 0, fmt, 0,
+ sbuff, 1) != 0
+ || xtensa_operand_set_field (isa, new_op, 1, fmt, 0,
+ sbuff, 1) != 0
+ || xtensa_operand_set_field (isa, new_op, 2, fmt, 0,
+ sbuff, 1) != 0)
+ {
+ *error_message = "cannot encode OR for TLS access";
+ return FALSE;
+ }
+ }
+ else
+ {
+ if (xtensa_opcode_encode (isa, fmt, 0, sbuff, new_op) != 0)
+ {
+ *error_message = "cannot encode NOP for TLS access";
+ return FALSE;
+ }
+ }
+ break;
+
+ case R_XTENSA_TLS_CALL:
+ /* Read THREADPTR into the CALLX's return value register. */
+ new_op = xtensa_opcode_lookup (isa, "rur.threadptr");
+ if (new_op == XTENSA_UNDEFINED
+ || xtensa_opcode_encode (isa, fmt, 0, sbuff, new_op) != 0
+ || xtensa_operand_set_field (isa, new_op, 0, fmt, 0,
+ sbuff, dest_reg + 2) != 0)
+ {
+ *error_message = "cannot encode RUR.THREADPTR for TLS access";
+ return FALSE;
+ }
+ break;
+ }
+ }
+ else
+ {
+ switch (r_type)
+ {
+ case R_XTENSA_TLS_FUNC:
+ new_op = xtensa_opcode_lookup (isa, "rur.threadptr");
+ if (new_op == XTENSA_UNDEFINED
+ || xtensa_opcode_encode (isa, fmt, 0, sbuff, new_op) != 0
+ || xtensa_operand_set_field (isa, new_op, 0, fmt, 0,
+ sbuff, dest_reg) != 0)
+ {
+ *error_message = "cannot encode RUR.THREADPTR for TLS access";
+ return FALSE;
+ }
+ break;
+
+ case R_XTENSA_TLS_ARG:
+ /* Nothing to do. Keep the original L32R instruction. */
+ return TRUE;
+
+ case R_XTENSA_TLS_CALL:
+ /* Add the CALLX's src register (holding the THREADPTR value)
+ to the first argument register (holding the offset) and put
+ the result in the CALLX's return value register. */
+ new_op = xtensa_opcode_lookup (isa, "add");
+ if (new_op == XTENSA_UNDEFINED
+ || xtensa_opcode_encode (isa, fmt, 0, sbuff, new_op) != 0
+ || xtensa_operand_set_field (isa, new_op, 0, fmt, 0,
+ sbuff, dest_reg + 2) != 0
+ || xtensa_operand_set_field (isa, new_op, 1, fmt, 0,
+ sbuff, dest_reg + 2) != 0
+ || xtensa_operand_set_field (isa, new_op, 2, fmt, 0,
+ sbuff, src_reg) != 0)
+ {
+ *error_message = "cannot encode ADD for TLS access";
+ return FALSE;
+ }
+ break;
+ }
+ }
+
+ xtensa_format_set_slot (isa, fmt, 0, ibuff, sbuff);
+ xtensa_insnbuf_to_chars (isa, ibuff, contents + rel->r_offset,
+ input_size - rel->r_offset);
+
+ return TRUE;
+}
+
+
+#define IS_XTENSA_TLS_RELOC(R_TYPE) \
+ ((R_TYPE) == R_XTENSA_TLSDESC_FN \
+ || (R_TYPE) == R_XTENSA_TLSDESC_ARG \
+ || (R_TYPE) == R_XTENSA_TLS_DTPOFF \
+ || (R_TYPE) == R_XTENSA_TLS_TPOFF \
+ || (R_TYPE) == R_XTENSA_TLS_FUNC \
+ || (R_TYPE) == R_XTENSA_TLS_ARG \
+ || (R_TYPE) == R_XTENSA_TLS_CALL)
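+/* (Used in relocate_section below to warn when a TLS relocation is
+ applied to a non-TLS symbol, or vice versa.) */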
+
/* Relocate an Xtensa ELF section. This is invoked by the linker for
both relocatable and final links. */
struct elf_link_hash_entry **sym_hashes;
property_table_entry *lit_table = 0;
int ltblsize = 0;
+ char *local_got_tls_types;
char *error_message = NULL;
bfd_size_type input_size;
+ int tls_type;
if (!xtensa_default_isa)
xtensa_default_isa = xtensa_isa_init (0, 0);
+ BFD_ASSERT (is_xtensa_elf (input_bfd));
+
htab = elf_xtensa_hash_table (info);
+ if (htab == NULL)
+ return FALSE;
+
symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
sym_hashes = elf_sym_hashes (input_bfd);
+ local_got_tls_types = elf_xtensa_local_got_tls_type (input_bfd);
if (elf_hash_table (info)->dynamic_sections_created)
{
unsigned long r_symndx;
struct elf_link_hash_entry *h;
Elf_Internal_Sym *sym;
+ char sym_type;
+ const char *name;
asection *sec;
bfd_vma relocation;
bfd_reloc_status_type r;
bfd_boolean is_weak_undef;
bfd_boolean unresolved_reloc;
bfd_boolean warned;
+ bfd_boolean dynamic_symbol;
r_type = ELF32_R_TYPE (rel->r_info);
if (r_type == (int) R_XTENSA_GNU_VTINHERIT
if (r_symndx < symtab_hdr->sh_info)
{
sym = local_syms + r_symndx;
+ sym_type = ELF32_ST_TYPE (sym->st_info);
sec = local_sections[r_symndx];
relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
}
else
{
+ bfd_boolean ignored;
+
RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
r_symndx, symtab_hdr, sym_hashes,
h, sec, relocation,
- unresolved_reloc, warned);
+ unresolved_reloc, warned, ignored);
if (relocation == 0
&& !unresolved_reloc
&& h->root.type == bfd_link_hash_undefweak)
is_weak_undef = TRUE;
- }
- if (sec != NULL && elf_discarded_section (sec))
- {
- /* For relocs against symbols from removed linkonce sections,
- or sections discarded by a linker script, we just want the
- section contents zeroed. Avoid any special processing. */
- _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
- rel->r_info = 0;
- rel->r_addend = 0;
- continue;
+ sym_type = h->type;
}
+ if (sec != NULL && discarded_section (sec))
+ RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
+ rel, 1, relend, howto, 0, contents);
+
if (info->relocatable)
{
+ bfd_vma dest_addr;
+ asection * sym_sec = get_elf_r_symndx_section (input_bfd, r_symndx);
+
/* This is a relocatable link.
1) If the reloc is against a section symbol, adjust
according to the output section.
if (!do_fix_for_relocatable_link (rel, input_bfd, input_section,
contents))
return FALSE;
- r_type = ELF32_R_TYPE (rel->r_info);
}
+ dest_addr = sym_sec->output_section->vma + sym_sec->output_offset
+ + get_elf_r_symndx_offset (input_bfd, r_symndx) + rel->r_addend;
+
if (r_type == R_XTENSA_ASM_SIMPLIFY)
{
- char *error_message = NULL;
+ error_message = NULL;
/* Convert ASM_SIMPLIFY into the simpler relocation
so that they never escape a relaxing link. */
r = contract_asm_expansion (contents, input_size, rel,
to work around problems with DWARF in relocatable links
with some previous version of BFD. Now we can't easily get
rid of the hack without breaking backward compatibility.... */
- if (rel->r_addend)
+ r = bfd_reloc_ok;
+ howto = &elf_howto_table[r_type];
+ if (howto->partial_inplace && rel->r_addend)
+ {
+ r = elf_xtensa_do_reloc (howto, input_bfd, input_section,
+ rel->r_addend, contents,
+ rel->r_offset, FALSE,
+ &error_message);
+ rel->r_addend = 0;
+ }
+ else
{
- howto = &elf_howto_table[r_type];
- if (howto->partial_inplace)
+ /* Put the correct bits in the target instruction, even
+ though the relocation will still be present in the output
+ file. This makes disassembly clearer, as well as
+ allowing loadable kernel modules to work without needing
+ relocations on anything other than calls and l32r's. */
+
+ /* If it is not in the same section, there is nothing we can do. */
+ if (r_type >= R_XTENSA_SLOT0_OP && r_type <= R_XTENSA_SLOT14_OP &&
+ sym_sec->output_section == input_section->output_section)
{
r = elf_xtensa_do_reloc (howto, input_bfd, input_section,
- rel->r_addend, contents,
+ dest_addr, contents,
rel->r_offset, FALSE,
&error_message);
- if (r != bfd_reloc_ok)
- {
- if (!((*info->callbacks->reloc_dangerous)
- (info, error_message, input_bfd, input_section,
- rel->r_offset)))
- return FALSE;
- }
- rel->r_addend = 0;
}
}
+ if (r != bfd_reloc_ok)
+ {
+ if (!((*info->callbacks->reloc_dangerous)
+ (info, error_message, input_bfd, input_section,
+ rel->r_offset)))
+ return FALSE;
+ }
/* Done with work for relocatable link; continue with next reloc. */
continue;
/* Check if this references a section in another input file. */
do_fix_for_final_link (rel, input_bfd, input_section, contents,
&relocation);
-
- /* Update some already cached values. */
- r_type = ELF32_R_TYPE (rel->r_info);
- howto = &elf_howto_table[r_type];
}
/* Sanity check the address. */
return FALSE;
}
- /* Generate dynamic relocations. */
- if (elf_hash_table (info)->dynamic_sections_created)
+ if (h != NULL)
+ name = h->root.root.string;
+ else
{
- bfd_boolean dynamic_symbol = elf_xtensa_dynamic_symbol_p (h, info);
+ name = (bfd_elf_string_from_elf_section
+ (input_bfd, symtab_hdr->sh_link, sym->st_name));
+ if (name == NULL || *name == '\0')
+ name = bfd_section_name (input_bfd, sec);
+ }
- if (dynamic_symbol && is_operand_relocation (r_type))
- {
- /* This is an error. The symbol's real value won't be known
- until runtime and it's likely to be out of range anyway. */
- const char *name = h->root.root.string;
- error_message = vsprint_msg ("invalid relocation for dynamic "
- "symbol", ": %s",
- strlen (name) + 2, name);
- if (!((*info->callbacks->reloc_dangerous)
- (info, error_message, input_bfd, input_section,
- rel->r_offset)))
- return FALSE;
- }
- else if ((r_type == R_XTENSA_32 || r_type == R_XTENSA_PLT)
- && (input_section->flags & SEC_ALLOC) != 0
- && (dynamic_symbol || info->shared))
+ if (r_symndx != STN_UNDEF
+ && r_type != R_XTENSA_NONE
+ && (h == NULL
+ || h->root.type == bfd_link_hash_defined
+ || h->root.type == bfd_link_hash_defweak)
+ && IS_XTENSA_TLS_RELOC (r_type) != (sym_type == STT_TLS))
+ {
+ (*_bfd_error_handler)
+ ((sym_type == STT_TLS
+ ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
+ : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
+ input_bfd,
+ input_section,
+ (long) rel->r_offset,
+ howto->name,
+ name);
+ }
+
+ dynamic_symbol = elf_xtensa_dynamic_symbol_p (h, info);
+
+ tls_type = GOT_UNKNOWN;
+ if (h)
+ tls_type = elf_xtensa_hash_entry (h)->tls_type;
+ else if (local_got_tls_types)
+ tls_type = local_got_tls_types [r_symndx];
+
+ switch (r_type)
+ {
+ case R_XTENSA_32:
+ case R_XTENSA_PLT:
+ if (elf_hash_table (info)->dynamic_sections_created
+ && (input_section->flags & SEC_ALLOC) != 0
+ && (dynamic_symbol || info->shared))
{
Elf_Internal_Rela outrel;
bfd_byte *loc;
}
}
- loc = (srel->contents
- + srel->reloc_count++ * sizeof (Elf32_External_Rela));
- bfd_elf32_swap_reloca_out (output_bfd, &outrel, loc);
- BFD_ASSERT (sizeof (Elf32_External_Rela) * srel->reloc_count
- <= srel->size);
+ loc = (srel->contents
+ + srel->reloc_count++ * sizeof (Elf32_External_Rela));
+ bfd_elf32_swap_reloca_out (output_bfd, &outrel, loc);
+ BFD_ASSERT (sizeof (Elf32_External_Rela) * srel->reloc_count
+ <= srel->size);
+ }
+ else if (r_type == R_XTENSA_ASM_EXPAND && dynamic_symbol)
+ {
+ /* This should only happen for non-PIC code, which is not
+ supposed to be used on systems with dynamic linking.
+ Just ignore these relocations. */
+ continue;
+ }
+ break;
+
+ case R_XTENSA_TLS_TPOFF:
+ /* Switch to LE model for local symbols in an executable. */
+ if (! info->shared && ! dynamic_symbol)
+ {
+ relocation = tpoff (info, relocation);
+ break;
+ }
+ /* fall through */
+
+ case R_XTENSA_TLSDESC_FN:
+ case R_XTENSA_TLSDESC_ARG:
+ {
+ if (r_type == R_XTENSA_TLSDESC_FN)
+ {
+ if (! info->shared || (tls_type & GOT_TLS_IE) != 0)
+ r_type = R_XTENSA_NONE;
+ }
+ else if (r_type == R_XTENSA_TLSDESC_ARG)
+ {
+ if (info->shared)
+ {
+ if ((tls_type & GOT_TLS_IE) != 0)
+ r_type = R_XTENSA_TLS_TPOFF;
+ }
+ else
+ {
+ r_type = R_XTENSA_TLS_TPOFF;
+ if (! dynamic_symbol)
+ {
+ relocation = tpoff (info, relocation);
+ break;
+ }
+ }
+ }
+
+ if (r_type == R_XTENSA_NONE)
+ /* Nothing to do here; skip to the next reloc. */
+ continue;
+
+ if (! elf_hash_table (info)->dynamic_sections_created)
+ {
+ error_message =
+ _("TLS relocation invalid without dynamic sections");
+ if (!((*info->callbacks->reloc_dangerous)
+ (info, error_message, input_bfd, input_section,
+ rel->r_offset)))
+ return FALSE;
+ }
+ else
+ {
+ Elf_Internal_Rela outrel;
+ bfd_byte *loc;
+ asection *srel = htab->srelgot;
+ int indx;
+
+ outrel.r_offset = (input_section->output_section->vma
+ + input_section->output_offset
+ + rel->r_offset);
+
+ /* Complain if the relocation is in a read-only section
+ and not in a literal pool. */
+ if ((input_section->flags & SEC_READONLY) != 0
+ && ! elf_xtensa_in_literal_pool (lit_table, ltblsize,
+ outrel.r_offset))
+ {
+ error_message =
+ _("dynamic relocation in read-only section");
+ if (!((*info->callbacks->reloc_dangerous)
+ (info, error_message, input_bfd, input_section,
+ rel->r_offset)))
+ return FALSE;
+ }
+
+ indx = h && h->dynindx != -1 ? h->dynindx : 0;
+ if (indx == 0)
+ outrel.r_addend = relocation - dtpoff_base (info);
+ else
+ outrel.r_addend = 0;
+ rel->r_addend = 0;
+
+ outrel.r_info = ELF32_R_INFO (indx, r_type);
+ relocation = 0;
+ unresolved_reloc = FALSE;
+
+ BFD_ASSERT (srel);
+ loc = (srel->contents
+ + srel->reloc_count++ * sizeof (Elf32_External_Rela));
+ bfd_elf32_swap_reloca_out (output_bfd, &outrel, loc);
+ BFD_ASSERT (sizeof (Elf32_External_Rela) * srel->reloc_count
+ <= srel->size);
+ }
+ }
+ break;
+
+ case R_XTENSA_TLS_DTPOFF:
+ if (! info->shared)
+ /* Switch from LD model to LE model. */
+ relocation = tpoff (info, relocation);
+ else
+ relocation -= dtpoff_base (info);
+ break;
+
+ case R_XTENSA_TLS_FUNC:
+ case R_XTENSA_TLS_ARG:
+ case R_XTENSA_TLS_CALL:
+ /* Check if optimizing to IE or LE model. */
+ if ((tls_type & GOT_TLS_IE) != 0)
+ {
+ bfd_boolean is_ld_model =
+ (h && elf_xtensa_hash_entry (h) == htab->tlsbase);
+ if (! replace_tls_insn (rel, input_bfd, input_section, contents,
+ is_ld_model, &error_message))
+ {
+ if (!((*info->callbacks->reloc_dangerous)
+ (info, error_message, input_bfd, input_section,
+ rel->r_offset)))
+ return FALSE;
+ }
+
+ if (r_type != R_XTENSA_TLS_ARG || is_ld_model)
+ {
+ /* Skip subsequent relocations on the same instruction. */
+ while (rel + 1 < relend && rel[1].r_offset == rel->r_offset)
+ rel++;
+ }
+ }
+ continue;
+
+ default:
+ if (elf_hash_table (info)->dynamic_sections_created
+ && dynamic_symbol && (is_operand_relocation (r_type)
+ || r_type == R_XTENSA_32_PCREL))
+ {
+ error_message =
+ vsprint_msg ("invalid relocation for dynamic symbol", ": %s",
+ strlen (name) + 2, name);
+ if (!((*info->callbacks->reloc_dangerous)
+ (info, error_message, input_bfd, input_section,
+ rel->r_offset)))
+ return FALSE;
+ continue;
}
+ break;
}
/* Dynamic relocs are not propagated for SEC_DEBUGGING sections
not process them. */
if (unresolved_reloc
&& !((input_section->flags & SEC_DEBUGGING) != 0
- && h->def_dynamic))
+ && h->def_dynamic)
+ && _bfd_elf_section_offset (output_bfd, info, input_section,
+ rel->r_offset) != (bfd_vma) -1)
{
(*_bfd_error_handler)
(_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
input_section,
(long) rel->r_offset,
howto->name,
- h->root.root.string);
+ name);
return FALSE;
}
+ /* TLS optimizations may have changed r_type; update "howto". */
+ howto = &elf_howto_table[r_type];
+
/* There's no point in calling bfd_perform_relocation here.
Just go directly to our "special function". */
r = elf_xtensa_do_reloc (howto, input_bfd, input_section,
if (r != bfd_reloc_ok && !warned)
{
- const char *name;
-
BFD_ASSERT (r == bfd_reloc_dangerous || r == bfd_reloc_other);
BFD_ASSERT (error_message != NULL);
- if (h)
- name = h->root.root.string;
+ if (rel->r_addend == 0)
+ error_message = vsprint_msg (error_message, ": %s",
+ strlen (name) + 2, name);
else
- {
- name = bfd_elf_string_from_elf_section
- (input_bfd, symtab_hdr->sh_link, sym->st_name);
- if (name && *name == '\0')
- name = bfd_section_name (input_bfd, sec);
- }
- if (name)
- {
- if (rel->r_addend == 0)
- error_message = vsprint_msg (error_message, ": %s",
- strlen (name) + 2, name);
- else
- error_message = vsprint_msg (error_message, ": (%s+0x%x)",
- strlen (name) + 22,
- name, (int)rel->r_addend);
- }
+ error_message = vsprint_msg (error_message, ": (%s+0x%x)",
+ strlen (name) + 22,
+ name, (int) rel->r_addend);
if (!((*info->callbacks->reloc_dangerous)
(info, error_message, input_bfd, input_section,
}
/* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. */
- if (strcmp (h->root.root.string, "_DYNAMIC") == 0
+ if (h == elf_hash_table (info)->hdynamic
|| h == elf_hash_table (info)->hgot)
sym->st_shndx = SHN_ABS;
for (n = 0; n < num; n++)
{
- bfd_boolean remove = FALSE;
+ bfd_boolean remove_entry = FALSE;
if (table[n].size == 0)
- remove = TRUE;
- else if (n > 0 &&
- (table[n-1].address + table[n-1].size == table[n].address))
+ remove_entry = TRUE;
+ else if (n > 0
+ && (table[n-1].address + table[n-1].size == table[n].address))
{
table[n-1].size += table[n].size;
- remove = TRUE;
+ remove_entry = TRUE;
}
- if (remove)
+ if (remove_entry)
{
for (m = n; m < num - 1; m++)
{
bfd *dynobj;
asection *sdyn, *srelplt, *sgot, *sxtlit, *sgotloc;
Elf32_External_Dyn *dyncon, *dynconend;
- int num_xtlit_entries;
+ int num_xtlit_entries = 0;
if (! elf_hash_table (info)->dynamic_sections_created)
return TRUE;
htab = elf_xtensa_hash_table (info);
+ if (htab == NULL)
+ return FALSE;
+
dynobj = elf_hash_table (info)->dynobj;
- sdyn = bfd_get_section_by_name (dynobj, ".dynamic");
+ sdyn = bfd_get_linker_section (dynobj, ".dynamic");
BFD_ASSERT (sdyn != NULL);
/* Set the first entry in the global offset table to the address of
BFD_ASSERT (! info->relocatable);
sxtlit = bfd_get_section_by_name (output_bfd, ".xt.lit");
sgotloc = htab->sgotloc;
- BFD_ASSERT (sxtlit && sgotloc);
- num_xtlit_entries =
- elf_xtensa_combine_prop_entries (output_bfd, sxtlit, sgotloc);
- if (num_xtlit_entries < 0)
- return FALSE;
+ BFD_ASSERT (sgotloc);
+ if (sxtlit)
+ {
+ num_xtlit_entries =
+ elf_xtensa_combine_prop_entries (output_bfd, sxtlit, sgotloc);
+ if (num_xtlit_entries < 0)
+ return FALSE;
+ }
dyncon = (Elf32_External_Dyn *) sdyn->contents;
dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
break;
case DT_XTENSA_GOT_LOC_OFF:
- dyn.d_un.d_ptr = htab->sgotloc->vma;
+ dyn.d_un.d_ptr = htab->sgotloc->output_section->vma;
break;
case DT_PLTGOT:
- dyn.d_un.d_ptr = htab->sgot->vma;
+ dyn.d_un.d_ptr = htab->sgot->output_section->vma;
break;
case DT_JMPREL:
- dyn.d_un.d_ptr = htab->srelplt->vma;
+ dyn.d_un.d_ptr = htab->srelplt->output_section->vma;
break;
case DT_PLTRELSZ:
- dyn.d_un.d_val = htab->srelplt->size;
+ dyn.d_un.d_val = htab->srelplt->output_section->size;
break;
case DT_RELASZ:
for .rela.plt to follow all other relocation sections, we
don't have to worry about changing the DT_RELA entry. */
if (htab->srelplt)
- dyn.d_un.d_val -= htab->srelplt->size;
+ dyn.d_un.d_val -= htab->srelplt->output_section->size;
break;
}
unsigned out_mach, in_mach;
flagword out_flag, in_flag;
- /* Check if we have the same endianess. */
+ /* Check if we have the same endianness. */
if (!_bfd_generic_verify_endian_match (ibfd, obfd))
return FALSE;
return TRUE;
}
- if ((out_flag & EF_XTENSA_XT_INSN) != (in_flag & EF_XTENSA_XT_INSN))
+ if ((out_flag & EF_XTENSA_XT_INSN) != (in_flag & EF_XTENSA_XT_INSN))
elf_elfheader (obfd)->e_flags &= (~ EF_XTENSA_XT_INSN);
- if ((out_flag & EF_XTENSA_XT_LIT) != (in_flag & EF_XTENSA_XT_LIT))
+ if ((out_flag & EF_XTENSA_XT_LIT) != (in_flag & EF_XTENSA_XT_LIT))
elf_elfheader (obfd)->e_flags &= (~ EF_XTENSA_XT_LIT);
return TRUE;
static enum elf_reloc_type_class
-elf_xtensa_reloc_type_class (const Elf_Internal_Rela *rela)
+elf_xtensa_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
+ const asection *rel_sec ATTRIBUTE_UNUSED,
+ const Elf_Internal_Rela *rela)
{
switch ((int) ELF32_R_TYPE (rela->r_info))
{
asection *sec)
{
bfd_byte *contents;
- bfd_vma section_size;
bfd_vma offset, actual_offset;
bfd_size_type removed_bytes = 0;
bfd_size_type entry_size;
else
entry_size = 8;
- section_size = sec->size;
- if (section_size == 0 || section_size % entry_size != 0)
+ if (sec->size == 0 || sec->size % entry_size != 0)
return FALSE;
contents = retrieve_contents (abfd, sec, info->keep_memory);
cookie->rel = cookie->rels;
cookie->relend = cookie->rels + sec->reloc_count;
- for (offset = 0; offset < section_size; offset += entry_size)
+ for (offset = 0; offset < sec->size; offset += entry_size)
{
actual_offset = offset - removed_bytes;
if (ELF32_R_TYPE (cookie->rel->r_info) != R_XTENSA_NONE)
{
/* Shift the contents up. */
- if (offset + entry_size < section_size)
+ if (offset + entry_size < sec->size)
memmove (&contents[actual_offset],
&contents[actual_offset + entry_size],
- section_size - offset - entry_size);
+ sec->size - offset - entry_size);
removed_bytes += entry_size;
}
}
/* Clear the removed bytes. */
- memset (&contents[section_size - removed_bytes], 0, removed_bytes);
+ memset (&contents[sec->size - removed_bytes], 0, removed_bytes);
pin_contents (sec, contents);
pin_internal_relocs (sec, cookie->rels);
/* Shrink size. */
- sec->size = section_size - removed_bytes;
+ if (sec->rawsize == 0)
+ sec->rawsize = sec->size;
+ sec->size -= removed_bytes;
if (xtensa_is_littable_section (sec))
{
based on the size. Just assume this is GNU/Linux. */
/* pr_cursig */
- elf_tdata (abfd)->core_signal = bfd_get_16 (abfd, note->descdata + 12);
+ elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
/* pr_pid */
- elf_tdata (abfd)->core_pid = bfd_get_32 (abfd, note->descdata + 24);
+ elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
/* pr_reg */
offset = 72;
return FALSE;
case 128: /* GNU/Linux elf_prpsinfo */
- elf_tdata (abfd)->core_program
+ elf_tdata (abfd)->core->program
= _bfd_elfcore_strndup (abfd, note->descdata + 32, 16);
- elf_tdata (abfd)->core_command
+ elf_tdata (abfd)->core->command
= _bfd_elfcore_strndup (abfd, note->descdata + 48, 80);
}
implementations, so strip it off if it exists. */
{
- char *command = elf_tdata (abfd)->core_command;
+ char *command = elf_tdata (abfd)->core->command;
int n = strlen (command);
if (0 < n && command[n - 1] == ' ')
}
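+/* Map a CALLXn opcode to its window increment N (0, 4, 8 or 12) in *PDST;
+ replace_tls_insn above adds 2 to this value to get the call's
+ return-value register. Returns FALSE for anything other than CALLXn. */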
+static bfd_boolean
+get_indirect_call_dest_reg (xtensa_opcode opcode, unsigned *pdst)
+{
+ unsigned dst = (unsigned) -1;
+
+ init_call_opcodes ();
+ if (opcode == callx0_op)
+ dst = 0;
+ else if (opcode == callx4_op)
+ dst = 4;
+ else if (opcode == callx8_op)
+ dst = 8;
+ else if (opcode == callx12_op)
+ dst = 12;
+
+ if (dst == (unsigned) -1)
+ return FALSE;
+
+ *pdst = dst;
+ return TRUE;
+}
+
+
static xtensa_opcode
get_const16_opcode (void)
{
return 0;
size += insnlen;
-
+
insnlen = insn_decode_len (contents, content_len, offset + size);
if (insnlen == 0)
return 0;
return FALSE;
}
-
+
#define MIN_INSN_LENGTH 2
/* Return 0 if it fails to decode. */
BFD_ASSERT (FALSE);
return FALSE;
}
-
+
loop_len = insn_decode_len (contents, content_length, offset);
insn_len = insn_decode_len (contents, content_length, offset + loop_len);
if (loop_len == 0 || insn_len == 0)
return 0;
}
-
+
/* Attempt to widen an instruction. If the widening is valid, perform
the action in-place directly into the contents and return TRUE. Otherwise,
the return value is FALSE and the contents are not modified. */
*error_message = _("Attempt to convert L32R/CALLX to CALL failed");
return bfd_reloc_other;
}
-
+
/* Assemble a NOP ("or a1, a1, a1") into the 0 byte offset. */
core_format = xtensa_format_lookup (isa, "x24");
opcode = xtensa_opcode_lookup (isa, "or");
xtensa_opcode_encode (isa, core_format, 0, slotbuf, opcode);
- for (opn = 0; opn < 3; opn++)
+ for (opn = 0; opn < 3; opn++)
{
uint32 regno = 1;
xtensa_operand_encode (isa, opcode, opn, &regno);
#define CONST16_TARGET_REG_OPERAND 0
#define CALLN_SOURCE_OPERAND 0
-static xtensa_opcode
+static xtensa_opcode
get_expanded_call_opcode (bfd_byte *buf, int bufsize, bfd_boolean *p_uses_l32r)
{
static xtensa_insnbuf insnbuf = NULL;
|| xtensa_format_get_slot (isa, fmt, 0, insnbuf, slotbuf))
return XTENSA_UNDEFINED;
opcode = xtensa_opcode_decode (isa, fmt, 0, slotbuf);
- if (opcode == XTENSA_UNDEFINED
+ if (opcode == XTENSA_UNDEFINED
|| !is_indirect_call_opcode (opcode))
return XTENSA_UNDEFINED;
For efficiency, an r_reloc also contains a "target_offset" field to
cache the target-section-relative offset value that is represented by
the relocation.
-
+
The r_reloc also contains a virtual offset that allows multiple
inserted literals to be placed at the same "address" with
different offsets. */
fprintf (fp, " + ");
fprintf_vma (fp, r_rel->virtual_offset);
}
-
+
fprintf (fp, ")");
}
struct literal_value_struct
{
- r_reloc r_rel;
+ r_reloc r_rel;
unsigned long value;
bfd_boolean is_abs_literal;
};
{
struct elf_link_hash_entry *h1, *h2;
- if (r_reloc_is_const (&src1->r_rel) != r_reloc_is_const (&src2->r_rel))
+ if (r_reloc_is_const (&src1->r_rel) != r_reloc_is_const (&src2->r_rel))
return FALSE;
if (r_reloc_is_const (&src1->r_rel))
if (src1->r_rel.target_offset != src2->r_rel.target_offset)
return FALSE;
-
+
if (src1->r_rel.virtual_offset != src2->r_rel.virtual_offset)
return FALSE;
if (src1->value != src2->value)
return FALSE;
-
+
/* Now check for the same section (if defined) or the same elf_hash
(if undefined or weak). */
h1 = r_reloc_get_hash_entry (&src1->r_rel);
values->count = 0;
values->buckets = (value_map **)
bfd_zmalloc (sizeof (value_map *) * values->bucket_count);
- if (values->buckets == NULL)
+ if (values->buckets == NULL)
{
free (values);
return NULL;
hash_val += hash_bfd_vma (src->is_abs_literal * 1000);
hash_val += hash_bfd_vma (src->r_rel.target_offset);
hash_val += hash_bfd_vma (src->r_rel.virtual_offset);
-
+
/* Now check for the same section and the same elf_hash. */
if (r_reloc_is_defined (&src->r_rel))
sec_or_hash = r_reloc_get_section (&src->r_rel);
*bucket_p = val_e;
map->count++;
/* FIXME: Consider resizing the hash table if we get too many entries. */
-
+
return val_e;
}
"unreachable_space" bytes can be freely contracted. Note that a
negative removed value is a fill. */
-static void
+static void
text_action_add (text_action_list *l,
text_action_t action,
asection *sec,
for (m_p = &l->head; *m_p && (*m_p)->offset <= offset; m_p = &(*m_p)->next)
{
text_action *t = *m_p;
- /* When the action is another fill at the same address,
- just increase the size. */
- if (t->offset == offset && t->action == ta_fill && action == ta_fill)
+
+ if (action == ta_fill)
{
- t->removed_bytes += removed;
- return;
+ /* When the action is another fill at the same address,
+ just increase the size. */
+ if (t->offset == offset && t->action == ta_fill)
+ {
+ t->removed_bytes += removed;
+ return;
+ }
+ /* Fills need to happen before widens so that we don't
+ insert fill bytes into the instruction stream. */
+ if (t->offset == offset && t->action == ta_widen_insn)
+ break;
}
}
}
-static bfd_vma
-offset_with_removed_text (text_action_list *action_list, bfd_vma offset)
+/* Find the total offset adjustment for the relaxations specified by
+ text_actions, beginning from a particular starting action. This is
+ typically used from offset_with_removed_text to search an entire list of
+ actions, but it may also be called directly when adjusting adjacent offsets
+ so that each search may begin where the previous one left off. */
+
+static int
+removed_by_actions (text_action **p_start_action,
+ bfd_vma offset,
+ bfd_boolean before_fill)
{
text_action *r;
int removed = 0;
- for (r = action_list->head; r && r->offset <= offset; r = r->next)
+ r = *p_start_action;
+ while (r)
{
- if (r->offset < offset
- || (r->action == ta_fill && r->removed_bytes < 0))
- removed += r->removed_bytes;
+ if (r->offset > offset)
+ break;
+
+ if (r->offset == offset
+ && (before_fill || r->action != ta_fill || r->removed_bytes >= 0))
+ break;
+
+ removed += r->removed_bytes;
+
+ r = r->next;
}
- return (offset - removed);
+ *p_start_action = r;
+ return removed;
+}
+
+
+static bfd_vma
+offset_with_removed_text (text_action_list *action_list, bfd_vma offset)
+{
+ text_action *r = action_list->head;
+ return offset - removed_by_actions (&r, offset, FALSE);
}
}
-static bfd_vma
-offset_with_removed_text_before_fill (text_action_list *action_list,
- bfd_vma offset)
-{
- text_action *r;
- int removed = 0;
-
- for (r = action_list->head; r && r->offset < offset; r = r->next)
- removed += r->removed_bytes;
-
- return (offset - removed);
-}
-
-
/* The find_insn_action routine will only find non-fill actions. */
static text_action *
case ta_remove_longcall:
t = "remove_longcall"; break;
case ta_convert_longcall:
- t = "remove_longcall"; break;
+ t = "convert_longcall"; break;
case ta_narrow_insn:
t = "narrow_insn"; break;
case ta_widen_insn:
fprintf (fp, "%s: %s[0x%lx] \"%s\" %d\n",
r->sec->owner->filename,
- r->sec->name, r->offset, t, r->removed_bytes);
+ r->sec->name, (unsigned long) r->offset, t, r->removed_bytes);
}
}
else
new_r->to.abfd = NULL;
new_r->next = NULL;
-
+
r = removed_list->head;
- if (r == NULL)
+ if (r == NULL)
{
removed_list->head = new_r;
removed_list->tail = new_r;
}
else
{
- while (r->from.target_offset < from->target_offset && r->next)
+ while (r->from.target_offset < from->target_offset && r->next)
{
r = r->next;
}
reallocated, the newly allocated relocations will be referenced
here along with the actual size allocated. The relocation
count will always be found in the section structure. */
- Elf_Internal_Rela *allocated_relocs;
+ Elf_Internal_Rela *allocated_relocs;
unsigned relocs_count;
unsigned allocated_relocs_count;
};
relax_info->fix_array = NULL;
relax_info->fix_array_count = 0;
- relax_info->allocated_relocs = NULL;
+ relax_info->allocated_relocs = NULL;
relax_info->relocs_count = 0;
relax_info->allocated_relocs_count = 0;
}
asection *src_sec;
bfd_vma src_offset;
unsigned src_type; /* Relocation type. */
-
- bfd *target_abfd;
+
asection *target_sec;
bfd_vma target_offset;
bfd_boolean translated;
-
+
reloc_bfd_fix *next;
};
reloc_bfd_fix_init (asection *src_sec,
bfd_vma src_offset,
unsigned src_type,
- bfd *target_abfd,
asection *target_sec,
bfd_vma target_offset,
bfd_boolean translated)
fix->src_sec = src_sec;
fix->src_offset = src_offset;
fix->src_type = src_type;
- fix->target_abfd = target_abfd;
fix->target_sec = target_sec;
fix->target_offset = target_offset;
fix->translated = translated;
static void
-clear_section_cache (section_cache_t *sec_cache)
+free_section_cache (section_cache_t *sec_cache)
{
if (sec_cache->sec)
{
release_internal_relocs (sec_cache->sec, sec_cache->relocs);
if (sec_cache->ptbl)
free (sec_cache->ptbl);
- memset (sec_cache, 0, sizeof (sec_cache));
}
}
goto err;
/* Fill in the new section cache. */
- clear_section_cache (sec_cache);
- memset (sec_cache, 0, sizeof (sec_cache));
+ free_section_cache (sec_cache);
+ init_section_cache (sec_cache);
sec_cache->sec = sec;
sec_cache->contents = contents;
new_entry = &ebb->ptbl[ebb->end_ptbl_idx + 1];
if (((new_entry->flags & XTENSA_PROP_INSN) == 0)
- || ((new_entry->flags & XTENSA_PROP_INSN_NO_TRANSFORM) != 0)
+ || ((new_entry->flags & XTENSA_PROP_NO_TRANSFORM) != 0)
|| ((the_entry->flags & XTENSA_PROP_ALIGN) != 0))
break;
new_entry = &ebb->ptbl[ebb->start_ptbl_idx - 1];
if ((new_entry->flags & XTENSA_PROP_INSN) == 0
- || ((new_entry->flags & XTENSA_PROP_INSN_NO_TRANSFORM) != 0)
+ || ((new_entry->flags & XTENSA_PROP_NO_TRANSFORM) != 0)
|| ((new_entry->flags & XTENSA_PROP_ALIGN) != 0))
return TRUE;
if (new_entry->address + new_entry->size != the_entry->address)
sec_size = bfd_get_section_limit (abfd, sec);
contents = elf_section_data (sec)->this_hdr.contents;
-
+
if (contents == NULL && sec_size != 0)
{
if (!bfd_malloc_and_get_section (abfd, sec, &contents))
free (contents);
return NULL;
}
- if (keep_memory)
+ if (keep_memory)
elf_section_data (sec)->this_hdr.contents = contents;
}
return contents;
(bfd *, asection *, struct bfd_link_info *, value_map_hash_table *);
static Elf_Internal_Rela *get_irel_at_offset
(asection *, Elf_Internal_Rela *, bfd_vma);
-static bfd_boolean is_removable_literal
- (const source_reloc *, int, const source_reloc *, int);
+static bfd_boolean is_removable_literal
+ (const source_reloc *, int, const source_reloc *, int, asection *,
+ property_table_entry *, int);
static bfd_boolean remove_dead_literal
(bfd *, asection *, struct bfd_link_info *, Elf_Internal_Rela *,
- Elf_Internal_Rela *, source_reloc *, property_table_entry *, int);
+ Elf_Internal_Rela *, source_reloc *, property_table_entry *, int);
static bfd_boolean identify_literal_placement
(bfd *, asection *, bfd_byte *, struct bfd_link_info *,
value_map_hash_table *, bfd_boolean *, Elf_Internal_Rela *, int,
static bfd_boolean relax_section (bfd *, asection *, struct bfd_link_info *);
static bfd_boolean translate_section_fixes (asection *);
static bfd_boolean translate_reloc_bfd_fix (reloc_bfd_fix *);
-static void translate_reloc (const r_reloc *, r_reloc *);
+static asection *translate_reloc (const r_reloc *, r_reloc *, asection *);
static void shrink_dynamic_reloc_sections
(struct bfd_link_info *, bfd *, asection *, Elf_Internal_Rela *);
static bfd_boolean move_literal
static bfd_boolean relax_section_symbols (bfd *, asection *);
-static bfd_boolean
+static bfd_boolean
elf_xtensa_relax_section (bfd *abfd,
asection *sec,
struct bfd_link_info *link_info,
bfd_boolean is_relaxable = FALSE;
/* Initialize the per-section relaxation info. */
- for (abfd = link_info->input_bfds; abfd != NULL; abfd = abfd->link_next)
+ for (abfd = link_info->input_bfds; abfd != NULL; abfd = abfd->link.next)
for (sec = abfd->sections; sec != NULL; sec = sec->next)
{
init_xtensa_relax_info (sec);
}
/* Mark relaxable sections (and count relocations against each one). */
- for (abfd = link_info->input_bfds; abfd != NULL; abfd = abfd->link_next)
+ for (abfd = link_info->input_bfds; abfd != NULL; abfd = abfd->link.next)
for (sec = abfd->sections; sec != NULL; sec = sec->next)
{
if (!find_relaxable_sections (abfd, sec, link_info, &is_relaxable))
return TRUE;
/* Allocate space for source_relocs. */
- for (abfd = link_info->input_bfds; abfd != NULL; abfd = abfd->link_next)
+ for (abfd = link_info->input_bfds; abfd != NULL; abfd = abfd->link.next)
for (sec = abfd->sections; sec != NULL; sec = sec->next)
{
xtensa_relax_info *relax_info;
}
/* Collect info on relocations against each relaxable section. */
- for (abfd = link_info->input_bfds; abfd != NULL; abfd = abfd->link_next)
+ for (abfd = link_info->input_bfds; abfd != NULL; abfd = abfd->link.next)
for (sec = abfd->sections; sec != NULL; sec = sec->next)
{
if (!collect_source_relocs (abfd, sec, link_info))
}
/* Compute the text actions. */
- for (abfd = link_info->input_bfds; abfd != NULL; abfd = abfd->link_next)
+ for (abfd = link_info->input_bfds; abfd != NULL; abfd = abfd->link.next)
for (sec = abfd->sections; sec != NULL; sec = sec->next)
{
if (!compute_text_actions (abfd, sec, link_info))
internal_relocs = retrieve_internal_relocs (abfd, sec,
link_info->keep_memory);
- if (internal_relocs == NULL)
+ if (internal_relocs == NULL)
return ok;
contents = retrieve_contents (abfd, sec, link_info->keep_memory);
}
source_relax_info = get_xtensa_relax_info (sec);
- for (i = 0; i < sec->reloc_count; i++)
+ for (i = 0; i < sec->reloc_count; i++)
{
Elf_Internal_Rela *irel = &internal_relocs[i];
r_reloc r_rel;
unsigned i;
bfd_size_type sec_size;
- internal_relocs = retrieve_internal_relocs (abfd, sec,
+ internal_relocs = retrieve_internal_relocs (abfd, sec,
link_info->keep_memory);
- if (internal_relocs == NULL)
+ if (internal_relocs == NULL)
return ok;
sec_size = bfd_get_section_limit (abfd, sec);
}
/* Record relocations against relaxable literal sections. */
- for (i = 0; i < sec->reloc_count; i++)
+ for (i = 0; i < sec->reloc_count; i++)
{
Elf_Internal_Rela *irel = &internal_relocs[i];
r_reloc r_rel;
relocations associated with ASM_EXPANDs because they were just
added in the preceding loop over the relocations. */
- for (i = 0; i < sec->reloc_count; i++)
+ for (i = 0; i < sec->reloc_count; i++)
{
Elf_Internal_Rela *irel = &internal_relocs[i];
bfd_boolean is_reachable;
the l32r_irel. Note: The src_relocs array is not yet
sorted, but it wouldn't matter anyway because we're
searching by source offset instead of target offset. */
- s_reloc = find_source_reloc (target_relax_info->src_relocs,
+ s_reloc = find_source_reloc (target_relax_info->src_relocs,
target_relax_info->src_next,
sec, l32r_irel);
BFD_ASSERT (s_reloc);
if (contents == NULL)
return FALSE;
- if (ELF32_R_TYPE (irel->r_info) != R_XTENSA_ASM_EXPAND)
+ if (ELF32_R_TYPE (irel->r_info) != R_XTENSA_ASM_EXPAND)
return FALSE;
sec_size = bfd_get_section_limit (abfd, sec);
/* Optimization of longcalls that use CONST16 is not yet implemented. */
if (!uses_l32r)
return FALSE;
-
+
direct_call_opcode = swap_callx_for_call_opcode (opcode);
if (direct_call_opcode == XTENSA_UNDEFINED)
return FALSE;
shouldn't crash regardless. */
if (!target_sec->output_section)
return FALSE;
-
+
/* For relocatable sections, we can only simplify when the output
section of the target is the same as the output section of the
source. */
|| is_reloc_sym_weak (abfd, irel)))
return FALSE;
- self_address = (sec->output_section->vma
- + sec->output_offset + irel->r_offset + 3);
- dest_address = (target_sec->output_section->vma
- + target_sec->output_offset + target_offset);
-
+ if (target_sec->output_section != sec->output_section)
+ {
+ /* If the two sections are sufficiently far away that relaxation
+ might take the call out of range, we can't simplify. For
+ example, a positive displacement call into another memory area
+ could get moved to a lower address due to literal removal,
+ but the destination won't move, and so the displacement might
+ get larger.
+
+ If the displacement is negative, assume the destination could
+ move as far back as the start of the output section. The
+ self_address will be at least as far into the output section
+ as it is prior to relaxation.
+
+ If the displacement is positive, assume the destination will be in
+ its pre-relaxed location (because relaxation only makes sections
+ smaller). The self_address could go all the way to the beginning
+ of the output section. */
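+ /* Concretely: for a backward call, where the caller's output section
+ is above the callee's, compare the call site's current address
+ against the very start of the callee's output section; for a
+ forward call, compare the start of the caller's output section
+ against the end of the callee's. Either way the estimated distance
+ is an upper bound on the post-relaxation distance, so a call that
+ fits here will still fit after relaxation. */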
+
+ dest_address = target_sec->output_section->vma;
+ self_address = sec->output_section->vma;
+
+ if (sec->output_section->vma > target_sec->output_section->vma)
+ self_address += sec->output_offset + irel->r_offset + 3;
+ else
+ dest_address += bfd_get_section_limit (abfd, target_sec->output_section);
+ /* Call targets should be four-byte aligned. */
+ dest_address = (dest_address + 3) & ~3;
+ }
+ else
+ {
+
+ self_address = (sec->output_section->vma
+ + sec->output_offset + irel->r_offset + 3);
+ dest_address = (target_sec->output_section->vma
+ + target_sec->output_offset + target_offset);
+ }
+
*is_reachable_p = pcrel_reloc_fits (direct_call_opcode, 0,
self_address, dest_address);
{
unsigned i;
- for (i = 0; i < sec->reloc_count; i++)
+ for (i = 0; i < sec->reloc_count; i++)
{
Elf_Internal_Rela *irel = &internal_relocs[i];
the_entry++;
}
- if (the_entry->flags & XTENSA_PROP_INSN_NO_TRANSFORM)
+ if (the_entry->flags & XTENSA_PROP_NO_TRANSFORM)
/* NO_REORDER is OK */
continue;
return FALSE;
prev_opcode = insn_decode_opcode (contents, content_length, offset-3, 0);
return (xtensa_opcode_is_loop (xtensa_default_isa, prev_opcode) == 1);
-}
+}
/* Find all of the possible actions for an extended basic block. */
insn_len = insn_decode_len (ebb->contents, ebb->content_length,
offset);
- if (insn_len == 0)
+ if (insn_len == 0)
goto decode_error;
if (check_branch_target_aligned_address (offset, insn_len))
{
bfd_size_type simplify_size;
- simplify_size = get_asm_simplify_size (ebb->contents,
+ simplify_size = get_asm_simplify_size (ebb->contents,
ebb->content_length,
irel->r_offset);
if (simplify_size == 0)
ebb_propose_action (ebb_table, EBB_NO_ALIGN, 0,
ta_convert_longcall, offset, 0, TRUE);
-
+
offset += simplify_size;
continue;
}
goto decode_error;
if ((entry->flags & XTENSA_PROP_INSN_NO_DENSITY) == 0
- && (entry->flags & XTENSA_PROP_INSN_NO_TRANSFORM) == 0
+ && (entry->flags & XTENSA_PROP_NO_TRANSFORM) == 0
&& can_narrow_instruction (slotbuf, fmt, opcode) != 0)
{
/* Add an instruction narrow action. */
ebb_propose_action (ebb_table, EBB_NO_ALIGN, 0,
ta_narrow_insn, offset, 0, FALSE);
}
- else if ((entry->flags & XTENSA_PROP_INSN_NO_TRANSFORM) == 0
+ else if ((entry->flags & XTENSA_PROP_NO_TRANSFORM) == 0
&& can_widen_instruction (slotbuf, fmt, opcode) != 0
&& ! prev_instr_is_a_loop (ebb->contents,
ebb->content_length, offset))
bad_alignment = FALSE;
break;
}
+ if (new_action->do_action)
+ removed_bytes += new_action->removed_bytes;
}
if (!bad_alignment)
{
};
-static int
+static int
xlate_compare (const void *a_v, const void *b_v)
{
const xlate_map_entry_t *a = (const xlate_map_entry_t *) a_v;
text_action_list *action_list,
bfd_vma offset)
{
- xlate_map_entry_t tmp;
void *r;
xlate_map_entry_t *e;
if (map->entry_count == 0)
return offset;
- tmp.orig_address = offset;
- tmp.new_address = offset;
- tmp.size = 1;
-
r = bsearch (&offset, map->entry, map->entry_count,
sizeof (xlate_map_entry_t), &xlate_compare);
e = (xlate_map_entry_t *) r;
-
+
BFD_ASSERT (e != NULL);
if (e == NULL)
return offset;
return NULL;
num_actions = action_list_count (action_list);
- map->entry = (xlate_map_entry_t *)
+ map->entry = (xlate_map_entry_t *)
bfd_malloc (sizeof (xlate_map_entry_t) * (num_actions + 1));
if (map->entry == NULL)
{
return NULL;
}
map->entry_count = 0;
-
+
removed = 0;
current_entry = &map->entry[0];
/* Free an offset translation map. */
-static void
+static void
free_xlate_map (xlate_map_t *map)
{
if (map && map->entry)
that fit before linking must fit after linking. Thus we only
need to deal with relocations to the same section that are
PC-relative. */
- if (ELF32_R_TYPE (irel->r_info) == R_XTENSA_ASM_SIMPLIFY
+ if (r_type == R_XTENSA_ASM_SIMPLIFY
+ || r_type == R_XTENSA_32_PCREL
|| !howto->pc_relative)
continue;
add an entry to the per-section list of removed literals. The
actual changes are deferred until the next pass. */
-static bfd_boolean
+static bfd_boolean
compute_removed_literals (bfd *abfd,
asection *sec,
struct bfd_link_info *link_info,
if (!relax_info->is_relaxable_literal_section)
return ok;
- internal_relocs = retrieve_internal_relocs (abfd, sec,
+ internal_relocs = retrieve_internal_relocs (abfd, sec,
link_info->keep_memory);
sec_size = bfd_get_section_limit (abfd, sec);
continue;
prev_i = i;
- if (last_loc_is_prev &&
+ if (last_loc_is_prev &&
last_target_offset + 4 != rel->r_rel.target_offset)
last_loc_is_prev = FALSE;
/* Check if the relocation was from an L32R that is being removed
because a CALLX was converted to a direct CALL, and check if
there are no other relocations to the literal. */
- if (is_removable_literal (rel, i, src_relocs, relax_info->src_count))
+ if (is_removable_literal (rel, i, src_relocs, relax_info->src_count,
+ sec, prop_table, ptblsize))
{
if (!remove_dead_literal (abfd, sec, link_info, internal_relocs,
irel, rel, prop_table, ptblsize))
}
if (!identify_literal_placement (abfd, sec, contents, link_info,
- values,
- &last_loc_is_prev, irel,
+ values,
+ &last_loc_is_prev, irel,
relax_info->src_count - i, rel,
prop_table, ptblsize,
&target_sec_cache, rel->is_abs_literal))
#endif /* DEBUG */
error_return:
- if (prop_table) free (prop_table);
- clear_section_cache (&target_sec_cache);
+ if (prop_table)
+ free (prop_table);
+ free_section_cache (&target_sec_cache);
release_contents (sec, contents);
release_internal_relocs (sec, internal_relocs);
unsigned r_type;
Elf_Internal_Rela key;
- if (!internal_relocs)
+ if (!internal_relocs)
return NULL;
key.r_offset = offset;
is_removable_literal (const source_reloc *rel,
int i,
const source_reloc *src_relocs,
- int src_count)
+ int src_count,
+ asection *sec,
+ property_table_entry *prop_table,
+ int ptblsize)
{
const source_reloc *curr_rel;
+ property_table_entry *entry;
+
if (!rel->is_null)
return FALSE;
-
+
+ entry = elf_xtensa_find_property_entry (prop_table, ptblsize,
+ sec->vma + rel->r_rel.target_offset);
+ if (entry && (entry->flags & XTENSA_PROP_NO_TRANSFORM))
+ return FALSE;
+
for (++i; i < src_count; ++i)
{
curr_rel = &src_relocs[i];
}
-bfd_boolean
+bfd_boolean
remove_dead_literal (bfd *abfd,
asection *sec,
struct bfd_link_info *link_info,
ta_remove_literal, sec, rel->r_rel.target_offset, 4);
/* If the section is 4-byte aligned, do not add fill. */
- if (sec->alignment_power > 2)
+ if (sec->alignment_power > 2)
{
int fill_extra_space;
bfd_vma entry_sec_offset;
}
-bfd_boolean
+bfd_boolean
identify_literal_placement (bfd *abfd,
asection *sec,
bfd_byte *contents,
/* For relocatable links, do not try to move literals. To do it
correctly might increase the number of relocations in an input
section making the default relocatable linking fail. */
- if (!link_info->relocatable && !literal_placed
+ if (!link_info->relocatable && !literal_placed
&& values->has_last_loc && !(*last_loc_is_prev_p))
{
asection *target_sec = r_reloc_get_section (&values->last_loc);
/* There is a last loc that was in the same output section. */
if (relocations_reach (rel, remaining_src_rels, &try_loc)
&& move_shared_literal (sec, link_info, rel,
- prop_table, ptblsize,
+ prop_table, ptblsize,
&try_loc, &val, target_sec_cache))
{
values->last_loc.virtual_offset += 4;
/* Move a literal to another literal location because it is
the same as the other literal value. */
-static bfd_boolean
+static bfd_boolean
coalesce_shared_literal (asection *sec,
source_reloc *rel,
property_table_entry *prop_table,
entry = elf_xtensa_find_property_entry
(prop_table, ptblsize, sec->vma + rel->r_rel.target_offset);
- if (entry && (entry->flags & XTENSA_PROP_INSN_NO_TRANSFORM))
+ if (entry && (entry->flags & XTENSA_PROP_NO_TRANSFORM))
return TRUE;
/* Mark that the literal will be coalesced. */
ta_remove_literal, sec, rel->r_rel.target_offset, 4);
/* If the section is 4-byte aligned, do not add fill. */
- if (sec->alignment_power > 2)
+ if (sec->alignment_power > 2)
{
int fill_extra_space;
bfd_vma entry_sec_offset;
total amount of space used because of alignments so we need to do
this carefully. Also, it may make a branch go out of range. */
-static bfd_boolean
+static bfd_boolean
move_shared_literal (asection *sec,
struct bfd_link_info *link_info,
source_reloc *rel,
return FALSE;
target_entry = elf_xtensa_find_property_entry
- (target_sec_cache->ptbl, target_sec_cache->pte_count,
+ (target_sec_cache->ptbl, target_sec_cache->pte_count,
target_sec->vma + target_loc->target_offset);
if (!target_entry)
init_ebb_constraint (&ebb_table);
ebb = &ebb_table.ebb;
- init_ebb (ebb, target_sec_cache->sec, target_sec_cache->contents,
+ init_ebb (ebb, target_sec_cache->sec, target_sec_cache->contents,
target_sec_cache->content_length,
target_sec_cache->ptbl, target_sec_cache->pte_count,
target_sec_cache->relocs, target_sec_cache->reloc_count);
-4 - (1 << target_sec->alignment_power), TRUE);
/* Check all of the PC-relative relocations to make sure they still fit. */
- relocs_fit = check_section_ebb_pcrels_fit (target_sec->owner, target_sec,
+ relocs_fit = check_section_ebb_pcrels_fit (target_sec->owner, target_sec,
target_sec_cache->contents,
target_sec_cache->relocs,
&ebb_table, NULL);
- if (!relocs_fit)
+ if (!relocs_fit)
return FALSE;
text_action_add_literal (&target_relax_info->action_list,
ta_add_literal, target_loc, lit_value, -4);
- if (target_sec->alignment_power > 2 && target_entry != src_entry)
+ if (target_sec->alignment_power > 2 && target_entry != src_entry)
{
/* May need to add or remove some fill to maintain alignment. */
int fill_extra_space;
bfd_vma entry_sec_offset;
- entry_sec_offset =
+ entry_sec_offset =
target_entry->address - target_sec->vma + target_entry->size;
/* If the literal range is at the end of the section,
ta_remove_literal, sec, rel->r_rel.target_offset, 4);
/* If the section is 4-byte aligned, do not add fill. */
- if (sec->alignment_power > 2 && target_entry != src_entry)
+ if (sec->alignment_power > 2 && target_entry != src_entry)
{
int fill_extra_space;
bfd_vma entry_sec_offset;
return relax_property_section (abfd, sec, link_info);
}
- internal_relocs = retrieve_internal_relocs (abfd, sec,
+ internal_relocs = retrieve_internal_relocs (abfd, sec,
link_info->keep_memory);
+ if (!internal_relocs && !relax_info->action_list.head)
+ return TRUE;
+
contents = retrieve_contents (abfd, sec, link_info->keep_memory);
if (contents == NULL && sec_size != 0)
{
if (relax_info->is_relaxable_literal_section
|| relax_info->is_relaxable_asm_section)
{
+ pin_internal_relocs (sec, internal_relocs);
+
if (r_type != R_XTENSA_NONE
&& find_removed_literal (&relax_info->removed_list,
irel->r_offset))
irel->r_info = ELF32_R_INFO (0, R_XTENSA_NONE);
irel->r_offset = offset_with_removed_text
(&relax_info->action_list, irel->r_offset);
- pin_internal_relocs (sec, internal_relocs);
continue;
}
we may need to change the relocation's target offset. */
target_sec = r_reloc_get_section (&r_rel);
- target_relax_info = get_xtensa_relax_info (target_sec);
+ /* For a reference to a discarded section from a DWARF section,
+ i.e., where action_discarded is PRETEND, the symbol will
+ eventually be modified to refer to the kept section (at least if
+ the kept and discarded sections are the same size). Anticipate
+ that here and adjust things accordingly. */
+ if (! elf_xtensa_ignore_discarded_relocs (sec)
+ && elf_xtensa_action_discarded (sec) == PRETEND
+ && sec->sec_info_type != SEC_INFO_TYPE_STABS
+ && target_sec != NULL
+ && discarded_section (target_sec))
+ {
+ /* It would be natural to call _bfd_elf_check_kept_section
+ here, but it's not exported from elflink.c. It's also a
+ fairly expensive check. Adjusting the relocations to the
+ discarded section is fairly harmless; it will only adjust
+ some addends and difference values. If it turns out that
+ _bfd_elf_check_kept_section fails later, it won't matter,
+ so just compare the section names to find the right group
+ member. */
+ asection *kept = target_sec->kept_section;
+ if (kept != NULL)
+ {
+ if ((kept->flags & SEC_GROUP) != 0)
+ {
+ asection *first = elf_next_in_group (kept);
+ asection *s = first;
+
+ kept = NULL;
+ while (s != NULL)
+ {
+ if (strcmp (s->name, target_sec->name) == 0)
+ {
+ kept = s;
+ break;
+ }
+ s = elf_next_in_group (s);
+ if (s == first)
+ break;
+ }
+ }
+ }
+ if (kept != NULL
+ && ((target_sec->rawsize != 0
+ ? target_sec->rawsize : target_sec->size)
+ == (kept->rawsize != 0 ? kept->rawsize : kept->size)))
+ target_sec = kept;
+ }
+
+ target_relax_info = get_xtensa_relax_info (target_sec);
if (target_relax_info
&& (target_relax_info->is_relaxable_literal_section
|| target_relax_info->is_relaxable_asm_section))
{
r_reloc new_reloc;
- reloc_bfd_fix *fix;
- bfd_vma addend_displacement;
-
- translate_reloc (&r_rel, &new_reloc);
+ target_sec = translate_reloc (&r_rel, &new_reloc, target_sec);
if (r_type == R_XTENSA_DIFF8
|| r_type == R_XTENSA_DIFF16
|| r_type == R_XTENSA_DIFF32)
{
- bfd_vma diff_value = 0, new_end_offset, diff_mask = 0;
+ bfd_signed_vma diff_value = 0;
+ bfd_vma new_end_offset, diff_mask = 0;
if (bfd_get_section_limit (abfd, sec) < old_source_offset)
{
{
case R_XTENSA_DIFF8:
diff_value =
- bfd_get_8 (abfd, &contents[old_source_offset]);
+ bfd_get_signed_8 (abfd, &contents[old_source_offset]);
break;
case R_XTENSA_DIFF16:
diff_value =
- bfd_get_16 (abfd, &contents[old_source_offset]);
+ bfd_get_signed_16 (abfd, &contents[old_source_offset]);
break;
case R_XTENSA_DIFF32:
diff_value =
- bfd_get_32 (abfd, &contents[old_source_offset]);
+ bfd_get_signed_32 (abfd, &contents[old_source_offset]);
break;
}
switch (r_type)
{
case R_XTENSA_DIFF8:
- diff_mask = 0xff;
- bfd_put_8 (abfd, diff_value,
+ diff_mask = 0x7f;
+ bfd_put_signed_8 (abfd, diff_value,
&contents[old_source_offset]);
break;
case R_XTENSA_DIFF16:
- diff_mask = 0xffff;
- bfd_put_16 (abfd, diff_value,
+ diff_mask = 0x7fff;
+ bfd_put_signed_16 (abfd, diff_value,
&contents[old_source_offset]);
break;
case R_XTENSA_DIFF32:
- diff_mask = 0xffffffff;
- bfd_put_32 (abfd, diff_value,
+ diff_mask = 0x7fffffff;
+ bfd_put_signed_32 (abfd, diff_value,
&contents[old_source_offset]);
break;
}
- /* Check for overflow. */
- if ((diff_value & ~diff_mask) != 0)
+ /* Check for overflow. Sign bits must be all zeroes or all ones. */
+ if ((diff_value & ~diff_mask) != 0
+ && (diff_value & ~diff_mask) != (-1 & ~diff_mask))
{
(*link_info->callbacks->reloc_dangerous)
(link_info, _("overflow after relaxation"),
pin_contents (sec, contents);
}
- /* FIXME: If the relocation still references a section in
- the same input file, the relocation should be modified
- directly instead of adding a "fix" record. */
-
- addend_displacement =
- new_reloc.target_offset + new_reloc.virtual_offset;
-
- fix = reloc_bfd_fix_init (sec, source_offset, r_type, 0,
- r_reloc_get_section (&new_reloc),
- addend_displacement, TRUE);
- add_fix (sec, fix);
+ /* If the relocation still references a section in the same
+ input file, modify the relocation directly instead of
+ adding a "fix" record. */
+ if (target_sec->owner == abfd)
+ {
+ unsigned r_symndx = ELF32_R_SYM (new_reloc.rela.r_info);
+ irel->r_info = ELF32_R_INFO (r_symndx, r_type);
+ irel->r_addend = new_reloc.rela.r_addend;
+ pin_internal_relocs (sec, internal_relocs);
+ }
+ else
+ {
+ bfd_vma addend_displacement;
+ reloc_bfd_fix *fix;
+
+ addend_displacement =
+ new_reloc.target_offset + new_reloc.virtual_offset;
+ fix = reloc_bfd_fix_init (sec, source_offset, r_type,
+ target_sec,
+ addend_displacement, TRUE);
+ add_fix (sec, fix);
+ }
}
-
- pin_internal_relocs (sec, internal_relocs);
}
}
of move, copy and fill records. Use the move, copy and
fill records to perform the actions once. */
- bfd_size_type size = sec->size;
int removed = 0;
bfd_size_type final_size, copy_size, orig_insn_size;
bfd_byte *scratch = NULL;
bfd_byte *dup_contents = NULL;
- bfd_size_type orig_size = size;
+ bfd_size_type orig_size = sec->size;
bfd_vma orig_dot = 0;
bfd_vma orig_dot_copied = 0; /* Byte copied already from
orig dot in physical memory. */
dup_dot += copy_size;
}
virtual_action = TRUE;
- }
+ }
else
BFD_ASSERT (action->virtual_offset <= orig_dot_vo);
}
relax_info, &internal_relocs, &action->value))
goto error_return;
- if (virtual_action)
+ if (virtual_action)
orig_dot_vo += copy_size;
orig_dot += orig_insn_size;
break;
}
- size -= action->removed_bytes;
removed += action->removed_bytes;
BFD_ASSERT (dup_dot <= final_size);
BFD_ASSERT (orig_dot <= orig_size);
free (scratch);
pin_contents (sec, contents);
+ if (sec->rawsize == 0)
+ sec->rawsize = sec->size;
sec->size = final_size;
}
}
-static bfd_boolean
+static bfd_boolean
translate_section_fixes (asection *sec)
{
xtensa_relax_info *relax_info;
/* Translate a fix given the mapping in the relax info for the target
section. If it has already been translated, no work is required. */
-static bfd_boolean
+static bfd_boolean
translate_reloc_bfd_fix (reloc_bfd_fix *fix)
{
reloc_bfd_fix new_fix;
target_offset);
}
- if (removed)
+ if (removed)
{
asection *new_sec;
/* This was moved to some other address (possibly another section). */
new_sec = r_reloc_get_section (&removed->to);
- if (new_sec != sec)
+ if (new_sec != sec)
{
sec = new_sec;
relax_info = get_xtensa_relax_info (sec);
- if (!relax_info ||
+ if (!relax_info ||
(!relax_info->is_relaxable_literal_section
&& !relax_info->is_relaxable_asm_section))
{
/* Fix up a relocation to take account of removed literals. */
-static void
-translate_reloc (const r_reloc *orig_rel, r_reloc *new_rel)
+static asection *
+translate_reloc (const r_reloc *orig_rel, r_reloc *new_rel, asection *sec)
{
- asection *sec;
xtensa_relax_info *relax_info;
removed_literal *removed;
- bfd_vma new_offset, target_offset, removed_bytes;
+ bfd_vma target_offset, base_offset;
+ text_action *act;
*new_rel = *orig_rel;
if (!r_reloc_is_defined (orig_rel))
- return;
- sec = r_reloc_get_section (orig_rel);
+ return sec;
relax_info = get_xtensa_relax_info (sec);
- BFD_ASSERT (relax_info);
-
- if (!relax_info->is_relaxable_literal_section
- && !relax_info->is_relaxable_asm_section)
- return;
+ BFD_ASSERT (relax_info && (relax_info->is_relaxable_literal_section
+ || relax_info->is_relaxable_asm_section));
target_offset = orig_rel->target_offset;
if (!relax_info
|| (!relax_info->is_relaxable_literal_section
&& !relax_info->is_relaxable_asm_section))
- return;
+ return sec;
}
target_offset = new_rel->target_offset;
}
- /* ...and the target address may have been moved within its section. */
- new_offset = offset_with_removed_text (&relax_info->action_list,
- target_offset);
+ /* Find the base offset of the reloc symbol, excluding any addend from the
+ reloc or from the section contents (for a partial_inplace reloc). Then
+ find the adjusted values of the offsets due to relaxation. The base
+ offset is needed to determine the change to the reloc's addend; the reloc
+ addend should not be adjusted due to relaxations located before the base
+ offset. */
+
+ base_offset = r_reloc_get_target_offset (new_rel) - new_rel->rela.r_addend;
+ act = relax_info->action_list.head;
+ if (base_offset <= target_offset)
+ {
+ int base_removed = removed_by_actions (&act, base_offset, FALSE);
+ int addend_removed = removed_by_actions (&act, target_offset, FALSE);
+ new_rel->target_offset = target_offset - base_removed - addend_removed;
+ new_rel->rela.r_addend -= addend_removed;
+ }
+ else
+ {
+ /* Handle a negative addend. The base offset comes first. */
+ int tgt_removed = removed_by_actions (&act, target_offset, FALSE);
+ int addend_removed = removed_by_actions (&act, base_offset, FALSE);
+ new_rel->target_offset = target_offset - tgt_removed;
+ new_rel->rela.r_addend += addend_removed;
+ }
- /* Modify the offset and addend. */
- removed_bytes = target_offset - new_offset;
- new_rel->target_offset = new_offset;
- new_rel->rela.r_addend -= removed_bytes;
+ return sec;
}
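[Editorial note, not part of the patch: a worked example of the base/addend
split above, with made-up numbers. Suppose the reloc's symbol resolves to
section offset 0x100 with addend 0x20, so target_offset is 0x120, and
relaxation removed 8 bytes below 0x100 plus another 4 bytes between 0x100
and 0x120. Then base_removed is 8 and addend_removed is 4, so the new
target_offset is 0x120 - 8 - 4 = 0x114, while only the 4 bytes between the
symbol and the target are subtracted from the addend (0x20 - 4 = 0x1c); the
8 bytes below the symbol are covered by the symbol's own adjustment in
relax_section_symbols.]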
bfd_boolean dynamic_symbol;
htab = elf_xtensa_hash_table (info);
+ if (htab == NULL)
+ return;
+
symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
sym_hashes = elf_sym_hashes (abfd);
{
int r_type;
unsigned i;
- asection *target_sec;
reloc_bfd_fix *fix;
unsigned insert_at;
r_type = ELF32_R_TYPE (r_rel->rela.r_info);
- target_sec = r_reloc_get_section (r_rel);
/* This is the difficult case. We have to create a fix up. */
this_rela.r_offset = offset;
/* Currently, we cannot move relocations during a relocatable link. */
BFD_ASSERT (!link_info->relocatable);
- fix = reloc_bfd_fix_init (sec, offset, r_type, r_rel->abfd,
+ fix = reloc_bfd_fix_init (sec, offset, r_type,
r_reloc_get_section (r_rel),
r_rel->target_offset + r_rel->virtual_offset,
FALSE);
BFD_ASSERT (relax_info->allocated_relocs == NULL
|| sec->reloc_count == relax_info->relocs_count);
- if (relax_info->allocated_relocs_count == 0)
+ if (relax_info->allocated_relocs_count == 0)
new_relocs_count = (sec->reloc_count + 2) * 2;
else
new_relocs_count = (relax_info->allocated_relocs_count + 2) * 2;
if (insert_at != sec->reloc_count)
memcpy (new_relocs + insert_at + 1,
(*internal_relocs_p) + insert_at,
- (sec->reloc_count - insert_at)
+ (sec->reloc_count - insert_at)
* sizeof (Elf_Internal_Rela));
if (*internal_relocs_p != relax_info->allocated_relocs)
bfd_size_type entry_size;
sec_size = bfd_get_section_limit (abfd, sec);
- internal_relocs = retrieve_internal_relocs (abfd, sec,
+ internal_relocs = retrieve_internal_relocs (abfd, sec,
link_info->keep_memory);
contents = retrieve_contents (abfd, sec, link_info->keep_memory);
if (contents == NULL && sec_size != 0)
|| target_relax_info->is_relaxable_asm_section ))
{
/* Translate the relocation's destination. */
- bfd_vma new_offset, new_end_offset;
+ bfd_vma old_offset = val.r_rel.target_offset;
+ bfd_vma new_offset;
long old_size, new_size;
-
- new_offset = offset_with_removed_text
- (&target_relax_info->action_list, val.r_rel.target_offset);
+ text_action *act = target_relax_info->action_list.head;
+ new_offset = old_offset -
+ removed_by_actions (&act, old_offset, FALSE);
/* Assert that we are not out of bounds. */
old_size = bfd_get_32 (abfd, size_p);
+ new_size = old_size;
if (old_size == 0)
{
offset before or after the fill address depending
on whether the expanding unreachable entry
 precedes it. */
- if (last_zfill_target_sec
- && last_zfill_target_sec == target_sec
- && last_zfill_target_offset == val.r_rel.target_offset)
- new_end_offset = new_offset;
- else
+ if (last_zfill_target_sec == 0
+ || last_zfill_target_sec != target_sec
+ || last_zfill_target_offset != old_offset)
{
- new_end_offset = new_offset;
- new_offset = offset_with_removed_text_before_fill
- (&target_relax_info->action_list,
- val.r_rel.target_offset);
+ bfd_vma new_end_offset = new_offset;
+
+ /* Recompute the new_offset, but this time don't
+ include any fill inserted by relaxation. */
+ act = target_relax_info->action_list.head;
+ new_offset = old_offset -
+ removed_by_actions (&act, old_offset, TRUE);
/* If it is not unreachable and we have not yet
seen an unreachable at this address, place it
before the fill address. */
- if (!flags_p
- || (bfd_get_32 (abfd, flags_p)
- & XTENSA_PROP_UNREACHABLE) == 0)
- new_end_offset = new_offset;
- else
+ if (flags_p && (bfd_get_32 (abfd, flags_p)
+ & XTENSA_PROP_UNREACHABLE) != 0)
{
+ new_size = new_end_offset - new_offset;
+
last_zfill_target_sec = target_sec;
- last_zfill_target_offset = val.r_rel.target_offset;
+ last_zfill_target_offset = old_offset;
}
}
}
else
- {
- new_end_offset = offset_with_removed_text_before_fill
- (&target_relax_info->action_list,
- val.r_rel.target_offset + old_size);
- }
-
- new_size = new_end_offset - new_offset;
+ new_size -=
+ removed_by_actions (&act, old_offset + old_size, TRUE);
if (new_size != old_size)
{
pin_contents (sec, contents);
}
- if (new_offset != val.r_rel.target_offset)
+ if (new_offset != old_offset)
{
- bfd_vma diff = new_offset - val.r_rel.target_offset;
+ bfd_vma diff = new_offset - old_offset;
irel->r_addend += diff;
pin_internal_relocs (sec, internal_relocs);
}
Elf_Internal_Rela *irel, *next_rel, *rel_end;
int removed_bytes = 0;
bfd_vma offset;
- bfd_vma section_size;
flagword predef_flags;
predef_flags = xtensa_get_property_predef_flags (sec);
next_rel = internal_relocs;
rel_end = internal_relocs + sec->reloc_count;
- section_size = sec->size;
- BFD_ASSERT (section_size % entry_size == 0);
+ BFD_ASSERT (sec->size % entry_size == 0);
- for (offset = 0; offset < section_size; offset += entry_size)
+ for (offset = 0; offset < sec->size; offset += entry_size)
{
Elf_Internal_Rela *offset_rel, *extra_rel;
bfd_vma bytes_to_remove, size, actual_offset;
actual_offset = offset - removed_bytes;
size = bfd_get_32 (abfd, &contents[actual_offset + 4]);
- if (is_full_prop_section)
+ if (is_full_prop_section)
flags = bfd_get_32 (abfd, &contents[actual_offset + 8]);
else
flags = predef_flags;
bfd_vma new_address =
(offset_rel->r_addend
+ bfd_get_32 (abfd, &contents[actual_offset]));
- if (is_full_prop_section)
+ if (is_full_prop_section)
old_flags = bfd_get_32
(abfd, &contents[last_irel->r_offset + 8]);
else
if (remove_this_rel)
{
offset_rel->r_info = ELF32_R_INFO (0, R_XTENSA_NONE);
- /* In case this is the last entry, move the relocation offset
- to the previous entry, if there is one. */
- if (offset_rel->r_offset >= bytes_to_remove)
- offset_rel->r_offset -= bytes_to_remove;
- else
- offset_rel->r_offset = 0;
+ offset_rel->r_offset = 0;
}
if (bytes_to_remove != 0)
{
removed_bytes += bytes_to_remove;
- if (offset + bytes_to_remove < section_size)
+ if (offset + bytes_to_remove < sec->size)
memmove (&contents[actual_offset],
&contents[actual_offset + bytes_to_remove],
- section_size - offset - bytes_to_remove);
+ sec->size - offset - bytes_to_remove);
}
}
irel->r_offset -= removed_bytes;
/* Clear the removed bytes. */
- memset (&contents[section_size - removed_bytes], 0, removed_bytes);
+ memset (&contents[sec->size - removed_bytes], 0, removed_bytes);
- sec->size = section_size - removed_bytes;
+ if (sec->rawsize == 0)
+ sec->rawsize = sec->size;
+ sec->size -= removed_bytes;
if (xtensa_is_littable_section (sec))
{
if (isym->st_shndx == sec_shndx)
{
- bfd_vma new_address = offset_with_removed_text
- (&relax_info->action_list, isym->st_value);
- bfd_vma new_size = isym->st_size;
+ text_action *act = relax_info->action_list.head;
+ bfd_vma orig_addr = isym->st_value;
- if (ELF32_ST_TYPE (isym->st_info) == STT_FUNC)
- {
- bfd_vma new_end = offset_with_removed_text
- (&relax_info->action_list, isym->st_value + isym->st_size);
- new_size = new_end - new_address;
- }
+ isym->st_value -= removed_by_actions (&act, orig_addr, FALSE);
- isym->st_value = new_address;
- isym->st_size = new_size;
+ if (ELF32_ST_TYPE (isym->st_info) == STT_FUNC)
+ isym->st_size -=
+ removed_by_actions (&act, orig_addr + isym->st_size, FALSE);
}
}
|| sym_hash->root.type == bfd_link_hash_defweak)
&& sym_hash->root.u.def.section == sec)
{
- bfd_vma new_address = offset_with_removed_text
- (&relax_info->action_list, sym_hash->root.u.def.value);
- bfd_vma new_size = sym_hash->size;
+ text_action *act = relax_info->action_list.head;
+ bfd_vma orig_addr = sym_hash->root.u.def.value;
- if (sym_hash->type == STT_FUNC)
- {
- bfd_vma new_end = offset_with_removed_text
- (&relax_info->action_list,
- sym_hash->root.u.def.value + sym_hash->size);
- new_size = new_end - new_address;
- }
+ sym_hash->root.u.def.value -=
+ removed_by_actions (&act, orig_addr, FALSE);
- sym_hash->root.u.def.value = new_address;
- sym_hash->size = new_size;
+ if (sym_hash->type == STT_FUNC)
+ sym_hash->size -=
+ removed_by_actions (&act, orig_addr + sym_hash->size, FALSE);
}
}
if (chunk == 0)
{
htab = elf_xtensa_hash_table (info);
+ if (htab == NULL)
+ return NULL;
+
return htab->splt;
}
dynobj = elf_hash_table (info)->dynobj;
sprintf (plt_name, ".plt.%u", chunk);
- return bfd_get_section_by_name (dynobj, plt_name);
+ return bfd_get_linker_section (dynobj, plt_name);
}
if (chunk == 0)
{
htab = elf_xtensa_hash_table (info);
+ if (htab == NULL)
+ return NULL;
return htab->sgotplt;
}
dynobj = elf_hash_table (info)->dynobj;
sprintf (got_name, ".got.plt.%u", chunk);
- return bfd_get_section_by_name (dynobj, got_name);
+ return bfd_get_linker_section (dynobj, got_name);
}
if (section_index == SHN_UNDEF)
target_sec = bfd_und_section_ptr;
- else if (section_index > 0 && section_index < SHN_LORESERVE)
- target_sec = bfd_section_from_elf_index (abfd, section_index);
else if (section_index == SHN_ABS)
target_sec = bfd_abs_section_ptr;
else if (section_index == SHN_COMMON)
target_sec = bfd_com_section_ptr;
else
- /* Who knows? */
- target_sec = NULL;
+ target_sec = bfd_section_from_elf_index (abfd, section_index);
}
else
{
}
-static bfd_boolean
+static bfd_boolean
xtensa_is_property_section (asection *sec)
{
if (xtensa_is_insntable_section (sec)
}
-static bfd_boolean
+static bfd_boolean
xtensa_is_insntable_section (asection *sec)
{
if (CONST_STRNEQ (sec->name, XTENSA_INSN_SEC_NAME)
}
-static bfd_boolean
+static bfd_boolean
xtensa_is_littable_section (asection *sec)
{
if (CONST_STRNEQ (sec->name, XTENSA_LIT_SEC_NAME)
}
-static bfd_boolean
+static bfd_boolean
xtensa_is_proptable_section (asection *sec)
{
if (CONST_STRNEQ (sec->name, XTENSA_PROP_SEC_NAME)
{
const char *gname = inf;
const char *group_name = elf_group_name (sec);
-
+
return (group_name == gname
|| (group_name != NULL
&& gname != NULL
static int linkonce_len = sizeof (".gnu.linkonce.") - 1;
-asection *
-xtensa_get_property_section (asection *sec, const char *base_name)
+static char *
+xtensa_property_section_name (asection *sec, const char *base_name)
{
const char *suffix, *group_name;
char *prop_sec_name;
- asection *prop_sec;
group_name = elf_group_name (sec);
if (group_name)
{
char *linkonce_kind = 0;
- if (strcmp (base_name, XTENSA_INSN_SEC_NAME) == 0)
+ if (strcmp (base_name, XTENSA_INSN_SEC_NAME) == 0)
linkonce_kind = "x.";
- else if (strcmp (base_name, XTENSA_LIT_SEC_NAME) == 0)
+ else if (strcmp (base_name, XTENSA_LIT_SEC_NAME) == 0)
linkonce_kind = "p.";
else if (strcmp (base_name, XTENSA_PROP_SEC_NAME) == 0)
linkonce_kind = "prop.";
else
prop_sec_name = strdup (base_name);
+ return prop_sec_name;
+}
+
+
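+/* Look up the property section for SEC that corresponds to BASE_NAME,
+ restricted to SEC's section group; return NULL if it does not exist. */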
+static asection *
+xtensa_get_property_section (asection *sec, const char *base_name)
+{
+ char *prop_sec_name;
+ asection *prop_sec;
+
+ prop_sec_name = xtensa_property_section_name (sec, base_name);
+ prop_sec = bfd_get_section_by_name_if (sec->owner, prop_sec_name,
+ match_section_group,
+ (void *) elf_group_name (sec));
+ free (prop_sec_name);
+ return prop_sec;
+}
+
+
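+/* Like xtensa_get_property_section, but create the property section
+ (inheriting SEC's group name) if it does not already exist. */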
+asection *
+xtensa_make_property_section (asection *sec, const char *base_name)
+{
+ char *prop_sec_name;
+ asection *prop_sec;
+
/* Check if the section already exists. */
+ prop_sec_name = xtensa_property_section_name (sec, base_name);
prop_sec = bfd_get_section_by_name_if (sec->owner, prop_sec_name,
match_section_group,
- (void *) group_name);
+ (void *) elf_group_name (sec));
/* If not, create it. */
if (! prop_sec)
{
if (! prop_sec)
return 0;
- elf_group_name (prop_sec) = group_name;
+ elf_group_name (prop_sec) = elf_group_name (sec);
}
free (prop_sec_name);
{
if (xtensa_is_insntable_section (sec))
return (XTENSA_PROP_INSN
- | XTENSA_PROP_INSN_NO_TRANSFORM
+ | XTENSA_PROP_NO_TRANSFORM
| XTENSA_PROP_INSN_NO_REORDER);
if (xtensa_is_littable_section (sec))
return (XTENSA_PROP_LITERAL
- | XTENSA_PROP_INSN_NO_TRANSFORM
+ | XTENSA_PROP_NO_TRANSFORM
| XTENSA_PROP_INSN_NO_REORDER);
return 0;
/* Find the corresponding ".got.plt*" section. */
if (sec->name[4] == '\0')
- sgotplt = bfd_get_section_by_name (sec->owner, ".got.plt");
+ sgotplt = bfd_get_linker_section (sec->owner, ".got.plt");
else
{
char got_name[14];
chunk = strtol (&sec->name[5], NULL, 10);
sprintf (got_name, ".got.plt.%u", chunk);
- sgotplt = bfd_get_section_by_name (sec->owner, got_name);
+ sgotplt = bfd_get_linker_section (sec->owner, got_name);
}
BFD_ASSERT (sgotplt);
(*callback) (sec, sec_size, sgotplt, 0, closure);
}
- internal_relocs = retrieve_internal_relocs (abfd, sec,
+ /* Only ELF files are supported for Xtensa. Check here to avoid a segfault
+ when building uclibc, which runs "ld -b binary /dev/null". */
+ if (bfd_get_flavour (abfd) != bfd_target_elf_flavour)
+ return ok;
+
+ internal_relocs = retrieve_internal_relocs (abfd, sec,
link_info->keep_memory);
if (internal_relocs == NULL
|| sec->reloc_count == 0)
{ NULL, 0, 0, 0, 0 }
};
\f
+#define ELF_TARGET_ID XTENSA_ELF_DATA
#ifndef ELF_ARCH
-#define TARGET_LITTLE_SYM bfd_elf32_xtensa_le_vec
+#define TARGET_LITTLE_SYM xtensa_elf32_le_vec
#define TARGET_LITTLE_NAME "elf32-xtensa-le"
-#define TARGET_BIG_SYM bfd_elf32_xtensa_be_vec
+#define TARGET_BIG_SYM xtensa_elf32_be_vec
#define TARGET_BIG_NAME "elf32-xtensa-be"
#define ELF_ARCH bfd_arch_xtensa
#define elf_info_to_howto elf_xtensa_info_to_howto_rela
+#define bfd_elf32_mkobject elf_xtensa_mkobject
+
#define bfd_elf32_bfd_merge_private_bfd_data elf_xtensa_merge_private_bfd_data
#define bfd_elf32_new_section_hook elf_xtensa_new_section_hook
#define bfd_elf32_bfd_print_private_bfd_data elf_xtensa_print_private_bfd_data
#define elf_backend_reloc_type_class elf_xtensa_reloc_type_class
#define elf_backend_relocate_section elf_xtensa_relocate_section
#define elf_backend_size_dynamic_sections elf_xtensa_size_dynamic_sections
+#define elf_backend_always_size_sections elf_xtensa_always_size_sections
#define elf_backend_omit_section_dynsym \
((bfd_boolean (*) (bfd *, struct bfd_link_info *, asection *)) bfd_true)
#define elf_backend_special_sections elf_xtensa_special_sections
#define elf_backend_action_discarded elf_xtensa_action_discarded
+#define elf_backend_copy_indirect_symbol elf_xtensa_copy_indirect_symbol
#include "elf32-target.h"