/* 32-bit ELF support for ARM
Copyright 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
- 2008 Free Software Foundation, Inc.
+ 2008, 2009 Free Software Foundation, Inc.
This file is part of BFD, the Binary File Descriptor library.
MA 02110-1301, USA. */
#include "sysdep.h"
+#include <limits.h>
+
#include "bfd.h"
#include "libiberty.h"
#include "libbfd.h"
static struct elf_backend_data elf32_arm_vxworks_bed;
+static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
+ struct bfd_link_info *link_info,
+ asection *sec,
+ bfd_byte *contents);
+
/* Note: code such as elf32_arm_reloc_type_lookup expect to use e.g.
R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
in that slot. */
interworkable. */
#define INTERWORK_FLAG(abfd) \
(EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
- || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK))
+ || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
+ || ((abfd)->flags & BFD_LINKER_CREATED))
/* The linker script knows the section names for placement.
The entry_names are used to do simple name mangling on the stubs.
#define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
#define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
-static const bfd_vma arm_long_branch_stub[] =
+enum stub_insn_type
{
- 0xe51ff004, /* ldr pc, [pc, #-4] */
- 0x00000000, /* dcd R_ARM_ABS32(X) */
+ THUMB16_TYPE = 1,
+ THUMB32_TYPE,
+ ARM_TYPE,
+ DATA_TYPE
+ };
+
+#define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
+/* A bit of a hack. A Thumb conditional branch, into which the proper condition
+ is inserted by arm_build_one_stub(). */
+#define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
+#define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
+#define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
+#define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
+#define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
+#define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
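+
+/* In the macros above, X is the instruction encoding or data word, Y
+ (DATA_WORD only) is the relocation applied to the entry, and Z is the
+ addend used with that relocation; together they initialise the fields of
+ the insn_sequence structure below. */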
+
+typedef struct
+{
+ bfd_vma data;
+ enum stub_insn_type type;
+ unsigned int r_type;
+ int reloc_addend;
+} insn_sequence;
+
+/* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
+ to reach the stub if necessary. */
+static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
+ {
+ ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
+ DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
+ };
+
+/* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
+ available. */
+static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
+ {
+ ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
+ ARM_INSN(0xe12fff1c), /* bx ip */
+ DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
+ };
+
+/* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
+static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
+ {
+ THUMB16_INSN(0xb401), /* push {r0} */
+ THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
+ THUMB16_INSN(0x4684), /* mov ip, r0 */
+ THUMB16_INSN(0xbc01), /* pop {r0} */
+ THUMB16_INSN(0x4760), /* bx ip */
+ THUMB16_INSN(0xbf00), /* nop */
+ DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
+ };
+
+/* V4T Thumb -> Thumb long branch stub. Using the stack is not
+ allowed. */
+static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
+ {
+ THUMB16_INSN(0x4778), /* bx pc */
+ THUMB16_INSN(0x46c0), /* nop */
+ ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
+ ARM_INSN(0xe12fff1c), /* bx ip */
+ DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
+ };
+
+/* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
+ available. */
+static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
+ {
+ THUMB16_INSN(0x4778), /* bx pc */
+ THUMB16_INSN(0x46c0), /* nop */
+ ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
+ DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
+ };
+
+/* V4T Thumb -> ARM short branch stub. Shorter variant of the above
+ one, when the destination is close enough. */
+static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
+ {
+ THUMB16_INSN(0x4778), /* bx pc */
+ THUMB16_INSN(0x46c0), /* nop */
+ ARM_REL_INSN(0xea000000, -8), /* b (X-8) */
+ };
+
+/* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
+ blx to reach the stub if necessary. */
+static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
+ {
+ ARM_INSN(0xe59fc000), /* ldr r12, [pc] */
+ ARM_INSN(0xe08ff00c), /* add pc, pc, ip */
+ DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
+ };
+
+/* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
+ blx to reach the stub if necessary. We cannot add into pc;
+ it is not guaranteed to mode switch (different in ARMv6 and
+ ARMv7). */
+static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
+ {
+ ARM_INSN(0xe59fc004), /* ldr r12, [pc, #4] */
+ ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
+ ARM_INSN(0xe12fff1c), /* bx ip */
+ DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
};
-static const bfd_vma arm_thumb_v4t_long_branch_stub[] =
+/* V4T ARM -> Thumb long branch stub, PIC. */
+static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
{
- 0xe59fc000, /* ldr ip, [pc, #0] */
- 0xe12fff1c, /* bx ip */
- 0x00000000, /* dcd R_ARM_ABS32(X) */
+ ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
+ ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
+ ARM_INSN(0xe12fff1c), /* bx ip */
+ DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
};
-static const bfd_vma arm_thumb_thumb_long_branch_stub[] =
+/* V4T Thumb -> ARM long branch stub, PIC. */
+static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
{
- 0x4e02b540, /* push {r6, lr} */
- /* ldr r6, [pc, #8] */
- 0x473046fe, /* mov lr, pc */
- /* bx r6 */
- 0xbf00bd40, /* pop {r6, pc} */
- /* nop */
- 0x00000000, /* dcd R_ARM_ABS32(X) */
+ THUMB16_INSN(0x4778), /* bx pc */
+ THUMB16_INSN(0x46c0), /* nop */
+ ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
+ ARM_INSN(0xe08cf00f), /* add pc, ip, pc */
+ DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
};
-static const bfd_vma arm_thumb_arm_v4t_long_branch_stub[] =
+/* Thumb -> Thumb long branch stub, PIC. Used on M-profile
+ architectures. */
+static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
{
- 0x4e03b540, /* push {r6, lr} */
- /* ldr r6, [pc, #12] */
- 0x473046fe, /* mov lr, pc */
- /* bx r6 */
- 0xe8bd4040, /* pop {r6, pc} */
- 0xe12fff1e, /* bx lr */
- 0x00000000, /* dcd R_ARM_ABS32(X) */
+ THUMB16_INSN(0xb401), /* push {r0} */
+ THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
+ THUMB16_INSN(0x46fc), /* mov ip, pc */
+ THUMB16_INSN(0x4484), /* add ip, r0 */
+ THUMB16_INSN(0xbc01), /* pop {r0} */
+ THUMB16_INSN(0x4760), /* bx ip */
+ DATA_WORD(0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X+4) */
};
-static const bfd_vma arm_thumb_arm_v4t_short_branch_stub[] =
+/* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
+ allowed. */
+static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
{
- 0x46c04778, /* bx pc */
- /* nop */
- 0xea000000, /* b (X) */
+ THUMB16_INSN(0x4778), /* bx pc */
+ THUMB16_INSN(0x46c0), /* nop */
+ ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
+ ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
+ ARM_INSN(0xe12fff1c), /* bx ip */
+ DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
};
-static const bfd_vma arm_pic_long_branch_stub[] =
+/* Cortex-A8 erratum-workaround stubs. */
+
+/* Stub used for conditional branches (which may be beyond +/-1MB away, so we
+ can't use a conditional branch to reach this stub). */
+
+static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
+ {
+ THUMB16_BCOND_INSN(0xd001), /* b<cond>.n true. */
+ THUMB32_B_INSN(0xf000b800, -4), /* b.w insn_after_original_branch. */
+ THUMB32_B_INSN(0xf000b800, -4) /* true: b.w original_branch_dest. */
+ };
+
+/* Stub used for b.w and bl.w instructions. */
+
+static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
+ {
+ THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
+ };
+
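+/* Identical to the b.w veneer above; kept as a separate stub type so that
+ B and BL fixes are tracked distinctly. */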
+static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
+ {
+ THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
+ };
+
+/* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
+ instruction (which switches to ARM mode) to point to this stub. Jump to the
+ real destination using an ARM-mode branch. */
+
+static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
{
- 0xe59fc000, /* ldr r12, [pc] */
- 0xe08ff00c, /* add pc, pc, ip */
- 0x00000000, /* dcd R_ARM_REL32(X) */
+ ARM_REL_INSN(0xea000000, -8) /* b original_branch_dest. */
};
/* Section name for stubs is the associated section name plus this
string. */
#define STUB_SUFFIX ".stub"
-enum elf32_arm_stub_type
-{
+/* One entry per long/short branch stub defined above. */
+#define DEF_STUBS \
+ DEF_STUB(long_branch_any_any) \
+ DEF_STUB(long_branch_v4t_arm_thumb) \
+ DEF_STUB(long_branch_thumb_only) \
+ DEF_STUB(long_branch_v4t_thumb_thumb) \
+ DEF_STUB(long_branch_v4t_thumb_arm) \
+ DEF_STUB(short_branch_v4t_thumb_arm) \
+ DEF_STUB(long_branch_any_arm_pic) \
+ DEF_STUB(long_branch_any_thumb_pic) \
+ DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
+ DEF_STUB(long_branch_v4t_arm_thumb_pic) \
+ DEF_STUB(long_branch_v4t_thumb_arm_pic) \
+ DEF_STUB(long_branch_thumb_only_pic) \
+ DEF_STUB(a8_veneer_b_cond) \
+ DEF_STUB(a8_veneer_b) \
+ DEF_STUB(a8_veneer_bl) \
+ DEF_STUB(a8_veneer_blx)
+
+#define DEF_STUB(x) arm_stub_##x,
+enum elf32_arm_stub_type {
arm_stub_none,
- arm_stub_long_branch,
- arm_thumb_v4t_stub_long_branch,
- arm_thumb_thumb_stub_long_branch,
- arm_thumb_arm_v4t_stub_long_branch,
- arm_thumb_arm_v4t_stub_short_branch,
- arm_stub_pic_long_branch,
+ DEF_STUBS
+ /* Note the first a8_veneer type */
+ arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond
+};
+#undef DEF_STUB
+
+typedef struct
+{
+ const insn_sequence* template;
+ int template_size;
+} stub_def;
+
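+/* The leading {NULL, 0} entry stands in for arm_stub_none, so that the
+ table below can be indexed directly by enum elf32_arm_stub_type. */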
+#define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
+static const stub_def stub_definitions[] = {
+ {NULL, 0},
+ DEF_STUBS
};
struct elf32_arm_stub_hash_entry
bfd_vma target_value;
asection *target_section;
+ /* Offset to apply to relocation referencing target_value. */
+ bfd_vma target_addend;
+
+ /* The instruction which caused this stub to be generated (only valid for
+ Cortex-A8 erratum workaround stubs at present). */
+ unsigned long orig_insn;
+
+ /* The stub type. */
enum elf32_arm_stub_type stub_type;
+ /* Its encoding size in bytes. */
+ int stub_size;
+ /* Its template. */
+ const insn_sequence *stub_template;
+ /* The size of the template (number of entries). */
+ int stub_template_size;
/* The symbol table entry, if any, that this was derived from. */
struct elf32_arm_link_hash_entry *h;
}
elf32_vfp11_erratum_list;
+typedef enum
+{
+ DELETE_EXIDX_ENTRY, /* Delete this unwind table entry. */
+ INSERT_EXIDX_CANTUNWIND_AT_END /* Add a CANTUNWIND entry at the end. */
+}
+arm_unwind_edit_type;
+
+/* A (sorted) list of edits to apply to an unwind table. */
+typedef struct arm_unwind_table_edit
+{
+ arm_unwind_edit_type type;
+ /* Note: we sometimes want to insert an unwind entry corresponding to a
+ section different from the one we're currently writing out, so record the
+ (text) section this edit relates to here. */
+ asection *linked_section;
+ unsigned int index;
+ struct arm_unwind_table_edit *next;
+}
+arm_unwind_table_edit;
+
typedef struct _arm_elf_section_data
{
+ /* Information about mapping symbols. */
struct bfd_elf_section_data elf;
unsigned int mapcount;
unsigned int mapsize;
elf32_arm_section_map *map;
+ /* Information about CPU errata. */
unsigned int erratumcount;
elf32_vfp11_erratum_list *erratumlist;
+ /* Information about unwind tables. */
+ union
+ {
+ /* Unwind info attached to a text section. */
+ struct
+ {
+ asection *arm_exidx_sec;
+ } text;
+
+ /* Unwind info attached to an .ARM.exidx section. */
+ struct
+ {
+ arm_unwind_table_edit *unwind_edit_list;
+ arm_unwind_table_edit *unwind_edit_tail;
+ } exidx;
+ } u;
}
_arm_elf_section_data;
#define elf32_arm_section_data(sec) \
((_arm_elf_section_data *) elf_section_data (sec))
+/* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
+ These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
+ so may be created multiple times: we use an array of these entries whilst
+ relaxing which we can refresh easily, then create stubs for each potentially
+ erratum-triggering instruction once we've settled on a solution. */
+
+struct a8_erratum_fix {
+ bfd *input_bfd;
+ asection *section;
+ bfd_vma offset;
+ bfd_vma addend;
+ unsigned long orig_insn;
+ char *stub_name;
+ enum elf32_arm_stub_type stub_type;
+};
+
+/* A table of relocs applied to branches which might trigger Cortex-A8
+ erratum. */
+
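+/* The table is sorted by the FROM field before scanning, so that
+ cortex_a8_erratum_scan can look branch sites up with bsearch. */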
+struct a8_erratum_reloc {
+ bfd_vma from;
+ bfd_vma destination;
+ unsigned int r_type;
+ unsigned char st_type;
+ const char *sym_name;
+ bfd_boolean non_a8_stub;
+};
+
/* The size of the thread control block. */
#define TCB_SIZE 8
veneers. */
bfd_size_type vfp11_erratum_glue_size;
+ /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
+ holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
+ elf32_arm_write_section(). */
+ struct a8_erratum_fix *a8_erratum_fixes;
+ unsigned int num_a8_erratum_fixes;
+
/* An arbitrary input BFD chosen to hold the glue sections. */
bfd * bfd_of_glue_owner;
2 = Generate v4 interworking stubs. */
int fix_v4bx;
+ /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
+ int fix_cortex_a8;
+
/* Nonzero if the ARM/Thumb BLX instructions are available for use. */
int use_blx;
bfd_vma offset;
} tls_ldm_got;
- /* Small local sym to section mapping cache. */
- struct sym_sec_cache sym_sec;
+ /* Small local sym cache. */
+ struct sym_cache sym_cache;
/* For convenience in allocate_dynrelocs. */
bfd * obfd;
eh->stub_offset = 0;
eh->target_value = 0;
eh->target_section = NULL;
+ eh->target_addend = 0;
+ eh->orig_insn = 0;
eh->stub_type = arm_stub_none;
+ eh->stub_size = 0;
+ eh->stub_template = NULL;
+ eh->stub_template_size = 0;
eh->h = NULL;
eh->id_sec = NULL;
+ eh->output_name = NULL;
}
return entry;
if (!htab->sgot || !htab->sgotplt)
abort ();
- htab->srelgot = bfd_make_section_with_flags (dynobj,
- RELOC_SECTION (htab, ".got"),
- (SEC_ALLOC | SEC_LOAD
- | SEC_HAS_CONTENTS
- | SEC_IN_MEMORY
- | SEC_LINKER_CREATED
- | SEC_READONLY));
- if (htab->srelgot == NULL
- || ! bfd_set_section_alignment (dynobj, htab->srelgot, 2))
+ htab->srelgot = bfd_get_section_by_name (dynobj,
+ RELOC_SECTION (htab, ".got"));
+ if (htab->srelgot == NULL)
return FALSE;
return TRUE;
}
ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
ret->vfp11_erratum_glue_size = 0;
ret->num_vfp11_fixes = 0;
+ ret->fix_cortex_a8 = 0;
ret->bfd_of_glue_owner = NULL;
ret->byteswap_code = 0;
ret->target1_is_rel = 0;
ret->vxworks_p = 0;
ret->symbian_p = 0;
ret->use_rel = 1;
- ret->sym_sec.abfd = NULL;
+ ret->sym_cache.abfd = NULL;
ret->obfd = abfd;
ret->tls_ldm_got.refcount = 0;
ret->stub_bfd = NULL;
return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7;
}
+/* Determine what kind of NOPs are available. */
+
+static bfd_boolean
+arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
+{
+ const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
+ Tag_CPU_arch);
+ return arch == TAG_CPU_ARCH_V6T2
+ || arch == TAG_CPU_ARCH_V6K
+ || arch == TAG_CPU_ARCH_V7;
+}
+
+static bfd_boolean
+arch_has_thumb2_nop (struct elf32_arm_link_hash_table *globals)
+{
+ const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
+ Tag_CPU_arch);
+ return arch == TAG_CPU_ARCH_V6T2 || arch == TAG_CPU_ARCH_V7;
+}
+
static bfd_boolean
arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
{
switch (stub_type)
{
- case arm_thumb_thumb_stub_long_branch:
- case arm_thumb_arm_v4t_stub_long_branch:
- case arm_thumb_arm_v4t_stub_short_branch:
+ case arm_stub_long_branch_thumb_only:
+ case arm_stub_long_branch_v4t_thumb_arm:
+ case arm_stub_short_branch_v4t_thumb_arm:
+ case arm_stub_long_branch_v4t_thumb_arm_pic:
+ case arm_stub_long_branch_thumb_only_pic:
return TRUE;
case arm_stub_none:
BFD_FAIL ();
int thumb2;
int thumb_only;
enum elf32_arm_stub_type stub_type = arm_stub_none;
+ int use_plt = 0;
/* We don't know the actual type of destination in case it is of
type STT_SECTION: give up. */
r_type = ELF32_R_TYPE (rel->r_info);
- /* If the call will go through a PLT entry then we do not need
- glue. */
+ /* Keep a simpler condition, for the sake of clarity. */
if (globals->splt != NULL && hash != NULL && hash->root.plt.offset != (bfd_vma) -1)
- return stub_type;
-
- if (r_type == R_ARM_THM_CALL)
{
+ use_plt = 1;
+ /* Note when dealing with PLT entries: the main PLT stub is in
+ ARM mode, so if the branch is in Thumb mode, another
+ Thumb->ARM stub will be inserted later just before the ARM
+ PLT stub. We don't take this extra distance into account
+ here, because if a long branch stub is needed, we'll add a
+ Thumb->Arm one and branch directly to the ARM PLT entry
+ since that avoids spreading offset corrections over several
+ places. */
+ }
+
+ if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
+ {
+ /* Handle cases where:
+ - this call goes too far (different Thumb/Thumb2 max
+ distance)
+ - it's a Thumb->Arm call and blx is not available, or it's a
+ Thumb->Arm branch (not bl). A stub is needed in this case,
+ but only if this call is not through a PLT entry. Indeed,
+ PLT stubs handle mode switching already.
+ */
if ((!thumb2
&& (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
|| (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
|| (thumb2
&& (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
|| (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
- || ((st_type != STT_ARM_TFUNC) && !globals->use_blx))
+ || ((st_type != STT_ARM_TFUNC)
+ && (((r_type == R_ARM_THM_CALL) && !globals->use_blx)
+ || (r_type == R_ARM_THM_JUMP24))
+ && !use_plt))
{
if (st_type == STT_ARM_TFUNC)
{
if (!thumb_only)
{
stub_type = (info->shared | globals->pic_veneer)
- ? ((globals->use_blx)
- ? arm_stub_pic_long_branch
- : arm_stub_none)
- : (globals->use_blx)
- ? arm_stub_long_branch
- : arm_stub_none;
+ /* PIC stubs. */
+ ? ((globals->use_blx
+ && (r_type == R_ARM_THM_CALL))
+ /* V5T and above. Stub starts with ARM code, so
+ we must be able to switch mode before
+ reaching it, which is only possible for 'bl'
+ (ie R_ARM_THM_CALL relocation). */
+ ? arm_stub_long_branch_any_thumb_pic
+ /* On V4T, use Thumb code only. */
+ : arm_stub_long_branch_v4t_thumb_thumb_pic)
+
+ /* non-PIC stubs. */
+ : ((globals->use_blx
+ && (r_type == R_ARM_THM_CALL))
+ /* V5T and above. */
+ ? arm_stub_long_branch_any_any
+ /* V4T. */
+ : arm_stub_long_branch_v4t_thumb_thumb);
}
else
{
stub_type = (info->shared | globals->pic_veneer)
- ? arm_stub_none
- : (globals->use_blx)
- ? arm_thumb_thumb_stub_long_branch
- : arm_stub_none;
+ /* PIC stub. */
+ ? arm_stub_long_branch_thumb_only_pic
+ /* non-PIC stub. */
+ : arm_stub_long_branch_thumb_only;
}
}
else
}
stub_type = (info->shared | globals->pic_veneer)
- ? ((globals->use_blx)
- ? arm_stub_pic_long_branch
- : arm_stub_none)
- : (globals->use_blx)
- ? arm_stub_long_branch
- : arm_thumb_arm_v4t_stub_long_branch;
+ /* PIC stubs. */
+ ? ((globals->use_blx
+ && (r_type == R_ARM_THM_CALL))
+ /* V5T and above. */
+ ? arm_stub_long_branch_any_arm_pic
+ /* V4T PIC stub. */
+ : arm_stub_long_branch_v4t_thumb_arm_pic)
+
+ /* non-PIC stubs. */
+ : ((globals->use_blx
+ && (r_type == R_ARM_THM_CALL))
+ /* V5T and above. */
+ ? arm_stub_long_branch_any_any
+ /* V4T. */
+ : arm_stub_long_branch_v4t_thumb_arm);
/* Handle v4t short branches. */
- if ((stub_type == arm_thumb_arm_v4t_stub_long_branch)
+ if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
&& (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
&& (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
- stub_type = arm_thumb_arm_v4t_stub_short_branch;
+ stub_type = arm_stub_short_branch_v4t_thumb_arm;
}
}
}
- else if (r_type == R_ARM_CALL)
+ else if (r_type == R_ARM_CALL || r_type == R_ARM_JUMP24 || r_type == R_ARM_PLT32)
{
if (st_type == STT_ARM_TFUNC)
{
{
(*_bfd_error_handler)
(_("%B(%s): warning: interworking not enabled.\n"
- " first occurrence: %B: Thumb call to ARM"),
+ " first occurrence: %B: ARM call to Thumb"),
sym_sec->owner, input_bfd, name);
}
the mode change (bit 24 (H) of BLX encoding). */
if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
|| (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
- || !globals->use_blx)
+ || ((r_type == R_ARM_CALL) && !globals->use_blx)
+ || (r_type == R_ARM_JUMP24)
+ || (r_type == R_ARM_PLT32))
{
stub_type = (info->shared | globals->pic_veneer)
- ? arm_stub_pic_long_branch
- : (globals->use_blx)
- ? arm_stub_long_branch
- : arm_thumb_v4t_stub_long_branch;
+ /* PIC stubs. */
+ ? ((globals->use_blx)
+ /* V5T and above. */
+ ? arm_stub_long_branch_any_thumb_pic
+ /* V4T stub. */
+ : arm_stub_long_branch_v4t_arm_thumb_pic)
+
+ /* non-PIC stubs. */
+ : ((globals->use_blx)
+ /* V5T and above. */
+ ? arm_stub_long_branch_any_any
+ /* V4T. */
+ : arm_stub_long_branch_v4t_arm_thumb);
}
}
else
|| (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
{
stub_type = (info->shared | globals->pic_veneer)
- ? arm_stub_pic_long_branch
- : arm_stub_long_branch;
+ /* PIC stubs. */
+ ? arm_stub_long_branch_any_arm_pic
+ /* non-PIC stubs. */
+ : arm_stub_long_branch_any_any;
}
}
}
return stub_entry;
}
-/* Add a new stub entry to the stub hash. Not all fields of the new
- stub entry are initialised. */
+/* Find or create a stub section. Returns a pointer to the stub section, and
+ the section to which the stub section will be attached (in *LINK_SEC_P).
+ LINK_SEC_P may be NULL. */
-static struct elf32_arm_stub_hash_entry *
-elf32_arm_add_stub (const char *stub_name,
- asection *section,
- struct elf32_arm_link_hash_table *htab)
+static asection *
+elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
+ struct elf32_arm_link_hash_table *htab)
{
asection *link_sec;
asection *stub_sec;
- struct elf32_arm_stub_hash_entry *stub_entry;
link_sec = htab->stub_group[section->id].link_sec;
stub_sec = htab->stub_group[section->id].stub_sec;
}
htab->stub_group[section->id].stub_sec = stub_sec;
}
+
+ if (link_sec_p)
+ *link_sec_p = link_sec;
+
+ return stub_sec;
+}
+
+/* Add a new stub entry to the stub hash. Not all fields of the new
+ stub entry are initialised. */
+
+static struct elf32_arm_stub_hash_entry *
+elf32_arm_add_stub (const char *stub_name,
+ asection *section,
+ struct elf32_arm_link_hash_table *htab)
+{
+ asection *link_sec;
+ asection *stub_sec;
+ struct elf32_arm_stub_hash_entry *stub_entry;
+
+ stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab);
+ if (stub_sec == NULL)
+ return NULL;
/* Enter this entry into the linker stub hash table. */
stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
bfd_putb16 (val, ptr);
}
+static bfd_reloc_status_type elf32_arm_final_link_relocate
+ (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
+ Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
+ const char *, int, struct elf_link_hash_entry *, bfd_boolean *, char **);
+
static bfd_boolean
arm_build_one_stub (struct bfd_hash_entry *gen_entry,
void * in_arg)
{
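+ /* Two relocations are enough: the Cortex-A8 conditional-branch veneer
+ needs two, every other stub template needs at most one. */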
+#define MAXRELOCS 2
struct elf32_arm_stub_hash_entry *stub_entry;
struct bfd_link_info *info;
struct elf32_arm_link_hash_table *htab;
bfd_vma sym_value;
int template_size;
int size;
- const bfd_vma *template;
+ const insn_sequence *template;
int i;
struct elf32_arm_link_hash_table * globals;
+ int stub_reloc_idx[MAXRELOCS] = {-1, -1};
+ int stub_reloc_offset[MAXRELOCS] = {0, 0};
+ int nrelocs = 0;
/* Massage our args to the form they really have. */
stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
htab = elf32_arm_hash_table (info);
stub_sec = stub_entry->stub_sec;
+ if ((htab->fix_cortex_a8 < 0)
+ != (stub_entry->stub_type >= arm_stub_a8_veneer_lwm))
+ /* We have to do the a8 fixes last, as they are less aligned than
+ the other veneers. */
+ return TRUE;
+
/* Make a note of the offset within the stubs for this entry. */
stub_entry->stub_offset = stub_sec->size;
loc = stub_sec->contents + stub_entry->stub_offset;
+ stub_entry->target_section->output_offset
+ stub_entry->target_section->output_section->vma);
- switch (stub_entry->stub_type)
- {
- case arm_stub_long_branch:
- template = arm_long_branch_stub;
- template_size = (sizeof (arm_long_branch_stub) / sizeof (bfd_vma)) * 4;
- break;
- case arm_thumb_v4t_stub_long_branch:
- template = arm_thumb_v4t_long_branch_stub;
- template_size = (sizeof (arm_thumb_v4t_long_branch_stub) / sizeof (bfd_vma)) * 4;
- break;
- case arm_thumb_thumb_stub_long_branch:
- template = arm_thumb_thumb_long_branch_stub;
- template_size = (sizeof (arm_thumb_thumb_long_branch_stub) / sizeof (bfd_vma)) * 4;
- break;
- case arm_thumb_arm_v4t_stub_long_branch:
- template = arm_thumb_arm_v4t_long_branch_stub;
- template_size = (sizeof (arm_thumb_arm_v4t_long_branch_stub) / sizeof (bfd_vma)) * 4;
- break;
- case arm_thumb_arm_v4t_stub_short_branch:
- template = arm_thumb_arm_v4t_short_branch_stub;
- template_size = (sizeof(arm_thumb_arm_v4t_short_branch_stub) / sizeof (bfd_vma)) * 4;
- break;
- case arm_stub_pic_long_branch:
- template = arm_pic_long_branch_stub;
- template_size = (sizeof (arm_pic_long_branch_stub) / sizeof (bfd_vma)) * 4;
- break;
- default:
- BFD_FAIL ();
- return FALSE;
- }
+ template = stub_entry->stub_template;
+ template_size = stub_entry->stub_template_size;
size = 0;
- for (i = 0; i < (template_size / 4); i++)
+ for (i = 0; i < template_size; i++)
{
- /* A 0 pattern is a placeholder, every other pattern is an
- instruction. */
- if (template[i] != 0)
- put_arm_insn (globals, stub_bfd, template[i], loc + size);
- else
- bfd_put_32 (stub_bfd, template[i], loc + size);
+ switch (template[i].type)
+ {
+ case THUMB16_TYPE:
+ {
+ bfd_vma data = template[i].data;
+ if (template[i].reloc_addend != 0)
+ {
+ /* We've borrowed the reloc_addend field to mean we should
+ insert a condition code into this (Thumb-1 branch)
+ instruction. See THUMB16_BCOND_INSN. */
+ BFD_ASSERT ((data & 0xff00) == 0xd000);
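+ /* The condition field occupies bits 25:22 of the original Bcc.W
+ encoding. */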
+ data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
+ }
+ put_thumb_insn (globals, stub_bfd, data, loc + size);
+ size += 2;
+ }
+ break;
+
+ case THUMB32_TYPE:
+ put_thumb_insn (globals, stub_bfd, (template[i].data >> 16) & 0xffff,
+ loc + size);
+ put_thumb_insn (globals, stub_bfd, template[i].data & 0xffff,
+ loc + size + 2);
+ if (template[i].r_type != R_ARM_NONE)
+ {
+ stub_reloc_idx[nrelocs] = i;
+ stub_reloc_offset[nrelocs++] = size;
+ }
+ size += 4;
+ break;
+
+ case ARM_TYPE:
+ put_arm_insn (globals, stub_bfd, template[i].data, loc + size);
+ /* Handle cases where the target is encoded within the
+ instruction. */
+ if (template[i].r_type == R_ARM_JUMP24)
+ {
+ stub_reloc_idx[nrelocs] = i;
+ stub_reloc_offset[nrelocs++] = size;
+ }
+ size += 4;
+ break;
+
+ case DATA_TYPE:
+ bfd_put_32 (stub_bfd, template[i].data, loc + size);
+ stub_reloc_idx[nrelocs] = i;
+ stub_reloc_offset[nrelocs++] = size;
+ size += 4;
+ break;
- size += 4;
+ default:
+ BFD_FAIL ();
+ return FALSE;
+ }
}
+
stub_sec->size += size;
+ /* Stub size has already been computed in arm_size_one_stub. Check
+ consistency. */
+ BFD_ASSERT (size == stub_entry->stub_size);
+
/* Destination is Thumb. Force bit 0 to 1 to reflect this. */
if (stub_entry->st_type == STT_ARM_TFUNC)
sym_value |= 1;
- switch (stub_entry->stub_type)
- {
- case arm_stub_long_branch:
- _bfd_final_link_relocate (elf32_arm_howto_from_type (R_ARM_ABS32),
- stub_bfd, stub_sec, stub_sec->contents,
- stub_entry->stub_offset + 4, sym_value, 0);
- break;
- case arm_thumb_v4t_stub_long_branch:
- _bfd_final_link_relocate (elf32_arm_howto_from_type (R_ARM_ABS32),
- stub_bfd, stub_sec, stub_sec->contents,
- stub_entry->stub_offset + 8, sym_value, 0);
- break;
- case arm_thumb_thumb_stub_long_branch:
- _bfd_final_link_relocate (elf32_arm_howto_from_type (R_ARM_ABS32),
- stub_bfd, stub_sec, stub_sec->contents,
- stub_entry->stub_offset + 12, sym_value, 0);
- break;
- case arm_thumb_arm_v4t_stub_long_branch:
- _bfd_final_link_relocate (elf32_arm_howto_from_type (R_ARM_ABS32),
- stub_bfd, stub_sec, stub_sec->contents,
- stub_entry->stub_offset + 16, sym_value, 0);
- break;
- case arm_thumb_arm_v4t_stub_short_branch:
+ /* Assume there is at least one and at most MAXRELOCS entries to relocate
+ in each stub. */
+ BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);
+
+ for (i = 0; i < nrelocs; i++)
+ if (template[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP24
+ || template[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP19
+ || template[stub_reloc_idx[i]].r_type == R_ARM_THM_CALL
+ || template[stub_reloc_idx[i]].r_type == R_ARM_THM_XPC22)
{
- long int rel_offset;
- static const insn32 t2a3_b_insn = 0xea000000;
+ Elf_Internal_Rela rel;
+ bfd_boolean unresolved_reloc;
+ char *error_message;
+ int sym_flags
+ = (template[stub_reloc_idx[i]].r_type != R_ARM_THM_XPC22)
+ ? STT_ARM_TFUNC : 0;
+ bfd_vma points_to = sym_value + stub_entry->target_addend;
+
+ rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
+ rel.r_info = ELF32_R_INFO (0, template[stub_reloc_idx[i]].r_type);
+ rel.r_addend = template[stub_reloc_idx[i]].reloc_addend;
+
+ if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
+ /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
+ template should refer back to the instruction after the original
+ branch. */
+ points_to = sym_value;
+
+ /* There may be unintended consequences if this is not true. */
+ BFD_ASSERT (stub_entry->h == NULL);
+
+ /* Note: _bfd_final_link_relocate doesn't handle these relocations
+ properly. We should probably use this function unconditionally,
+ rather than only for certain relocations listed in the enclosing
+ conditional, for the sake of consistency. */
+ elf32_arm_final_link_relocate (elf32_arm_howto_from_type
+ (template[stub_reloc_idx[i]].r_type),
+ stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
+ points_to, info, stub_entry->target_section, "", sym_flags,
+ (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
+ &error_message);
+ }
+ else
+ {
+ _bfd_final_link_relocate (elf32_arm_howto_from_type
+ (template[stub_reloc_idx[i]].r_type), stub_bfd, stub_sec,
+ stub_sec->contents, stub_entry->stub_offset + stub_reloc_offset[i],
+ sym_value + stub_entry->target_addend,
+ template[stub_reloc_idx[i]].reloc_addend);
+ }
- rel_offset = sym_value - (stub_addr + 8 + 4);
+ return TRUE;
+#undef MAXRELOCS
+}
- put_arm_insn (globals, stub_bfd,
- (bfd_vma) t2a3_b_insn | ((rel_offset >> 2) & 0x00FFFFFF),
- loc + 4);
- }
- break;
+/* Calculate the template, template size and instruction size for a stub.
+ Return value is the instruction size. */
- case arm_stub_pic_long_branch:
- /* We want the value relative to the address 8 bytes from the
- start of the stub. */
- _bfd_final_link_relocate (elf32_arm_howto_from_type (R_ARM_REL32),
- stub_bfd, stub_sec, stub_sec->contents,
- stub_entry->stub_offset + 8, sym_value, 0);
- break;
- default:
- break;
+static unsigned int
+find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
+ const insn_sequence **stub_template,
+ int *stub_template_size)
+{
+ const insn_sequence *template = NULL;
+ int template_size = 0, i;
+ unsigned int size;
+
+ template = stub_definitions[stub_type].template;
+ template_size = stub_definitions[stub_type].template_size;
+
+ size = 0;
+ for (i = 0; i < template_size; i++)
+ {
+ switch (template[i].type)
+ {
+ case THUMB16_TYPE:
+ size += 2;
+ break;
+
+ case ARM_TYPE:
+ case THUMB32_TYPE:
+ case DATA_TYPE:
+ size += 4;
+ break;
+
+ default:
+ BFD_FAIL ();
+ return FALSE;
+ }
}
- return TRUE;
+ if (stub_template)
+ *stub_template = template;
+
+ if (stub_template_size)
+ *stub_template_size = template_size;
+
+ return size;
}
/* As above, but don't actually build the stub. Just bump offset so
{
struct elf32_arm_stub_hash_entry *stub_entry;
struct elf32_arm_link_hash_table *htab;
- const bfd_vma *template;
- int template_size;
- int size;
- int i;
+ const insn_sequence *template;
+ int template_size, size;
/* Massage our args to the form they really have. */
stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
htab = (struct elf32_arm_link_hash_table *) in_arg;
- switch (stub_entry->stub_type)
- {
- case arm_stub_long_branch:
- template = arm_long_branch_stub;
- template_size = (sizeof (arm_long_branch_stub) / sizeof (bfd_vma)) * 4;
- break;
- case arm_thumb_v4t_stub_long_branch:
- template = arm_thumb_v4t_long_branch_stub;
- template_size = (sizeof (arm_thumb_v4t_long_branch_stub) / sizeof (bfd_vma)) * 4;
- break;
- case arm_thumb_thumb_stub_long_branch:
- template = arm_thumb_thumb_long_branch_stub;
- template_size = (sizeof (arm_thumb_thumb_long_branch_stub) / sizeof (bfd_vma)) * 4;
- break;
- case arm_thumb_arm_v4t_stub_long_branch:
- template = arm_thumb_arm_v4t_long_branch_stub;
- template_size = (sizeof (arm_thumb_arm_v4t_long_branch_stub) / sizeof (bfd_vma)) * 4;
- break;
- case arm_thumb_arm_v4t_stub_short_branch:
- template = arm_thumb_arm_v4t_short_branch_stub;
- template_size = (sizeof(arm_thumb_arm_v4t_short_branch_stub) / sizeof (bfd_vma)) * 4;
- break;
- case arm_stub_pic_long_branch:
- template = arm_pic_long_branch_stub;
- template_size = (sizeof (arm_pic_long_branch_stub) / sizeof (bfd_vma)) * 4;
- break;
- default:
- BFD_FAIL ();
- return FALSE;
- break;
- }
+ BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
+ && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));
+
+ size = find_stub_size_and_template (stub_entry->stub_type, &template,
+ &template_size);
+
+ stub_entry->stub_size = size;
+ stub_entry->stub_template = template;
+ stub_entry->stub_template_size = template_size;
- size = 0;
- for (i = 0; i < (template_size / 4); i++)
- size += 4;
size = (size + 7) & ~7;
stub_entry->stub_sec->size += size;
+
return TRUE;
}
/* Steal the link_sec pointer for our list. */
#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
/* This happens to make the list in reverse order,
- which is what we want. */
+ which we reverse later. */
PREV_SEC (isec) = *list;
*list = isec;
}
/* See whether we can group stub sections together. Grouping stub
sections may result in fewer stubs. More importantly, we need to
- put all .init* and .fini* stubs at the beginning of the .init or
+ put all .init* and .fini* stubs at the end of the .init or
.fini output sections respectively, because glibc splits the
_init and _fini functions into multiple parts. Putting a stub in
the middle of a function is not a good idea. */
static void
group_sections (struct elf32_arm_link_hash_table *htab,
bfd_size_type stub_group_size,
- bfd_boolean stubs_always_before_branch)
+ bfd_boolean stubs_always_after_branch)
{
- asection **list = htab->input_list + htab->top_index;
+ asection **list = htab->input_list;
do
{
asection *tail = *list;
+ asection *head;
if (tail == bfd_abs_section_ptr)
continue;
+ /* Reverse the list: we must avoid placing stubs at the
+ beginning of the section because the beginning of the text
+ section may be required for an interrupt vector in bare metal
+ code. */
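+ /* After the reversal the PREV_SEC pointers are walked forwards, hence
+ the NEXT_SEC alias below. */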
+#define NEXT_SEC PREV_SEC
+ head = NULL;
while (tail != NULL)
+ {
+ /* Pop from tail. */
+ asection *item = tail;
+ tail = PREV_SEC (item);
+
+ /* Push on head. */
+ NEXT_SEC (item) = head;
+ head = item;
+ }
+
+ while (head != NULL)
{
asection *curr;
- asection *prev;
- bfd_size_type total;
+ asection *next;
+ bfd_vma stub_group_start = head->output_offset;
+ bfd_vma end_of_next;
- curr = tail;
- total = tail->size;
- while ((prev = PREV_SEC (curr)) != NULL
- && ((total += curr->output_offset - prev->output_offset)
- < stub_group_size))
- curr = prev;
+ curr = head;
+ while (NEXT_SEC (curr) != NULL)
+ {
+ next = NEXT_SEC (curr);
+ end_of_next = next->output_offset + next->size;
+ if (end_of_next - stub_group_start >= stub_group_size)
+ /* End of NEXT is too far from start, so stop. */
+ break;
+ /* Add NEXT to the group. */
+ curr = next;
+ }
- /* OK, the size from the start of CURR to the end is less
+ /* OK, the size from the start to the start of CURR is less
than stub_group_size and thus can be handled by one stub
- section. (Or the tail section is itself larger than
+ section. (Or the head section is itself larger than
stub_group_size, in which case we may be toast.)
We should really be keeping track of the total size of
stubs added here, as stubs contribute to the final output
section size. */
do
{
- prev = PREV_SEC (tail);
+ next = NEXT_SEC (head);
/* Set up this stub group. */
- htab->stub_group[tail->id].link_sec = curr;
+ htab->stub_group[head->id].link_sec = curr;
}
- while (tail != curr && (tail = prev) != NULL);
+ while (head != curr && (head = next) != NULL);
/* But wait, there's more! Input sections up to stub_group_size
- bytes before the stub section can be handled by it too. */
- if (!stubs_always_before_branch)
+ bytes after the stub section can be handled by it too. */
+ if (!stubs_always_after_branch)
{
- total = 0;
- while (prev != NULL
- && ((total += tail->output_offset - prev->output_offset)
- < stub_group_size))
+ stub_group_start = curr->output_offset + curr->size;
+
+ while (next != NULL)
{
- tail = prev;
- prev = PREV_SEC (tail);
- htab->stub_group[tail->id].link_sec = curr;
+ end_of_next = next->output_offset + next->size;
+ if (end_of_next - stub_group_start >= stub_group_size)
+ /* End of NEXT is too far from stubs, so stop. */
+ break;
+ /* Add NEXT to the stub group. */
+ head = next;
+ next = NEXT_SEC (head);
+ htab->stub_group[head->id].link_sec = curr;
}
}
- tail = prev;
+ head = next;
}
}
- while (list-- != htab->input_list);
+ while (list++ != htab->input_list + htab->top_index);
free (htab->input_list);
#undef PREV_SEC
+#undef NEXT_SEC
}
-/* Determine and set the size of the stub section for a final link.
-
- The basic idea here is to examine all the relocations looking for
- PC-relative calls to a target that is unreachable with a "bl"
- instruction. */
+/* Comparison function for sorting/searching relocations relating to Cortex-A8
+ erratum fix. */
-bfd_boolean
-elf32_arm_size_stubs (bfd *output_bfd,
- bfd *stub_bfd,
- struct bfd_link_info *info,
- bfd_signed_vma group_size,
- asection * (*add_stub_section) (const char *, asection *),
- void (*layout_sections_again) (void))
+static int
+a8_reloc_compare (const void *a, const void *b)
{
- bfd_size_type stub_group_size;
- bfd_boolean stubs_always_before_branch;
- bfd_boolean stub_changed = 0;
- struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
-
- /* Propagate mach to stub bfd, because it may not have been
- finalized when we created stub_bfd. */
- bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
- bfd_get_mach (output_bfd));
+ const struct a8_erratum_reloc *ra = a, *rb = b;
- /* Stash our params away. */
- htab->stub_bfd = stub_bfd;
- htab->add_stub_section = add_stub_section;
- htab->layout_sections_again = layout_sections_again;
- stubs_always_before_branch = group_size < 0;
- if (group_size < 0)
- stub_group_size = -group_size;
+ if (ra->from < rb->from)
+ return -1;
+ else if (ra->from > rb->from)
+ return 1;
else
- stub_group_size = group_size;
+ return 0;
+}
- if (stub_group_size == 1)
- {
- /* Default values. */
- /* Thumb branch range is +-4MB has to be used as the default
- maximum size (a given section can contain both ARM and Thumb
- code, so the worst case has to be taken into account).
+static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
+ const char *, char **);
+
+/* Helper function to scan code for sequences which might trigger the Cortex-A8
+ branch/TLB erratum. Fill in the table described by A8_FIXES_P,
+ NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
+ otherwise. */
+
+static bfd_boolean
+cortex_a8_erratum_scan (bfd *input_bfd,
+ struct bfd_link_info *info,
+ struct a8_erratum_fix **a8_fixes_p,
+ unsigned int *num_a8_fixes_p,
+ unsigned int *a8_fix_table_size_p,
+ struct a8_erratum_reloc *a8_relocs,
+ unsigned int num_a8_relocs,
+ unsigned prev_num_a8_fixes,
+ bfd_boolean *stub_changed_p)
+{
+ asection *section;
+ struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
+ struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
+ unsigned int num_a8_fixes = *num_a8_fixes_p;
+ unsigned int a8_fix_table_size = *a8_fix_table_size_p;
+
+ for (section = input_bfd->sections;
+ section != NULL;
+ section = section->next)
+ {
+ bfd_byte *contents = NULL;
+ struct _arm_elf_section_data *sec_data;
+ unsigned int span;
+ bfd_vma base_vma;
+
+ if (elf_section_type (section) != SHT_PROGBITS
+ || (elf_section_flags (section) & SHF_EXECINSTR) == 0
+ || (section->flags & SEC_EXCLUDE) != 0
+ || (section->sec_info_type == ELF_INFO_TYPE_JUST_SYMS)
+ || (section->output_section == bfd_abs_section_ptr))
+ continue;
+
+ base_vma = section->output_section->vma + section->output_offset;
+
+ if (elf_section_data (section)->this_hdr.contents != NULL)
+ contents = elf_section_data (section)->this_hdr.contents;
+ else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
+ return TRUE;
+
+ sec_data = elf32_arm_section_data (section);
+
+ for (span = 0; span < sec_data->mapcount; span++)
+ {
+ unsigned int span_start = sec_data->map[span].vma;
+ unsigned int span_end = (span == sec_data->mapcount - 1)
+ ? section->size : sec_data->map[span + 1].vma;
+ unsigned int i;
+ char span_type = sec_data->map[span].type;
+ bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
+
+ if (span_type != 't')
+ continue;
+
+ /* Span is entirely within a single 4KB region: skip scanning. */
+ if (((base_vma + span_start) & ~0xfff)
+ == ((base_vma + span_end) & ~0xfff))
+ continue;
+
+ /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
+
+ * The opcode is BLX.W, BL.W, B.W, Bcc.W
+ * The branch target is in the same 4KB region as the
+ first half of the branch.
+ * The instruction before the branch is a 32-bit
+ length non-branch instruction. */
+ for (i = span_start; i < span_end;)
+ {
+ unsigned int insn = bfd_getl16 (&contents[i]);
+ bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
+ bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
+
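+ /* A 32-bit Thumb-2 instruction has 0b111 in bits [15:13] of its first
+ halfword and a non-zero value in bits [12:11]. */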
+ if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
+ insn_32bit = TRUE;
+
+ if (insn_32bit)
+ {
+ /* Load the rest of the insn (in manual-friendly order). */
+ insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
+
+ /* Encoding T4: B<c>.W. */
+ is_b = (insn & 0xf800d000) == 0xf0009000;
+ /* Encoding T1: BL<c>.W. */
+ is_bl = (insn & 0xf800d000) == 0xf000d000;
+ /* Encoding T2: BLX<c>.W. */
+ is_blx = (insn & 0xf800d000) == 0xf000c000;
+ /* Encoding T3: B<c>.W (not permitted in IT block). */
+ is_bcc = (insn & 0xf800d000) == 0xf0008000
+ && (insn & 0x07f00000) != 0x03800000;
+ }
+
+ is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
+
+ if (((base_vma + i) & 0xfff) == 0xffe
+ && insn_32bit
+ && is_32bit_branch
+ && last_was_32bit
+ && ! last_was_branch)
+ {
+ bfd_signed_vma offset;
+ bfd_boolean force_target_arm = FALSE;
+ bfd_boolean force_target_thumb = FALSE;
+ bfd_vma target;
+ enum elf32_arm_stub_type stub_type = arm_stub_none;
+ struct a8_erratum_reloc key, *found;
+
+ key.from = base_vma + i;
+ found = bsearch (&key, a8_relocs, num_a8_relocs,
+ sizeof (struct a8_erratum_reloc),
+ &a8_reloc_compare);
+
+ if (found)
+ {
+ char *error_message = NULL;
+ struct elf_link_hash_entry *entry;
+
+ /* We don't care about the error returned from this
+ function, only if there is glue or not. */
+ entry = find_thumb_glue (info, found->sym_name,
+ &error_message);
+
+ if (entry)
+ found->non_a8_stub = TRUE;
+
+ if (found->r_type == R_ARM_THM_CALL
+ && found->st_type != STT_ARM_TFUNC)
+ force_target_arm = TRUE;
+ else if (found->r_type == R_ARM_THM_CALL
+ && found->st_type == STT_ARM_TFUNC)
+ force_target_thumb = TRUE;
+ }
+
+ /* Check if we have an offending branch instruction. */
+
+ if (found && found->non_a8_stub)
+ /* We've already made a stub for this instruction, e.g.
+ it's a long branch or a Thumb->ARM stub. Assume that
+ stub will suffice to work around the A8 erratum (see
+ setting of always_after_branch above). */
+ ;
+ else if (is_bcc)
+ {
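+ /* Encoding T3: the offset is SignExtend (S:J2:J1:imm6:imm11:'0'). */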
+ offset = (insn & 0x7ff) << 1;
+ offset |= (insn & 0x3f0000) >> 4;
+ offset |= (insn & 0x2000) ? 0x40000 : 0;
+ offset |= (insn & 0x800) ? 0x80000 : 0;
+ offset |= (insn & 0x4000000) ? 0x100000 : 0;
+ if (offset & 0x100000)
+ offset |= ~ ((bfd_signed_vma) 0xfffff);
+ stub_type = arm_stub_a8_veneer_b_cond;
+ }
+ else if (is_b || is_bl || is_blx)
+ {
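+ /* Encodings T1/T2/T4: the offset is
+ SignExtend (S:I1:I2:imm10:imm11:'0'), with I1 = NOT (J1 EOR S)
+ and I2 = NOT (J2 EOR S); BLX targets are word-aligned below. */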
+ int s = (insn & 0x4000000) != 0;
+ int j1 = (insn & 0x2000) != 0;
+ int j2 = (insn & 0x800) != 0;
+ int i1 = !(j1 ^ s);
+ int i2 = !(j2 ^ s);
+
+ offset = (insn & 0x7ff) << 1;
+ offset |= (insn & 0x3ff0000) >> 4;
+ offset |= i2 << 22;
+ offset |= i1 << 23;
+ offset |= s << 24;
+ if (offset & 0x1000000)
+ offset |= ~ ((bfd_signed_vma) 0xffffff);
+
+ if (is_blx)
+ offset &= ~ ((bfd_signed_vma) 3);
+
+ stub_type = is_blx ? arm_stub_a8_veneer_blx :
+ is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
+ }
+
+ if (stub_type != arm_stub_none)
+ {
+ bfd_vma pc_for_insn = base_vma + i + 4;
+
+ /* The original instruction is a BL, but the target is
+ an ARM instruction. If we were not making a stub,
+ the BL would have been converted to a BLX. Use the
+ BLX stub instead in that case. */
+ if (htab->use_blx && force_target_arm
+ && stub_type == arm_stub_a8_veneer_bl)
+ {
+ stub_type = arm_stub_a8_veneer_blx;
+ is_blx = TRUE;
+ is_bl = FALSE;
+ }
+ /* Conversely, if the original instruction was
+ BLX but the target is Thumb mode, use the BL
+ stub. */
+ else if (force_target_thumb
+ && stub_type == arm_stub_a8_veneer_blx)
+ {
+ stub_type = arm_stub_a8_veneer_bl;
+ is_blx = FALSE;
+ is_bl = TRUE;
+ }
+
+ if (is_blx)
+ pc_for_insn &= ~ ((bfd_vma) 3);
+
+ /* If we found a relocation, use the proper destination,
+ not the offset in the (unrelocated) instruction.
+ Note this is always done if we switched the stub type
+ above. */
+ if (found)
+ offset =
+ (bfd_signed_vma) (found->destination - pc_for_insn);
+
+ target = pc_for_insn + offset;
+
+ /* The BLX stub is ARM-mode code. Adjust the offset to
+ take the different PC value (+8 instead of +4) into
+ account. */
+ if (stub_type == arm_stub_a8_veneer_blx)
+ offset += 4;
+
+ if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
+ {
+ char *stub_name = NULL;
+
+ if (num_a8_fixes == a8_fix_table_size)
+ {
+ a8_fix_table_size *= 2;
+ a8_fixes = bfd_realloc (a8_fixes,
+ sizeof (struct a8_erratum_fix)
+ * a8_fix_table_size);
+ }
+
+ if (num_a8_fixes < prev_num_a8_fixes)
+ {
+ /* If we're doing a subsequent scan,
+ check if we've found the same fix as
+ before, and try and reuse the stub
+ name. */
+ stub_name = a8_fixes[num_a8_fixes].stub_name;
+ if ((a8_fixes[num_a8_fixes].section != section)
+ || (a8_fixes[num_a8_fixes].offset != i))
+ {
+ free (stub_name);
+ stub_name = NULL;
+ *stub_changed_p = TRUE;
+ }
+ }
+
+ if (!stub_name)
+ {
+ stub_name = bfd_malloc (8 + 1 + 8 + 1);
+ if (stub_name != NULL)
+ sprintf (stub_name, "%x:%x", section->id, i);
+ }
+
+ a8_fixes[num_a8_fixes].input_bfd = input_bfd;
+ a8_fixes[num_a8_fixes].section = section;
+ a8_fixes[num_a8_fixes].offset = i;
+ a8_fixes[num_a8_fixes].addend = offset;
+ a8_fixes[num_a8_fixes].orig_insn = insn;
+ a8_fixes[num_a8_fixes].stub_name = stub_name;
+ a8_fixes[num_a8_fixes].stub_type = stub_type;
+
+ num_a8_fixes++;
+ }
+ }
+ }
+
+ i += insn_32bit ? 4 : 2;
+ last_was_32bit = insn_32bit;
+ last_was_branch = is_32bit_branch;
+ }
+ }
+
+ if (elf_section_data (section)->this_hdr.contents == NULL)
+ free (contents);
+ }
+
+ *a8_fixes_p = a8_fixes;
+ *num_a8_fixes_p = num_a8_fixes;
+ *a8_fix_table_size_p = a8_fix_table_size;
+
+ return FALSE;
+}
+
+/* Determine and set the size of the stub section for a final link.
+
+ The basic idea here is to examine all the relocations looking for
+ PC-relative calls to a target that is unreachable with a "bl"
+ instruction. */
+
+bfd_boolean
+elf32_arm_size_stubs (bfd *output_bfd,
+ bfd *stub_bfd,
+ struct bfd_link_info *info,
+ bfd_signed_vma group_size,
+ asection * (*add_stub_section) (const char *, asection *),
+ void (*layout_sections_again) (void))
+{
+ bfd_size_type stub_group_size;
+ bfd_boolean stubs_always_after_branch;
+ struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
+ struct a8_erratum_fix *a8_fixes = NULL;
+ unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
+ struct a8_erratum_reloc *a8_relocs = NULL;
+ unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
+
+ if (htab->fix_cortex_a8)
+ {
+ a8_fixes = bfd_zmalloc (sizeof (struct a8_erratum_fix)
+ * a8_fix_table_size);
+ a8_relocs = bfd_zmalloc (sizeof (struct a8_erratum_reloc)
+ * a8_reloc_table_size);
+ }
+
+ /* Propagate mach to stub bfd, because it may not have been
+ finalized when we created stub_bfd. */
+ bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
+ bfd_get_mach (output_bfd));
+
+ /* Stash our params away. */
+ htab->stub_bfd = stub_bfd;
+ htab->add_stub_section = add_stub_section;
+ htab->layout_sections_again = layout_sections_again;
+ stubs_always_after_branch = group_size < 0;
+
+ /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
+ as the first half of a 32-bit branch straddling two 4K pages. This is a
+ crude way of enforcing that. */
+ if (htab->fix_cortex_a8)
+ stubs_always_after_branch = 1;
+
+ if (group_size < 0)
+ stub_group_size = -group_size;
+ else
+ stub_group_size = group_size;
+
+ if (stub_group_size == 1)
+ {
+ /* Default values. */
+ /* The Thumb branch range of +-4MB has to be used as the default
+ maximum size (a given section can contain both ARM and Thumb
+ code, so the worst case has to be taken into account).
This value is 24K less than that, which allows for 2025
12-byte stubs. If we exceed that, then we will fail to link.
stub_group_size = 4170000;
}
- group_sections (htab, stub_group_size, stubs_always_before_branch);
+ group_sections (htab, stub_group_size, stubs_always_after_branch);
+
+ /* If we're applying the cortex A8 fix, we need to determine the
+ program header size now, because we cannot change it later --
+ that could alter section placements. Notice the A8 erratum fix
+ ends up requiring the section addresses to remain unchanged
+ modulo the page size. That's something we cannot represent
+ inside BFD, and we don't want to force the section alignment to
+ be the page size. */
+ if (htab->fix_cortex_a8)
+ (*htab->layout_sections_again) ();
while (1)
{
bfd *input_bfd;
unsigned int bfd_indx;
asection *stub_sec;
+ bfd_boolean stub_changed = FALSE;
+ unsigned prev_num_a8_fixes = num_a8_fixes;
+ num_a8_fixes = 0;
for (input_bfd = info->input_bfds, bfd_indx = 0;
input_bfd != NULL;
input_bfd = input_bfd->link_next, bfd_indx++)
asection *section;
Elf_Internal_Sym *local_syms = NULL;
+ num_a8_relocs = 0;
+
/* We'll need the symbol table in a second. */
symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
if (symtab_hdr->sh_info == 0)
char *stub_name;
const asection *id_sec;
unsigned char st_type;
+ bfd_boolean created_stub = FALSE;
r_type = ELF32_R_TYPE (irela->r_info);
r_indx = ELF32_R_SYM (irela->r_info);
goto error_ret_free_local;
}
- /* Only look for stubs on call instructions. */
+ /* Only look for stubs on branch instructions. */
if ((r_type != (unsigned int) R_ARM_CALL)
- && (r_type != (unsigned int) R_ARM_THM_CALL))
+ && (r_type != (unsigned int) R_ARM_THM_CALL)
+ && (r_type != (unsigned int) R_ARM_JUMP24)
+ && (r_type != (unsigned int) R_ARM_THM_JUMP19)
+ && (r_type != (unsigned int) R_ARM_THM_XPC22)
+ && (r_type != (unsigned int) R_ARM_THM_JUMP24)
+ && (r_type != (unsigned int) R_ARM_PLT32))
continue;
/* Now determine the call target, its name, value,
sym = local_syms + r_indx;
hdr = elf_elfsections (input_bfd)[sym->st_shndx];
sym_sec = hdr->bfd_section;
+ if (!sym_sec)
+ /* This is an undefined symbol. It can never
+ be resolved. */
+ continue;
+
if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
sym_value = sym->st_value;
destination = (sym_value + irela->r_addend
{
sym_sec = hash->root.root.u.def.section;
sym_value = hash->root.root.u.def.value;
- if (sym_sec->output_section != NULL)
+
+ struct elf32_arm_link_hash_table *globals =
+ elf32_arm_hash_table (info);
+
+ /* For a destination in a shared library,
+ use the PLT stub as target address to
+ decide whether a branch stub is
+ needed. */
+ if (globals->splt != NULL && hash != NULL
+ && hash->root.plt.offset != (bfd_vma) -1)
+ {
+ sym_sec = globals->splt;
+ sym_value = hash->root.plt.offset;
+ if (sym_sec->output_section != NULL)
+ destination = (sym_value
+ + sym_sec->output_offset
+ + sym_sec->output_section->vma);
+ }
+ else if (sym_sec->output_section != NULL)
destination = (sym_value + irela->r_addend
+ sym_sec->output_offset
+ sym_sec->output_section->vma);
}
- else if (hash->root.root.type == bfd_link_hash_undefweak
- || hash->root.root.type == bfd_link_hash_undefined)
- /* For a shared library, these will need a PLT stub,
- which is treated separately.
- For absolute code, they cannot be handled. */
- continue;
+ else if ((hash->root.root.type == bfd_link_hash_undefined)
+ || (hash->root.root.type == bfd_link_hash_undefweak))
+ {
+ /* For a shared library, use the PLT stub as
+ target address to decide whether a long
+ branch stub is needed.
+ For absolute code, they cannot be handled. */
+ struct elf32_arm_link_hash_table *globals =
+ elf32_arm_hash_table (info);
+
+ if (globals->splt != NULL && hash != NULL
+ && hash->root.plt.offset != (bfd_vma) -1)
+ {
+ sym_sec = globals->splt;
+ sym_value = hash->root.plt.offset;
+ if (sym_sec->output_section != NULL)
+ destination = (sym_value
+ + sym_sec->output_offset
+ + sym_sec->output_section->vma);
+ }
+ else
+ continue;
+ }
else
{
bfd_set_error (bfd_error_bad_value);
sym_name = hash->root.root.root.string;
}
- /* Determine what (if any) linker stub is needed. */
- stub_type = arm_type_of_stub (info, section, irela, st_type,
- hash, destination, sym_sec,
- input_bfd, sym_name);
- if (stub_type == arm_stub_none)
- continue;
-
- /* Support for grouping stub sections. */
- id_sec = htab->stub_group[section->id].link_sec;
-
- /* Get the name of this stub. */
- stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash, irela);
- if (!stub_name)
- goto error_ret_free_internal;
-
- stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
- stub_name,
- FALSE, FALSE);
- if (stub_entry != NULL)
- {
- /* The proper stub has already been created. */
- free (stub_name);
- continue;
- }
-
- stub_entry = elf32_arm_add_stub (stub_name, section, htab);
- if (stub_entry == NULL)
- {
- free (stub_name);
- goto error_ret_free_internal;
- }
-
- stub_entry->target_value = sym_value;
- stub_entry->target_section = sym_sec;
- stub_entry->stub_type = stub_type;
- stub_entry->h = hash;
- stub_entry->st_type = st_type;
-
- if (sym_name == NULL)
- sym_name = "unnamed";
- stub_entry->output_name
- = bfd_alloc (htab->stub_bfd,
- sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
- + strlen (sym_name));
- if (stub_entry->output_name == NULL)
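+ /* The do .. while (0) lets us break out of the stub-creation
+ steps early while still falling through to the Cortex-A8
+ erratum bookkeeping below. */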
+ do
{
- free (stub_name);
- goto error_ret_free_internal;
- }
+ /* Determine what (if any) linker stub is needed. */
+ stub_type = arm_type_of_stub (info, section, irela,
+ st_type, hash,
+ destination, sym_sec,
+ input_bfd, sym_name);
+ if (stub_type == arm_stub_none)
+ break;
+
+ /* Support for grouping stub sections. */
+ id_sec = htab->stub_group[section->id].link_sec;
+
+ /* Get the name of this stub. */
+ stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash,
+ irela);
+ if (!stub_name)
+ goto error_ret_free_internal;
+
+ /* We've either created a stub for this reloc already,
+ or we are about to. */
+ created_stub = TRUE;
+
+ stub_entry = arm_stub_hash_lookup
+ (&htab->stub_hash_table, stub_name,
+ FALSE, FALSE);
+ if (stub_entry != NULL)
+ {
+ /* The proper stub has already been created. */
+ free (stub_name);
+ stub_entry->target_value = sym_value;
+ break;
+ }
- /* For historical reasons, use the existing names for
- ARM-to-Thumb and Thumb-to-ARM stubs. */
- if (r_type == (unsigned int) R_ARM_THM_CALL
- && st_type != STT_ARM_TFUNC)
- sprintf (stub_entry->output_name, THUMB2ARM_GLUE_ENTRY_NAME,
- sym_name);
- else if (r_type == (unsigned int) R_ARM_CALL
- && st_type == STT_ARM_TFUNC)
- sprintf (stub_entry->output_name, ARM2THUMB_GLUE_ENTRY_NAME,
- sym_name);
- else
- sprintf (stub_entry->output_name, STUB_ENTRY_NAME,
- sym_name);
+ stub_entry = elf32_arm_add_stub (stub_name, section,
+ htab);
+ if (stub_entry == NULL)
+ {
+ free (stub_name);
+ goto error_ret_free_internal;
+ }
- stub_changed = TRUE;
+ stub_entry->target_value = sym_value;
+ stub_entry->target_section = sym_sec;
+ stub_entry->stub_type = stub_type;
+ stub_entry->h = hash;
+ stub_entry->st_type = st_type;
+
+ if (sym_name == NULL)
+ sym_name = "unnamed";
+ stub_entry->output_name
+ = bfd_alloc (htab->stub_bfd,
+ sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
+ + strlen (sym_name));
+ if (stub_entry->output_name == NULL)
+ {
+ free (stub_name);
+ goto error_ret_free_internal;
+ }
+
+ /* For historical reasons, use the existing names for
+ ARM-to-Thumb and Thumb-to-ARM stubs. */
+ if ( ((r_type == (unsigned int) R_ARM_THM_CALL)
+ || (r_type == (unsigned int) R_ARM_THM_JUMP24))
+ && st_type != STT_ARM_TFUNC)
+ sprintf (stub_entry->output_name,
+ THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
+ else if ( ((r_type == (unsigned int) R_ARM_CALL)
+ || (r_type == (unsigned int) R_ARM_JUMP24))
+ && st_type == STT_ARM_TFUNC)
+ sprintf (stub_entry->output_name,
+ ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
+ else
+ sprintf (stub_entry->output_name, STUB_ENTRY_NAME,
+ sym_name);
+
+ stub_changed = TRUE;
+ }
+ while (0);
+
+ /* Look for relocations which might trigger Cortex-A8
+ erratum. */
+ if (htab->fix_cortex_a8
+ && (r_type == (unsigned int) R_ARM_THM_JUMP24
+ || r_type == (unsigned int) R_ARM_THM_JUMP19
+ || r_type == (unsigned int) R_ARM_THM_CALL
+ || r_type == (unsigned int) R_ARM_THM_XPC22))
+ {
+ bfd_vma from = section->output_section->vma
+ + section->output_offset
+ + irela->r_offset;
+
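+ /* A 32-bit branch whose first half-word sits in the last two
+ bytes of a 4K page straddles the page boundary; that is the
+ placement the Cortex-A8 branch erratum workaround has to
+ examine. */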
+ if ((from & 0xfff) == 0xffe)
+ {
+ /* Found a candidate. Note we haven't checked the
+ destination is within 4K here: if we do so (and
+ don't create an entry in a8_relocs) we can't tell
+ that a branch should have been relocated when
+ scanning later. */
+ if (num_a8_relocs == a8_reloc_table_size)
+ {
+ a8_reloc_table_size *= 2;
+ a8_relocs = bfd_realloc (a8_relocs,
+ sizeof (struct a8_erratum_reloc)
+ * a8_reloc_table_size);
+ }
+
+ a8_relocs[num_a8_relocs].from = from;
+ a8_relocs[num_a8_relocs].destination = destination;
+ a8_relocs[num_a8_relocs].r_type = r_type;
+ a8_relocs[num_a8_relocs].st_type = st_type;
+ a8_relocs[num_a8_relocs].sym_name = sym_name;
+ a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
+
+ num_a8_relocs++;
+ }
+ }
}
- /* We're done with the internal relocs, free them. */
- if (elf_section_data (section)->relocs == NULL)
- free (internal_relocs);
+ /* We're done with the internal relocs, free them. */
+ if (elf_section_data (section)->relocs == NULL)
+ free (internal_relocs);
+ }
+
+ if (htab->fix_cortex_a8)
+ {
+ /* Sort relocs which might apply to Cortex-A8 erratum. */
+ qsort (a8_relocs, num_a8_relocs,
+ sizeof (struct a8_erratum_reloc),
+ &a8_reloc_compare);
+
+ /* Scan for branches which might trigger Cortex-A8 erratum. */
+ if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
+ &num_a8_fixes, &a8_fix_table_size,
+ a8_relocs, num_a8_relocs,
+ prev_num_a8_fixes, &stub_changed)
+ != 0)
+ goto error_ret_free_local;
}
}
+ if (prev_num_a8_fixes != num_a8_fixes)
+ stub_changed = TRUE;
+
if (!stub_changed)
break;
for (stub_sec = htab->stub_bfd->sections;
stub_sec != NULL;
stub_sec = stub_sec->next)
- stub_sec->size = 0;
+ {
+ /* Ignore non-stub sections. */
+ if (!strstr (stub_sec->name, STUB_SUFFIX))
+ continue;
+
+ stub_sec->size = 0;
+ }
bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
+ /* Add Cortex-A8 erratum veneers to stub section sizes too. */
+ if (htab->fix_cortex_a8)
+ for (i = 0; i < num_a8_fixes; i++)
+ {
+ stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
+ a8_fixes[i].section, htab);
+
+ if (stub_sec == NULL)
+ goto error_ret_free_local;
+
+ stub_sec->size
+ += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
+ NULL);
+ }
+
+
/* Ask the linker to do its stuff. */
(*htab->layout_sections_again) ();
- stub_changed = FALSE;
}
+ /* Add stubs for Cortex-A8 erratum fixes now. */
+ if (htab->fix_cortex_a8)
+ {
+ for (i = 0; i < num_a8_fixes; i++)
+ {
+ struct elf32_arm_stub_hash_entry *stub_entry;
+ char *stub_name = a8_fixes[i].stub_name;
+ asection *section = a8_fixes[i].section;
+ unsigned int section_id = a8_fixes[i].section->id;
+ asection *link_sec = htab->stub_group[section_id].link_sec;
+ asection *stub_sec = htab->stub_group[section_id].stub_sec;
+ const insn_sequence *template;
+ int template_size, size = 0;
+
+ stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
+ TRUE, FALSE);
+ if (stub_entry == NULL)
+ {
+ (*_bfd_error_handler) (_("%B: cannot create stub entry %s"),
+ section->owner,
+ stub_name);
+ return FALSE;
+ }
+
+ stub_entry->stub_sec = stub_sec;
+ stub_entry->stub_offset = 0;
+ stub_entry->id_sec = link_sec;
+ stub_entry->stub_type = a8_fixes[i].stub_type;
+ stub_entry->target_section = a8_fixes[i].section;
+ stub_entry->target_value = a8_fixes[i].offset;
+ stub_entry->target_addend = a8_fixes[i].addend;
+ stub_entry->orig_insn = a8_fixes[i].orig_insn;
+ stub_entry->st_type = STT_ARM_TFUNC;
+
+ size = find_stub_size_and_template (a8_fixes[i].stub_type, &template,
+ &template_size);
+
+ stub_entry->stub_size = size;
+ stub_entry->stub_template = template;
+ stub_entry->stub_template_size = template_size;
+ }
+
+ /* Stash the Cortex-A8 erratum fix array for use later in
+ elf32_arm_write_section(). */
+ htab->a8_erratum_fixes = a8_fixes;
+ htab->num_a8_erratum_fixes = num_a8_fixes;
+ }
+ else
+ {
+ htab->a8_erratum_fixes = NULL;
+ htab->num_a8_erratum_fixes = 0;
+ }
return TRUE;
error_ret_free_local:
/* Build the stubs as directed by the stub hash table. */
table = &htab->stub_hash_table;
bfd_hash_traverse (table, arm_build_one_stub, info);
+ if (htab->fix_cortex_a8)
+ {
+ /* Place the cortex a8 stubs last. */
+ htab->fix_cortex_a8 = -1;
+ bfd_hash_traverse (table, arm_build_one_stub, info);
+ }
return TRUE;
}
bfd_byte * contents;
if (size == 0)
- return;
+ {
+ /* Do not include empty glue sections in the output. */
+ if (abfd != NULL)
+ {
+ s = bfd_get_section_by_name (abfd, name);
+ if (s != NULL)
+ s->flags |= SEC_EXCLUDE;
+ }
+ return;
+ }
BFD_ASSERT (abfd != NULL);
return myh;
}
-static void
-record_thumb_to_arm_glue (struct bfd_link_info *link_info,
- struct elf_link_hash_entry *h)
-{
- const char *name = h->root.root.string;
- asection *s;
- char *tmp_name;
- struct elf_link_hash_entry *myh;
- struct bfd_link_hash_entry *bh;
- struct elf32_arm_link_hash_table *hash_table;
- bfd_vma val;
-
- hash_table = elf32_arm_hash_table (link_info);
-
- BFD_ASSERT (hash_table != NULL);
- BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
-
- s = bfd_get_section_by_name
- (hash_table->bfd_of_glue_owner, THUMB2ARM_GLUE_SECTION_NAME);
-
- BFD_ASSERT (s != NULL);
-
- tmp_name = bfd_malloc ((bfd_size_type) strlen (name)
- + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
-
- BFD_ASSERT (tmp_name);
-
- sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
-
- myh = elf_link_hash_lookup
- (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
-
- if (myh != NULL)
- {
- /* We've already seen this guy. */
- free (tmp_name);
- return;
- }
-
- /* The only trick here is using hash_table->thumb_glue_size as the value.
- Even though the section isn't allocated yet, this is where we will be
- putting it. The +1 on the value marks that the stub has not been
- output yet - not that it is a Thumb function. */
- bh = NULL;
- val = hash_table->thumb_glue_size + 1;
- _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
- tmp_name, BSF_GLOBAL, s, val,
- NULL, TRUE, FALSE, &bh);
-
- /* If we mark it 'Thumb', the disassembler will do a better job. */
- myh = (struct elf_link_hash_entry *) bh;
- myh->type = ELF_ST_INFO (STB_LOCAL, STT_ARM_TFUNC);
- myh->forced_local = 1;
-
- free (tmp_name);
-
-#define CHANGE_TO_ARM "__%s_change_to_arm"
-#define BACK_FROM_ARM "__%s_back_from_arm"
-
- /* Allocate another symbol to mark where we switch to Arm mode. */
- tmp_name = bfd_malloc ((bfd_size_type) strlen (name)
- + strlen (CHANGE_TO_ARM) + 1);
-
- BFD_ASSERT (tmp_name);
-
- sprintf (tmp_name, CHANGE_TO_ARM, name);
-
- bh = NULL;
- val = hash_table->thumb_glue_size + 4,
- _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
- tmp_name, BSF_LOCAL, s, val,
- NULL, TRUE, FALSE, &bh);
-
- free (tmp_name);
-
- s->size += THUMB2ARM_GLUE_SIZE;
- hash_table->thumb_glue_size += THUMB2ARM_GLUE_SIZE;
-}
-
-
/* Allocate space for ARMv4 BX veneers. */
static void
return val;
}
-/* Note: we do not include the flag SEC_LINKER_CREATED, as that
- would prevent elf_link_input_bfd() from processing the contents
- of the section. */
#define ARM_GLUE_SECTION_FLAGS \
- (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE | SEC_READONLY)
+ (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
+ | SEC_READONLY | SEC_LINKER_CREATED)
/* Create a fake section for use by the ARM backend of the linker. */
if (info->relocatable)
return TRUE;
- /* Linker stubs don't need glue. */
- if (!strcmp (abfd->filename, "linker stubs"))
- return TRUE;
-
return arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
&& arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
&& arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
/* These are the only relocation types we care about. */
if ( r_type != R_ARM_PC24
- && r_type != R_ARM_PLT32
- && r_type != R_ARM_JUMP24
- && r_type != R_ARM_THM_JUMP24
&& (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
continue;
switch (r_type)
{
case R_ARM_PC24:
- case R_ARM_PLT32:
- case R_ARM_JUMP24:
/* This one is a call from arm code. We need to look up
the target of the call. If it is a thumb target, we
insert glue. */
- if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC
- && !(r_type == R_ARM_CALL && globals->use_blx))
+ if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
record_arm_to_thumb_glue (link_info, h);
break;
- case R_ARM_THM_JUMP24:
- /* This one is a call from thumb code. We look
- up the target of the call. If it is not a thumb
- target, we insert glue. */
- if (ELF_ST_TYPE (h->type) != STT_ARM_TFUNC
- && !(globals->use_blx && r_type == R_ARM_THM_CALL)
- && h->root.type != bfd_link_hash_undefweak)
- record_thumb_to_arm_glue (link_info, h);
- break;
-
default:
abort ();
}
}
+/* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
+ say what they wanted. */
+
+void
+bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
+{
+ struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
+ obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
+
+ if (globals->fix_cortex_a8 == -1)
+ {
+ /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
+ if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
+ && (out_attr[Tag_CPU_arch_profile].i == 'A'
+ || out_attr[Tag_CPU_arch_profile].i == 0))
+ globals->fix_cortex_a8 = 1;
+ else
+ globals->fix_cortex_a8 = 0;
+ }
+}
+
+
void
bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
{
if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
return TRUE;
+ /* Skip this BFD if it corresponds to an executable or dynamic object. */
+ if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
+ return TRUE;
+
for (sec = abfd->sections; sec != NULL; sec = sec->next)
{
unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
if (elf_section_type (sec) != SHT_PROGBITS
|| (elf_section_flags (sec) & SHF_EXECINSTR) == 0
|| (sec->flags & SEC_EXCLUDE) != 0
+ || sec->sec_info_type == ELF_INFO_TYPE_JUST_SYMS
+ || sec->output_section == bfd_abs_section_ptr
|| strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
continue;
int use_blx,
bfd_arm_vfp11_fix vfp11_fix,
int no_enum_warn, int no_wchar_warn,
- int pic_veneer)
+ int pic_veneer, int fix_cortex_a8)
{
struct elf32_arm_link_hash_table *globals;
globals->use_blx |= use_blx;
globals->vfp11_fix = vfp11_fix;
globals->pic_veneer = pic_veneer;
+ globals->fix_cortex_a8 = fix_cortex_a8;
BFD_ASSERT (is_arm_elf (output_bfd));
elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
/* Handle relocations which should use the PLT entry. ABS32/REL32
will use the symbol's value, which may point to a PLT entry, but we
don't need to handle that here. If we created a PLT entry, all
- branches in this object should go to it. */
+ branches in this object should go to it, except if the PLT is too
+ far away, in which case a long branch stub should be inserted. */
if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
- && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI)
+ && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
+ && r_type != R_ARM_CALL
+ && r_type != R_ARM_JUMP24
+ && r_type != R_ARM_PLT32)
&& h != NULL
&& splt != NULL
&& h->plt.offset != (bfd_vma) -1)
case R_ARM_PC24: /* Arm B/BL instruction. */
case R_ARM_PLT32:
{
- bfd_vma from;
bfd_signed_vma branch_offset;
struct elf32_arm_stub_hash_entry *stub_entry = NULL;
- from = (input_section->output_section->vma
- + input_section->output_offset
- + rel->r_offset);
- branch_offset = (bfd_signed_vma)(value - from);
-
if (r_type == R_ARM_XPC25)
{
/* Check for Arm calling Arm function. */
input_bfd,
h ? h->root.root.string : "(local)");
}
- else if (r_type != R_ARM_CALL)
+ else if (r_type == R_ARM_PC24)
{
/* Check for Arm calling Thumb function. */
if (sym_flags == STT_ARM_TFUNC)
/* Check if a stub has to be inserted because the
destination is too far or we are changing mode. */
- if (r_type == R_ARM_CALL)
+ if ( r_type == R_ARM_CALL
+ || r_type == R_ARM_JUMP24
+ || r_type == R_ARM_PLT32)
{
+ bfd_vma from;
+
+ /* If the call goes through a PLT entry, make sure to
+ check distance to the right destination address. */
+ if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
+ {
+ value = (splt->output_section->vma
+ + splt->output_offset
+ + h->plt.offset);
+ *unresolved_reloc_p = FALSE;
+ }
+
+ from = (input_section->output_section->vma
+ + input_section->output_offset
+ + rel->r_offset);
+ branch_offset = (bfd_signed_vma)(value - from);
+
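+ /* A stub is needed if the branch is out of range, or if the
+ target is Thumb and this instruction cannot switch mode by
+ itself: a BL without BLX available, a plain B (JUMP24), or a
+ branch via PLT32. */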
if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
|| branch_offset < ARM_MAX_BWD_BRANCH_OFFSET
- || sym_flags == STT_ARM_TFUNC)
+ || ((sym_flags == STT_ARM_TFUNC)
+ && (((r_type == R_ARM_CALL) && !globals->use_blx)
+ || (r_type == R_ARM_JUMP24)
+ || (r_type == R_ARM_PLT32) ))
+ )
{
/* The target is out of reach, so redirect the
branch to the local stub for this function. */
signed_addend >>= howto->rightshift;
/* A branch to an undefined weak symbol is turned into a jump to
- the next instruction. */
- if (h && h->root.type == bfd_link_hash_undefweak)
+ the next instruction unless a PLT entry will be created.
+ Do the same for local undefined symbols.
+ The jump to the next instruction is optimized as a NOP depending
+ on the architecture. */
+ if (h ? (h->root.type == bfd_link_hash_undefweak
+ && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
+ : bfd_is_und_section (sym_sec))
{
- value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000)
- | 0x0affffff;
+ value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
+
+ if (arch_has_arm_nop (globals))
+ value |= 0x0320f000;
+ else
+ value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
}
else
{
value = (signed_addend & howto->dst_mask)
| (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
- /* Set the H bit in the BLX instruction. */
- if (sym_flags == STT_ARM_TFUNC)
- {
- if (addend)
- value |= (1 << 24);
- else
- value &= ~(bfd_vma)(1 << 24);
- }
if (r_type == R_ARM_CALL)
{
+ /* Set the H bit in the BLX instruction. */
+ if (sym_flags == STT_ARM_TFUNC)
+ {
+ if (addend)
+ value |= (1 << 24);
+ else
+ value &= ~(bfd_vma)(1 << 24);
+ }
+
/* Select the correct instruction (BL or BLX). */
/* Only if we are not handling a BL to a stub. In this
case, mode switching is performed by the stub. */
return bfd_reloc_ok;
}
+ case R_ARM_THM_PC8:
+ /* PR 10073: This reloc is not generated by the GNU toolchain,
+ but it is supported for compatibility with third party libraries
+ generated by other compilers, specifically the ARM/IAR. */
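+ /* The code below treats the low eight bits of the instruction as
+ a word-scaled, PC-relative offset, as in the 16-bit Thumb
+ "ldr Rt, [pc, #imm]" encoding. */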
+ {
+ bfd_vma insn;
+ bfd_signed_vma relocation;
+
+ insn = bfd_get_16 (input_bfd, hit_data);
+
+ if (globals->use_rel)
+ addend = (insn & 0x00ff) << 2;
+
+ relocation = value + addend;
+ relocation -= (input_section->output_section->vma
+ + input_section->output_offset
+ + rel->r_offset);
+
+ value = abs (relocation);
+
+ /* We do not check for overflow of this reloc. Although strictly
+ speaking this is incorrect, it appears to be necessary in order
+ to work with IAR generated relocs. Since GCC and GAS do not
+ generate R_ARM_THM_PC8 relocs, the lack of a check should not be
+ a problem for them. */
+ value &= 0x3fc;
+
+ insn = (insn & 0xff00) | (value >> 2);
+
+ bfd_put_16 (input_bfd, insn, hit_data);
+
+ return bfd_reloc_ok;
+ }
+
case R_ARM_THM_PC12:
/* Corresponds to: ldr.w reg, [pc, #offset]. */
{
bfd_vma check;
bfd_signed_vma signed_check;
int bitsize;
- int thumb2 = using_thumb2 (globals);
+ const int thumb2 = using_thumb2 (globals);
/* A branch to an undefined weak symbol is turned into a jump to
- the next instruction unless a PLT entry will be created. */
+ the next instruction unless a PLT entry will be created.
+ The jump to the next instruction is optimized as a NOP.W for
+ Thumb-2 enabled architectures. */
if (h && h->root.type == bfd_link_hash_undefweak
&& !(splt != NULL && h->plt.offset != (bfd_vma) -1))
{
- bfd_put_16 (input_bfd, 0xe000, hit_data);
- bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
+ if (arch_has_thumb2_nop (globals))
+ {
+ bfd_put_16 (input_bfd, 0xf3af, hit_data);
+ bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
+ }
+ else
+ {
+ bfd_put_16 (input_bfd, 0xe000, hit_data);
+ bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
+ }
return bfd_reloc_ok;
}
/* Convert BL to BLX. */
lower_insn = (lower_insn & ~0x1000) | 0x0800;
}
- else if (r_type != R_ARM_THM_CALL)
+ else if (( r_type != R_ARM_THM_CALL)
+ && (r_type != R_ARM_THM_JUMP24))
{
if (elf32_thumb_to_arm_stub
(info, sym_name, input_bfd, output_bfd, input_section,
*unresolved_reloc_p = FALSE;
}
- if (r_type == R_ARM_THM_CALL)
+ if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
{
/* Check if a stub has to be inserted because the destination
is too far. */
(thumb2
&& (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
|| (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
- || ((sym_flags != STT_ARM_TFUNC) && !globals->use_blx))
+ || ((sym_flags != STT_ARM_TFUNC)
+ && (((r_type == R_ARM_THM_CALL) && !globals->use_blx)
+ || r_type == R_ARM_THM_JUMP24)))
{
/* The target is out of reach or we are changing modes, so
redirect the branch to the local stub for this
+ stub_entry->stub_sec->output_section->vma);
/* If this call becomes a call to Arm, force BLX. */
- if (globals->use_blx)
+ if (globals->use_blx && (r_type == R_ARM_THM_CALL))
{
if ((stub_entry
&& !arm_stub_is_thumb (stub_entry->stub_type))
sym = local_syms + r_symndx;
sym_type = ELF32_ST_TYPE (sym->st_info);
sec = local_sections[r_symndx];
+
+ /* An object file might have a reference to a local
+ undefined symbol. This is a daft object file, but we
+ should at least do something about it. V4BX & NONE
+ relocations do not use the symbol and are explicitly
+ allowed to use the undefined symbol, so allow those. */
+ if (r_type != R_ARM_V4BX
+ && r_type != R_ARM_NONE
+ && bfd_is_und_section (sec)
+ && ELF_ST_BIND (sym->st_info) != STB_WEAK)
+ {
+ if (!info->callbacks->undefined_symbol
+ (info, bfd_elf_string_from_elf_section
+ (input_bfd, symtab_hdr->sh_link, sym->st_name),
+ input_bfd, input_section,
+ rel->r_offset, TRUE))
+ return FALSE;
+ }
+
if (globals->use_rel)
{
relocation = (sec->output_section->vma
return TRUE;
}
+/* Add a new unwind edit to the list described by HEAD, TAIL. If INDEX is zero,
+ adds the edit to the start of the list. (The list must be built in order of
+ ascending INDEX: the function's callers are primarily responsible for
+ maintaining that condition). */
+
+static void
+add_unwind_table_edit (arm_unwind_table_edit **head,
+ arm_unwind_table_edit **tail,
+ arm_unwind_edit_type type,
+ asection *linked_section,
+ unsigned int index)
+{
+ arm_unwind_table_edit *new_edit = xmalloc (sizeof (arm_unwind_table_edit));
+
+ new_edit->type = type;
+ new_edit->linked_section = linked_section;
+ new_edit->index = index;
+
+ if (index > 0)
+ {
+ new_edit->next = NULL;
+
+ if (*tail)
+ (*tail)->next = new_edit;
+
+ (*tail) = new_edit;
+
+ if (!*head)
+ (*head) = new_edit;
+ }
+ else
+ {
+ new_edit->next = *head;
+
+ if (!*tail)
+ *tail = new_edit;
+
+ *head = new_edit;
+ }
+}
+
+static _arm_elf_section_data *get_arm_elf_section_data (asection *);
+
+/* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST may be negative. */
+static void
+adjust_exidx_size (asection *exidx_sec, int adjust)
+{
+ asection *out_sec;
+
+ if (!exidx_sec->rawsize)
+ exidx_sec->rawsize = exidx_sec->size;
+
+ bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
+ out_sec = exidx_sec->output_section;
+ /* Adjust size of output section. */
+ bfd_set_section_size (out_sec->owner, out_sec, out_sec->size + adjust);
+}
+
+/* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
+static void
+insert_cantunwind_after (asection *text_sec, asection *exidx_sec)
+{
+ struct _arm_elf_section_data *exidx_arm_data;
+
+ exidx_arm_data = get_arm_elf_section_data (exidx_sec);
+ add_unwind_table_edit (
+ &exidx_arm_data->u.exidx.unwind_edit_list,
+ &exidx_arm_data->u.exidx.unwind_edit_tail,
+ INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
+
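+ /* An EXIDX_CANTUNWIND marker occupies one 8-byte table entry. */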
+ adjust_exidx_size (exidx_sec, 8);
+}
+
+/* Scan .ARM.exidx tables, and create a list describing edits which should be
+ made to those tables, such that:
+
+ 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
+ 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
+ codes which have been inlined into the index).
+
+ The edits are applied when the tables are written
+ (in elf32_arm_write_section).
+*/
+
+bfd_boolean
+elf32_arm_fix_exidx_coverage (asection **text_section_order,
+ unsigned int num_text_sections,
+ struct bfd_link_info *info)
+{
+ bfd *inp;
+ unsigned int last_second_word = 0, i;
+ asection *last_exidx_sec = NULL;
+ asection *last_text_sec = NULL;
+ int last_unwind_type = -1;
+
+ /* Walk over all EXIDX sections, and create backlinks from the corresponding
+ text sections. */
+ for (inp = info->input_bfds; inp != NULL; inp = inp->link_next)
+ {
+ asection *sec;
+
+ for (sec = inp->sections; sec != NULL; sec = sec->next)
+ {
+ struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
+ Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
+
+ if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
+ continue;
+
+ if (elf_sec->linked_to)
+ {
+ Elf_Internal_Shdr *linked_hdr
+ = &elf_section_data (elf_sec->linked_to)->this_hdr;
+ struct _arm_elf_section_data *linked_sec_arm_data
+ = get_arm_elf_section_data (linked_hdr->bfd_section);
+
+ if (linked_sec_arm_data == NULL)
+ continue;
+
+ /* Link this .ARM.exidx section back from the text section it
+ describes. */
+ linked_sec_arm_data->u.text.arm_exidx_sec = sec;
+ }
+ }
+ }
+
+ /* Walk all text sections in order of increasing VMA. Eliminate duplicate
+ index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
+ and add EXIDX_CANTUNWIND entries for sections with no unwind table data. */
+
+ for (i = 0; i < num_text_sections; i++)
+ {
+ asection *sec = text_section_order[i];
+ asection *exidx_sec;
+ struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
+ struct _arm_elf_section_data *exidx_arm_data;
+ bfd_byte *contents = NULL;
+ int deleted_exidx_bytes = 0;
+ bfd_vma j;
+ arm_unwind_table_edit *unwind_edit_head = NULL;
+ arm_unwind_table_edit *unwind_edit_tail = NULL;
+ Elf_Internal_Shdr *hdr;
+ bfd *ibfd;
+
+ if (arm_data == NULL)
+ continue;
+
+ exidx_sec = arm_data->u.text.arm_exidx_sec;
+ if (exidx_sec == NULL)
+ {
+ /* Section has no unwind data. */
+ if (last_unwind_type == 0 || !last_exidx_sec)
+ continue;
+
+ /* Ignore zero sized sections. */
+ if (sec->size == 0)
+ continue;
+
+ insert_cantunwind_after (last_text_sec, last_exidx_sec);
+ last_unwind_type = 0;
+ continue;
+ }
+
+ /* Skip /DISCARD/ sections. */
+ if (bfd_is_abs_section (exidx_sec->output_section))
+ continue;
+
+ hdr = &elf_section_data (exidx_sec)->this_hdr;
+ if (hdr->sh_type != SHT_ARM_EXIDX)
+ continue;
+
+ exidx_arm_data = get_arm_elf_section_data (exidx_sec);
+ if (exidx_arm_data == NULL)
+ continue;
+
+ ibfd = exidx_sec->owner;
+
+ if (hdr->contents != NULL)
+ contents = hdr->contents;
+ else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
+ /* An error? */
+ continue;
+
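+ /* Each index table entry is a pair of words: the first is a
+ prel31 offset to the function it describes; the second is
+ either EXIDX_CANTUNWIND (1), inlined unwind opcodes (bit 31
+ set), or a prel31 offset to an .ARM.extab entry. */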
+ for (j = 0; j < hdr->sh_size; j += 8)
+ {
+ unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
+ int unwind_type;
+ int elide = 0;
+
+ /* An EXIDX_CANTUNWIND entry. */
+ if (second_word == 1)
+ {
+ if (last_unwind_type == 0)
+ elide = 1;
+ unwind_type = 0;
+ }
+ /* Inlined unwinding data. Merge if equal to previous. */
+ else if ((second_word & 0x80000000) != 0)
+ {
+ if (last_second_word == second_word && last_unwind_type == 1)
+ elide = 1;
+ unwind_type = 1;
+ last_second_word = second_word;
+ }
+ /* Normal table entry. In theory we could merge these too,
+ but duplicate entries are likely to be much less common. */
+ else
+ unwind_type = 2;
+
+ if (elide)
+ {
+ add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
+ DELETE_EXIDX_ENTRY, NULL, j / 8);
+
+ deleted_exidx_bytes += 8;
+ }
+
+ last_unwind_type = unwind_type;
+ }
+
+ /* Free contents if we allocated it ourselves. */
+ if (contents != hdr->contents)
+ free (contents);
+
+ /* Record edits to be applied later (in elf32_arm_write_section). */
+ exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
+ exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
+
+ if (deleted_exidx_bytes > 0)
+ adjust_exidx_size (exidx_sec, -deleted_exidx_bytes);
+
+ last_exidx_sec = exidx_sec;
+ last_text_sec = sec;
+ }
+
+ /* Add terminating CANTUNWIND entry. */
+ if (last_exidx_sec && last_unwind_type != 0)
+ insert_cantunwind_after (last_text_sec, last_exidx_sec);
+
+ return TRUE;
+}
+
+static bfd_boolean
+elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
+ bfd *ibfd, const char *name)
+{
+ asection *sec, *osec;
+
+ sec = bfd_get_section_by_name (ibfd, name);
+ if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
+ return TRUE;
+
+ osec = sec->output_section;
+ if (elf32_arm_write_section (obfd, info, sec, sec->contents))
+ return TRUE;
+
+ if (! bfd_set_section_contents (obfd, osec, sec->contents,
+ sec->output_offset, sec->size))
+ return FALSE;
+
+ return TRUE;
+}
+
+static bfd_boolean
+elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
+{
+ struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
+
+ /* Invoke the regular ELF backend linker to do all the work. */
+ if (!bfd_elf_final_link (abfd, info))
+ return FALSE;
+
+ /* Write out any glue sections now that we have created all the
+ stubs. */
+ if (globals->bfd_of_glue_owner != NULL)
+ {
+ if (! elf32_arm_output_glue_section (info, abfd,
+ globals->bfd_of_glue_owner,
+ ARM2THUMB_GLUE_SECTION_NAME))
+ return FALSE;
+
+ if (! elf32_arm_output_glue_section (info, abfd,
+ globals->bfd_of_glue_owner,
+ THUMB2ARM_GLUE_SECTION_NAME))
+ return FALSE;
+
+ if (! elf32_arm_output_glue_section (info, abfd,
+ globals->bfd_of_glue_owner,
+ VFP11_ERRATUM_VENEER_SECTION_NAME))
+ return FALSE;
+
+ if (! elf32_arm_output_glue_section (info, abfd,
+ globals->bfd_of_glue_owner,
+ ARM_BX_GLUE_SECTION_NAME))
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
/* Set the right machine number. */
static bfd_boolean
elf32_arm_obj_attrs_arg_type (int tag)
{
if (tag == Tag_compatibility)
- return 3;
- else if (tag == 4 || tag == 5)
- return 2;
+ return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
+ else if (tag == Tag_nodefaults)
+ return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
+ else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
+ return ATTR_TYPE_FLAG_STR_VAL;
else if (tag < 32)
- return 1;
+ return ATTR_TYPE_FLAG_INT_VAL;
else
- return (tag & 1) != 0 ? 2 : 1;
+ return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
+}
+
+/* The ABI defines that Tag_conformance should be emitted first, and that
+ Tag_nodefaults should be second (if either is defined). This sets those
+ two positions, and bumps up the position of all the remaining tags to
+ compensate. */
+static int
+elf32_arm_obj_attrs_order (int num)
+{
+ if (num == 4)
+ return Tag_conformance;
+ if (num == 5)
+ return Tag_nodefaults;
+ if ((num - 2) < Tag_nodefaults)
+ return num - 2;
+ if ((num - 1) < Tag_conformance)
+ return num - 1;
+ return num;
+}
+
+/* Read the architecture from the Tag_also_compatible_with attribute, if any.
+ Returns -1 if no architecture could be read. */
+
+static int
+get_secondary_compatible_arch (bfd *abfd)
+{
+ obj_attribute *attr =
+ &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
+
+ /* Note: the tag and its argument below are uleb128 values, though
+ currently-defined values fit in one byte for each. */
+ if (attr->s
+ && attr->s[0] == Tag_CPU_arch
+ && (attr->s[1] & 128) != 128
+ && attr->s[2] == 0)
+ return attr->s[1];
+
+ /* This tag is "safely ignorable", so don't complain if it looks funny. */
+ return -1;
}
+/* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
+ The tag is removed if ARCH is -1. */
+
static void
-elf32_arm_copy_one_eabi_other_attribute (bfd *ibfd, bfd *obfd, obj_attribute_list *in_list)
+set_secondary_compatible_arch (bfd *abfd, int arch)
{
- switch (in_list->tag)
- {
- case Tag_VFP_HP_extension:
- case Tag_ABI_FP_16bit_format:
- bfd_elf_add_obj_attr_int (obfd, OBJ_ATTR_PROC, in_list->tag, in_list->attr.i);
- break;
+ obj_attribute *attr =
+ &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
- default:
- if ((in_list->tag & 127) < 64)
- {
- _bfd_error_handler
- (_("Warning: %B: Unknown EABI object attribute %d"), ibfd, in_list->tag);
- break;
- }
+ if (arch == -1)
+ {
+ attr->s = NULL;
+ return;
}
+
+ /* Note: the tag and its argument below are uleb128 values, though
+ currently-defined values fit in one byte for each. */
+ if (!attr->s)
+ attr->s = bfd_alloc (abfd, 3);
+ attr->s[0] = Tag_CPU_arch;
+ attr->s[1] = arch;
+ attr->s[2] = '\0';
}
-static void
-elf32_arm_copy_eabi_other_attribute_list (bfd *ibfd, bfd *obfd, obj_attribute_list *in_list)
-{
- for (; in_list; in_list = in_list->next )
- elf32_arm_copy_one_eabi_other_attribute (ibfd, obfd, in_list);
+/* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
+ into account. */
+
+static int
+tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
+ int newtag, int secondary_compat)
+{
+#define T(X) TAG_CPU_ARCH_##X
+ int tagl, tagh, result;
+ const int v6t2[] =
+ {
+ T(V6T2), /* PRE_V4. */
+ T(V6T2), /* V4. */
+ T(V6T2), /* V4T. */
+ T(V6T2), /* V5T. */
+ T(V6T2), /* V5TE. */
+ T(V6T2), /* V5TEJ. */
+ T(V6T2), /* V6. */
+ T(V7), /* V6KZ. */
+ T(V6T2) /* V6T2. */
+ };
+ const int v6k[] =
+ {
+ T(V6K), /* PRE_V4. */
+ T(V6K), /* V4. */
+ T(V6K), /* V4T. */
+ T(V6K), /* V5T. */
+ T(V6K), /* V5TE. */
+ T(V6K), /* V5TEJ. */
+ T(V6K), /* V6. */
+ T(V6KZ), /* V6KZ. */
+ T(V7), /* V6T2. */
+ T(V6K) /* V6K. */
+ };
+ const int v7[] =
+ {
+ T(V7), /* PRE_V4. */
+ T(V7), /* V4. */
+ T(V7), /* V4T. */
+ T(V7), /* V5T. */
+ T(V7), /* V5TE. */
+ T(V7), /* V5TEJ. */
+ T(V7), /* V6. */
+ T(V7), /* V6KZ. */
+ T(V7), /* V6T2. */
+ T(V7), /* V6K. */
+ T(V7) /* V7. */
+ };
+ const int v6_m[] =
+ {
+ -1, /* PRE_V4. */
+ -1, /* V4. */
+ T(V6K), /* V4T. */
+ T(V6K), /* V5T. */
+ T(V6K), /* V5TE. */
+ T(V6K), /* V5TEJ. */
+ T(V6K), /* V6. */
+ T(V6KZ), /* V6KZ. */
+ T(V7), /* V6T2. */
+ T(V6K), /* V6K. */
+ T(V7), /* V7. */
+ T(V6_M) /* V6_M. */
+ };
+ const int v6s_m[] =
+ {
+ -1, /* PRE_V4. */
+ -1, /* V4. */
+ T(V6K), /* V4T. */
+ T(V6K), /* V5T. */
+ T(V6K), /* V5TE. */
+ T(V6K), /* V5TEJ. */
+ T(V6K), /* V6. */
+ T(V6KZ), /* V6KZ. */
+ T(V7), /* V6T2. */
+ T(V6K), /* V6K. */
+ T(V7), /* V7. */
+ T(V6S_M), /* V6_M. */
+ T(V6S_M) /* V6S_M. */
+ };
+ const int v4t_plus_v6_m[] =
+ {
+ -1, /* PRE_V4. */
+ -1, /* V4. */
+ T(V4T), /* V4T. */
+ T(V5T), /* V5T. */
+ T(V5TE), /* V5TE. */
+ T(V5TEJ), /* V5TEJ. */
+ T(V6), /* V6. */
+ T(V6KZ), /* V6KZ. */
+ T(V6T2), /* V6T2. */
+ T(V6K), /* V6K. */
+ T(V7), /* V7. */
+ T(V6_M), /* V6_M. */
+ T(V6S_M), /* V6S_M. */
+ T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
+ };
+ const int *comb[] =
+ {
+ v6t2,
+ v6k,
+ v7,
+ v6_m,
+ v6s_m,
+ /* Pseudo-architecture. */
+ v4t_plus_v6_m
+ };
+
+ /* Check we've not got a higher architecture than we know about. */
+
+ if (oldtag >= MAX_TAG_CPU_ARCH || newtag >= MAX_TAG_CPU_ARCH)
+ {
+ _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
+ return -1;
+ }
+
+ /* Override old tag if we have a Tag_also_compatible_with on the output. */
+
+ if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
+ || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
+ oldtag = T(V4T_PLUS_V6_M);
+
+ /* And override the new tag if we have a Tag_also_compatible_with on the
+ input. */
+
+ if ((newtag == T(V6_M) && secondary_compat == T(V4T))
+ || (newtag == T(V4T) && secondary_compat == T(V6_M)))
+ newtag = T(V4T_PLUS_V6_M);
+
+ tagl = (oldtag < newtag) ? oldtag : newtag;
+ result = tagh = (oldtag > newtag) ? oldtag : newtag;
+
+ /* Architectures before V6KZ add features monotonically. */
+ if (tagh <= TAG_CPU_ARCH_V6KZ)
+ return result;
+
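+ /* Otherwise combine via the tables above: the row is selected by
+ the higher of the two tags (counting from V6T2), the column by
+ the lower; -1 marks an incompatible combination. */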
+ result = comb[tagh - T(V6T2)][tagl];
+
+ /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
+ as the canonical version. */
+ if (result == T(V4T_PLUS_V6_M))
+ {
+ result = T(V4T);
+ *secondary_compat_out = T(V6_M);
+ }
+ else
+ *secondary_compat_out = -1;
+
+ if (result == -1)
+ {
+ _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
+ ibfd, oldtag, newtag);
+ return -1;
+ }
+
+ return result;
+#undef T
}
/* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
obj_attribute *out_attr;
obj_attribute_list *in_list;
obj_attribute_list *out_list;
+ obj_attribute_list **out_listp;
/* Some tags have 0 = don't care, 1 = strong requirement,
2 = weak requirement. */
- static const int order_312[3] = {3, 1, 2};
+ static const int order_021[3] = {0, 2, 1};
/* For use with Tag_VFP_arch. */
static const int order_01243[5] = {0, 1, 2, 4, 3};
int i;
+ bfd_boolean result = TRUE;
+
+ /* Skip the linker stubs file. This preserves previous behavior
+ of accepting unknown attributes in the first input file - but
+ is that a bug? */
+ if (ibfd->flags & BFD_LINKER_CREATED)
+ return TRUE;
if (!elf_known_obj_attributes_proc (obfd)[0].i)
{
else if (in_attr[Tag_ABI_FP_number_model].i != 0)
{
_bfd_error_handler
- (_("ERROR: %B uses VFP register arguments, %B does not"),
+ (_("error: %B uses VFP register arguments, %B does not"),
ibfd, obfd);
- return FALSE;
+ result = FALSE;
}
}
{
case Tag_CPU_raw_name:
case Tag_CPU_name:
- /* Use whichever has the greatest architecture requirements. We
- won't necessarily have both the above tags, so make sure input
- name is non-NULL. */
- if (in_attr[Tag_CPU_arch].i > out_attr[Tag_CPU_arch].i
- && in_attr[i].s)
- out_attr[i].s = _bfd_elf_attr_strdup (obfd, in_attr[i].s);
+ /* These are merged after Tag_CPU_arch. */
break;
case Tag_ABI_optimization_goals:
break;
case Tag_CPU_arch:
+ {
+ int secondary_compat = -1, secondary_compat_out = -1;
+ unsigned int saved_out_attr = out_attr[i].i;
+ static const char *name_table[] = {
+ /* These aren't real CPU names, but we can't guess
+ that from the architecture version alone. */
+ "Pre v4",
+ "ARM v4",
+ "ARM v4T",
+ "ARM v5T",
+ "ARM v5TE",
+ "ARM v5TEJ",
+ "ARM v6",
+ "ARM v6KZ",
+ "ARM v6T2",
+ "ARM v6K",
+ "ARM v7",
+ "ARM v6-M",
+ "ARM v6S-M"
+ };
+
+ /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
+ secondary_compat = get_secondary_compatible_arch (ibfd);
+ secondary_compat_out = get_secondary_compatible_arch (obfd);
+ out_attr[i].i = tag_cpu_arch_combine (ibfd, out_attr[i].i,
+ &secondary_compat_out,
+ in_attr[i].i,
+ secondary_compat);
+ set_secondary_compatible_arch (obfd, secondary_compat_out);
+
+ /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
+ if (out_attr[i].i == saved_out_attr)
+ ; /* Leave the names alone. */
+ else if (out_attr[i].i == in_attr[i].i)
+ {
+ /* The output architecture has been changed to match the
+ input architecture. Use the input names. */
+ out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
+ ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
+ : NULL;
+ out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
+ ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
+ : NULL;
+ }
+ else
+ {
+ out_attr[Tag_CPU_name].s = NULL;
+ out_attr[Tag_CPU_raw_name].s = NULL;
+ }
+
+ /* If we still don't have a value for Tag_CPU_name,
+ make one up now. Tag_CPU_raw_name remains blank. */
+ if (out_attr[Tag_CPU_name].s == NULL
+ && out_attr[i].i < ARRAY_SIZE (name_table))
+ out_attr[Tag_CPU_name].s =
+ _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
+ }
+ break;
+
case Tag_ARM_ISA_use:
case Tag_THUMB_ISA_use:
case Tag_WMMX_arch:
- case Tag_NEON_arch:
- /* ??? Do NEON and WMMX conflict? */
+ case Tag_Advanced_SIMD_arch:
+ /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
case Tag_ABI_FP_rounding:
- case Tag_ABI_FP_denormal:
case Tag_ABI_FP_exceptions:
case Tag_ABI_FP_user_exceptions:
case Tag_ABI_FP_number_model:
- case Tag_ABI_align8_preserved:
- case Tag_ABI_HardFP_use:
+ case Tag_VFP_HP_extension:
+ case Tag_CPU_unaligned_access:
+ case Tag_T2EE_use:
+ case Tag_Virtualization_use:
+ case Tag_MPextension_use:
/* Use the largest value specified. */
if (in_attr[i].i > out_attr[i].i)
out_attr[i].i = in_attr[i].i;
break;
- case Tag_CPU_arch_profile:
- /* Warn if conflicting architecture profiles used. */
- if (out_attr[i].i && in_attr[i].i && in_attr[i].i != out_attr[i].i)
+ case Tag_ABI_align8_preserved:
+ case Tag_ABI_PCS_RO_data:
+ /* Use the smallest value specified. */
+ if (in_attr[i].i < out_attr[i].i)
+ out_attr[i].i = in_attr[i].i;
+ break;
+
+ case Tag_ABI_align8_needed:
+ if ((in_attr[i].i > 0 || out_attr[i].i > 0)
+ && (in_attr[Tag_ABI_align8_preserved].i == 0
+ || out_attr[Tag_ABI_align8_preserved].i == 0))
{
+ /* This error message should be enabled once all non-conformant
+ binaries in the toolchain have had the attributes set
+ properly.
_bfd_error_handler
- (_("ERROR: %B: Conflicting architecture profiles %c/%c"),
- ibfd, in_attr[i].i, out_attr[i].i);
- return FALSE;
+ (_("error: %B: 8-byte data alignment conflicts with %B"),
+ obfd, ibfd);
+ result = FALSE; */
+ }
+ /* Fall through. */
+ case Tag_ABI_FP_denormal:
+ case Tag_ABI_PCS_GOT_use:
+ /* Use the "greatest" from the sequence 0, 2, 1, or the largest
+ value if greater than 2 (for future-proofing). */
+ if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
+ || (in_attr[i].i <= 2 && out_attr[i].i <= 2
+ && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
+ out_attr[i].i = in_attr[i].i;
+ break;
+
+
+ case Tag_CPU_arch_profile:
+ if (out_attr[i].i != in_attr[i].i)
+ {
+ /* 0 will merge with anything.
+ 'A' and 'S' merge to 'A'.
+ 'R' and 'S' merge to 'R'.
+ 'M' and 'A|R|S' is an error. */
+ if (out_attr[i].i == 0
+ || (out_attr[i].i == 'S'
+ && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
+ out_attr[i].i = in_attr[i].i;
+ else if (in_attr[i].i == 0
+ || (in_attr[i].i == 'S'
+ && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
+ ; /* Do nothing. */
+ else
+ {
+ _bfd_error_handler
+ (_("error: %B: Conflicting architecture profiles %c/%c"),
+ ibfd,
+ in_attr[i].i ? in_attr[i].i : '0',
+ out_attr[i].i ? out_attr[i].i : '0');
+ result = FALSE;
+ }
}
- if (in_attr[i].i)
- out_attr[i].i = in_attr[i].i;
break;
case Tag_VFP_arch:
- if (in_attr[i].i > 4 || out_attr[i].i > 4
- || order_01243[in_attr[i].i] > order_01243[out_attr[i].i])
+ /* Use the "greatest" from the sequence 0, 1, 2, 4, 3, or the
+ largest value if greater than 4 (for future-proofing). */
+ if ((in_attr[i].i > 4 && in_attr[i].i > out_attr[i].i)
+ || (in_attr[i].i <= 4 && out_attr[i].i <= 4
+ && order_01243[in_attr[i].i] > order_01243[out_attr[i].i]))
out_attr[i].i = in_attr[i].i;
break;
case Tag_PCS_config:
&& in_attr[i].i != AEABI_R9_unused)
{
_bfd_error_handler
- (_("ERROR: %B: Conflicting use of R9"), ibfd);
- return FALSE;
+ (_("error: %B: Conflicting use of R9"), ibfd);
+ result = FALSE;
}
if (out_attr[i].i == AEABI_R9_unused)
out_attr[i].i = in_attr[i].i;
&& out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
{
_bfd_error_handler
- (_("ERROR: %B: SB relative addressing conflicts with use of R9"),
+ (_("error: %B: SB relative addressing conflicts with use of R9"),
ibfd);
- return FALSE;
+ result = FALSE;
}
/* Use the smallest value specified. */
if (in_attr[i].i < out_attr[i].i)
out_attr[i].i = in_attr[i].i;
break;
- case Tag_ABI_PCS_RO_data:
- /* Use the smallest value specified. */
- if (in_attr[i].i < out_attr[i].i)
- out_attr[i].i = in_attr[i].i;
- break;
- case Tag_ABI_PCS_GOT_use:
- if (in_attr[i].i > 2 || out_attr[i].i > 2
- || order_312[in_attr[i].i] < order_312[out_attr[i].i])
- out_attr[i].i = in_attr[i].i;
- break;
case Tag_ABI_PCS_wchar_t:
if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
&& !elf_arm_tdata (obfd)->no_wchar_size_warning)
else if (in_attr[i].i && !out_attr[i].i)
out_attr[i].i = in_attr[i].i;
break;
- case Tag_ABI_align8_needed:
- /* ??? Check against Tag_ABI_align8_preserved. */
- if (in_attr[i].i > 2 || out_attr[i].i > 2
- || order_312[in_attr[i].i] < order_312[out_attr[i].i])
- out_attr[i].i = in_attr[i].i;
- break;
case Tag_ABI_enum_size:
if (in_attr[i].i != AEABI_enum_unused)
{
&& out_attr[i].i != in_attr[i].i
&& !elf_arm_tdata (obfd)->no_enum_size_warning)
{
- const char *aeabi_enum_names[] =
+ static const char *aeabi_enum_names[] =
{ "", "variable-size", "32-bit", "" };
+ const char *in_name =
+ in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
+ ? aeabi_enum_names[in_attr[i].i]
+ : "<unknown>";
+ const char *out_name =
+ out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
+ ? aeabi_enum_names[out_attr[i].i]
+ : "<unknown>";
_bfd_error_handler
(_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
- ibfd, aeabi_enum_names[in_attr[i].i],
- aeabi_enum_names[out_attr[i].i]);
+ ibfd, in_name, out_name);
}
}
break;
if (in_attr[i].i != out_attr[i].i)
{
_bfd_error_handler
- (_("ERROR: %B uses iWMMXt register arguments, %B does not"),
+ (_("error: %B uses iWMMXt register arguments, %B does not"),
ibfd, obfd);
- return FALSE;
+ result = FALSE;
}
break;
-
case Tag_compatibility:
/* Merged in target-independent code. */
break;
+ case Tag_ABI_HardFP_use:
+ /* 1 (SP) and 2 (DP) conflict, so combine to 3 (SP & DP). */
+ if ((in_attr[i].i == 1 && out_attr[i].i == 2)
+ || (in_attr[i].i == 2 && out_attr[i].i == 1))
+ out_attr[i].i = 3;
+ else if (in_attr[i].i > out_attr[i].i)
+ out_attr[i].i = in_attr[i].i;
+ break;
+ case Tag_ABI_FP_16bit_format:
+ if (in_attr[i].i != 0 && out_attr[i].i != 0)
+ {
+ if (in_attr[i].i != out_attr[i].i)
+ {
+ _bfd_error_handler
+ (_("error: fp16 format mismatch between %B and %B"),
+ ibfd, obfd);
+ result = FALSE;
+ }
+ }
+ if (in_attr[i].i != 0)
+ out_attr[i].i = in_attr[i].i;
+ break;
- default: /* All known attributes should be explicitly covered. */
- abort ();
- }
+ case Tag_nodefaults:
+ /* This tag is set if it exists, but the value is unused (and is
+ typically zero). We don't actually need to do anything here -
+ the merge happens automatically when the type flags are merged
+ below. */
+ break;
+ case Tag_also_compatible_with:
+ /* Already done in Tag_CPU_arch. */
+ break;
+ case Tag_conformance:
+ /* Keep the attribute if it matches. Throw it away otherwise.
+ No attribute means no claim to conform. */
+ if (!in_attr[i].s || !out_attr[i].s
+ || strcmp (in_attr[i].s, out_attr[i].s) != 0)
+ out_attr[i].s = NULL;
+ break;
- if (in_attr[i].type && !out_attr[i].type)
- switch (in_attr[i].type)
+ default:
{
- case 1:
- if (out_attr[i].i)
- out_attr[i].type = 1;
- break;
+ bfd *err_bfd = NULL;
- case 2:
- if (out_attr[i].s)
- out_attr[i].type = 2;
- break;
+ /* The "known_obj_attributes" table does contain some undefined
+ attributes. Ensure that there are unused. */
+ if (out_attr[i].i != 0 || out_attr[i].s != NULL)
+ err_bfd = obfd;
+ else if (in_attr[i].i != 0 || in_attr[i].s != NULL)
+ err_bfd = ibfd;
- default:
- abort ();
+ if (err_bfd != NULL)
+ {
+ /* Attribute numbers >=64 (mod 128) can be safely ignored. */
+ if ((i & 127) < 64)
+ {
+ _bfd_error_handler
+ (_("%B: Unknown mandatory EABI object attribute %d"),
+ err_bfd, i);
+ bfd_set_error (bfd_error_bad_value);
+ result = FALSE;
+ }
+ else
+ {
+ _bfd_error_handler
+ (_("Warning: %B: Unknown EABI object attribute %d"),
+ err_bfd, i);
+ }
+ }
+
+ /* Only pass on attributes that match in both inputs. */
+ if (in_attr[i].i != out_attr[i].i
+ || in_attr[i].s != out_attr[i].s
+ || (in_attr[i].s != NULL && out_attr[i].s != NULL
+ && strcmp (in_attr[i].s, out_attr[i].s) != 0))
+ {
+ out_attr[i].i = 0;
+ out_attr[i].s = NULL;
+ }
}
+ }
+
+ /* If out_attr was copied from in_attr then it won't have a type yet. */
+ if (in_attr[i].type && !out_attr[i].type)
+ out_attr[i].type = in_attr[i].type;
}
/* Merge Tag_compatibility attributes and any common GNU ones. */
/* Check for any attributes not known on ARM. */
in_list = elf_other_obj_attributes_proc (ibfd);
- out_list = elf_other_obj_attributes_proc (obfd);
+ out_listp = &elf_other_obj_attributes_proc (obfd);
+ out_list = *out_listp;
- for (; in_list != NULL; )
+ for (; in_list || out_list; )
{
- if (out_list == NULL)
- {
- elf32_arm_copy_eabi_other_attribute_list (ibfd, obfd, in_list);
- return TRUE;
- }
+ bfd *err_bfd = NULL;
+ int err_tag = 0;
/* The tags for each list are in numerical order. */
/* If the tags are equal, then merge. */
- if (in_list->tag == out_list->tag)
- {
- switch (in_list->tag)
- {
- case Tag_VFP_HP_extension:
- if (out_list->attr.i == 0)
- out_list->attr.i = in_list->attr.i;
- break;
-
- case Tag_ABI_FP_16bit_format:
- if (in_list->attr.i != 0 && out_list->attr.i != 0)
- {
- if (in_list->attr.i != out_list->attr.i)
- {
- _bfd_error_handler
- (_("ERROR: fp16 format mismatch between %B and %B"),
- ibfd, obfd);
- return FALSE;
- }
- }
- if (in_list->attr.i != 0)
- out_list->attr.i = in_list->attr.i;
- break;
-
- default:
- if ((in_list->tag & 127) < 64)
- {
- _bfd_error_handler
- (_("Warning: %B: Unknown EABI object attribute %d"), ibfd, in_list->tag);
- break;
- }
- }
- }
- else if (in_list->tag < out_list->tag)
+ if (out_list && (!in_list || in_list->tag > out_list->tag))
{
- /* This attribute is in ibfd, but not obfd. Copy to obfd and advance to
- next input attribute. */
- elf32_arm_copy_one_eabi_other_attribute (ibfd, obfd, in_list);
+ /* This attribute only exists in obfd. We can't merge, and we don't
+ know what the tag means, so delete it. */
+ err_bfd = obfd;
+ err_tag = out_list->tag;
+ *out_listp = out_list->next;
+ out_list = *out_listp;
}
- if (in_list->tag <= out_list->tag)
+ else if (in_list && (!out_list || in_list->tag < out_list->tag))
{
+ /* This attribute only exists in ibfd. We can't merge, and we don't
+ know what the tag means, so ignore it. */
+ err_bfd = ibfd;
+ err_tag = in_list->tag;
in_list = in_list->next;
- if (in_list == NULL)
- continue;
}
- while (out_list && out_list->tag < in_list->tag)
- out_list = out_list->next;
+ else /* The tags are equal. */
+ {
+ /* At present, all attributes in the list are unknown, and
+ therefore can't be merged meaningfully. */
+ err_bfd = obfd;
+ err_tag = out_list->tag;
+
+ /* Only pass on attributes that match in both inputs. */
+ if (in_list->attr.i != out_list->attr.i
+ || in_list->attr.s != out_list->attr.s
+ || (in_list->attr.s && out_list->attr.s
+ && strcmp (in_list->attr.s, out_list->attr.s) != 0))
+ {
+ /* No match. Delete the attribute. */
+ *out_listp = out_list->next;
+ out_list = *out_listp;
+ }
+ else
+ {
+ /* Matched. Keep the attribute and move to the next. */
+ out_list = out_list->next;
+ in_list = in_list->next;
+ }
+ }
+
+ if (err_bfd)
+ {
+ /* Attribute numbers >=64 (mod 128) can be safely ignored. */
+ if ((err_tag & 127) < 64)
+ {
+ _bfd_error_handler
+ (_("%B: Unknown mandatory EABI object attribute %d"),
+ err_bfd, err_tag);
+ bfd_set_error (bfd_error_bad_value);
+ result = FALSE;
+ }
+ else
+ {
+ _bfd_error_handler
+ (_("Warning: %B: Unknown EABI object attribute %d"),
+ err_bfd, err_tag);
+ }
+ }
}
- return TRUE;
+ return result;
}
&& !(ibfd->flags & DYNAMIC)
&& (in_flags & EF_ARM_BE8))
{
- _bfd_error_handler (_("ERROR: %B is already in final BE8 format"),
+ _bfd_error_handler (_("error: %B is already in final BE8 format"),
ibfd);
return FALSE;
}
EF_ARM_EABI_VERSION (out_flags)))
{
_bfd_error_handler
- (_("ERROR: Source object %B has EABI version %d, but target %B has EABI version %d"),
+ (_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
ibfd, obfd,
(in_flags & EF_ARM_EABIMASK) >> 24,
(out_flags & EF_ARM_EABIMASK) >> 24);
if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
{
_bfd_error_handler
- (_("ERROR: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
+ (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
ibfd, obfd,
in_flags & EF_ARM_APCS_26 ? 26 : 32,
out_flags & EF_ARM_APCS_26 ? 26 : 32);
{
if (in_flags & EF_ARM_APCS_FLOAT)
_bfd_error_handler
- (_("ERROR: %B passes floats in float registers, whereas %B passes them in integer registers"),
+ (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
ibfd, obfd);
else
_bfd_error_handler
- (_("ERROR: %B passes floats in integer registers, whereas %B passes them in float registers"),
+ (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
ibfd, obfd);
flags_compatible = FALSE;
{
if (in_flags & EF_ARM_VFP_FLOAT)
_bfd_error_handler
- (_("ERROR: %B uses VFP instructions, whereas %B does not"),
+ (_("error: %B uses VFP instructions, whereas %B does not"),
ibfd, obfd);
else
_bfd_error_handler
- (_("ERROR: %B uses FPA instructions, whereas %B does not"),
+ (_("error: %B uses FPA instructions, whereas %B does not"),
ibfd, obfd);
flags_compatible = FALSE;
{
if (in_flags & EF_ARM_MAVERICK_FLOAT)
_bfd_error_handler
- (_("ERROR: %B uses Maverick instructions, whereas %B does not"),
+ (_("error: %B uses Maverick instructions, whereas %B does not"),
ibfd, obfd);
else
_bfd_error_handler
- (_("ERROR: %B does not use Maverick instructions, whereas %B does"),
+ (_("error: %B does not use Maverick instructions, whereas %B does"),
ibfd, obfd);
flags_compatible = FALSE;
{
if (in_flags & EF_ARM_SOFT_FLOAT)
_bfd_error_handler
- (_("ERROR: %B uses software FP, whereas %B uses hardware FP"),
+ (_("error: %B uses software FP, whereas %B uses hardware FP"),
ibfd, obfd);
else
_bfd_error_handler
- (_("ERROR: %B uses hardware FP, whereas %B uses software FP"),
+ (_("error: %B uses hardware FP, whereas %B uses software FP"),
ibfd, obfd);
flags_compatible = FALSE;
bfd_vma *local_got_offsets;
struct elf32_arm_link_hash_table *htab;
bfd_boolean needs_plt;
+ unsigned long nsyms;
if (info->relocatable)
return TRUE;
symtab_hdr = & elf_symtab_hdr (abfd);
sym_hashes = elf_sym_hashes (abfd);
-
+ nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
+
rel_end = relocs + sec->reloc_count;
for (rel = relocs; rel < rel_end; rel++)
{
r_type = ELF32_R_TYPE (rel->r_info);
r_type = arm_real_reloc_type (htab, r_type);
- if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
+ if (r_symndx >= nsyms
+ /* PR 9934: It is possible to have relocations that do not
+ refer to symbols, thus it is also possible to have an
+ object file containing relocations but no symbol table. */
+ && (r_symndx > 0 || nsyms > 0))
{
(*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
- r_symndx);
+ r_symndx);
return FALSE;
}
- if (r_symndx < symtab_hdr->sh_info)
+ if (nsyms == 0 || r_symndx < symtab_hdr->sh_info)
h = NULL;
else
{
needs_plt = 1;
goto normal_reloc;
+ case R_ARM_MOVW_ABS_NC:
+ case R_ARM_MOVT_ABS:
+ case R_ARM_THM_MOVW_ABS_NC:
+ case R_ARM_THM_MOVT_ABS:
+ if (info->shared)
+ {
+ (*_bfd_error_handler)
+ (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
+ abfd, elf32_arm_howto_table_1[r_type].name,
+ (h) ? h->root.root.string : "a local symbol");
+ bfd_set_error (bfd_error_bad_value);
+ return FALSE;
+ }
+
+ /* Fall through. */
case R_ARM_ABS32:
case R_ARM_ABS32_NOI:
case R_ARM_REL32:
case R_ARM_REL32_NOI:
- case R_ARM_MOVW_ABS_NC:
- case R_ARM_MOVT_ABS:
case R_ARM_MOVW_PREL_NC:
case R_ARM_MOVT_PREL:
- case R_ARM_THM_MOVW_ABS_NC:
- case R_ARM_THM_MOVT_ABS:
case R_ARM_THM_MOVW_PREL_NC:
case R_ARM_THM_MOVT_PREL:
needs_plt = 0;
return FALSE;
/* BPABI objects never have dynamic relocations mapped. */
- if (! htab->symbian_p)
+ if (htab->symbian_p)
{
flagword flags;
/* Track dynamic relocs needed for local syms too.
We really need local syms available to do this
easily. Oh well. */
-
asection *s;
void *vpp;
+ Elf_Internal_Sym *isym;
- s = bfd_section_from_r_symndx (abfd, &htab->sym_sec,
- sec, r_symndx);
- if (s == NULL)
+ isym = bfd_sym_from_r_symndx (&htab->sym_cache,
+ abfd, r_symndx);
+ if (isym == NULL)
return FALSE;
+ s = bfd_section_from_elf_index (abfd, isym->st_shndx);
+ if (s == NULL)
+ s = sec;
+
vpp = &elf_section_data (s)->local_dynrel;
head = (struct elf32_arm_relocs_copied **) vpp;
}
{
h->root.u.def.section = s;
h->root.u.def.value = h->plt.offset;
-
- /* Make sure the function is not marked as Thumb, in case
- it is the target of an ABS32 relocation, which will
- point to the PLT entry. */
- if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
- h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
}
+ /* Make sure the function is not marked as Thumb, in case
+ it is the target of an ABS32 relocation, which will
+ point to the PLT entry. */
+ if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
+ h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
+
/* Make room for this entry. */
s->size += htab->plt_entry_size;
ibfd->filename);
}
+ /* Allocate space for the glue sections now that we've sized them. */
+ bfd_elf32_arm_allocate_interworking_sections (info);
+
/* The check_relocs and adjust_dynamic_symbol entry points have
determined the sizes of the various dynamic sections. Allocate
memory for them. */
struct bfd_link_info *info;
asection *sec;
int sec_shndx;
- bfd_boolean (*func) (void *, const char *, Elf_Internal_Sym *,
- asection *, struct elf_link_hash_entry *);
+ int (*func) (void *, const char *, Elf_Internal_Sym *,
+ asection *, struct elf_link_hash_entry *);
} output_arch_syminfo;
enum map_symbol_type
sym.st_other = 0;
sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
sym.st_shndx = osi->sec_shndx;
- if (!osi->func (osi->finfo, names[type], &sym, osi->sec, NULL))
- return FALSE;
- return TRUE;
+ return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
}
sym.st_other = 0;
sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
sym.st_shndx = osi->sec_shndx;
- if (!osi->func (osi->finfo, name, &sym, osi->sec, NULL))
- return FALSE;
- return TRUE;
+ return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
}
static bfd_boolean
bfd_vma addr;
char *stub_name;
output_arch_syminfo *osi;
+ const insn_sequence *template;
+ enum stub_insn_type prev_type;
+ int size;
+ int i;
+ enum map_symbol_type sym_type;
/* Massage our args to the form they really have. */
stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
addr = (bfd_vma) stub_entry->stub_offset;
stub_name = stub_entry->output_name;
- switch (stub_entry->stub_type)
+ template = stub_entry->stub_template;
+ switch (template[0].type)
{
- case arm_stub_long_branch:
- if (!elf32_arm_output_stub_sym (osi, stub_name, addr, 8))
- return FALSE;
- if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
- return FALSE;
- if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
- return FALSE;
- break;
- case arm_thumb_v4t_stub_long_branch:
- if (!elf32_arm_output_stub_sym (osi, stub_name, addr, 12))
- return FALSE;
- if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
- return FALSE;
- if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
+ case ARM_TYPE:
+ if (!elf32_arm_output_stub_sym (osi, stub_name, addr, stub_entry->stub_size))
return FALSE;
break;
- case arm_thumb_thumb_stub_long_branch:
- if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1, 16))
- return FALSE;
- if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr))
- return FALSE;
- if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
- return FALSE;
- break;
- case arm_thumb_arm_v4t_stub_long_branch:
- if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1, 20))
- return FALSE;
- if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr))
- return FALSE;
- if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 8))
- return FALSE;
- if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 16))
- return FALSE;
- break;
- case arm_thumb_arm_v4t_stub_short_branch:
- if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1, 8))
- return FALSE;
- if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 4))
- return FALSE;
- break;
- case arm_stub_pic_long_branch:
- if (!elf32_arm_output_stub_sym (osi, stub_name, addr, 12))
- return FALSE;
- if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
- return FALSE;
- if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
+ case THUMB16_TYPE:
+ case THUMB32_TYPE:
+ if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
+ stub_entry->stub_size))
return FALSE;
break;
default:
BFD_FAIL ();
+ return FALSE;
+ }
+
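+ /* Walk the stub template, emitting a mapping symbol whenever the type
+ of instruction changes.  Starting PREV_TYPE at DATA_TYPE ensures that
+ a mapping symbol is emitted for the first template entry.  */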
+ prev_type = DATA_TYPE;
+ size = 0;
+ for (i = 0; i < stub_entry->stub_template_size; i++)
+ {
+ switch (template[i].type)
+ {
+ case ARM_TYPE:
+ sym_type = ARM_MAP_ARM;
+ break;
+
+ case THUMB16_TYPE:
+ case THUMB32_TYPE:
+ sym_type = ARM_MAP_THUMB;
+ break;
+
+ case DATA_TYPE:
+ sym_type = ARM_MAP_DATA;
+ break;
+
+ default:
+ BFD_FAIL ();
+ return FALSE;
+ }
+
+ if (template[i].type != prev_type)
+ {
+ prev_type = template[i].type;
+ if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
+ return FALSE;
+ }
+
+ switch (template[i].type)
+ {
+ case ARM_TYPE:
+ case THUMB32_TYPE:
+ size += 4;
+ break;
+
+ case THUMB16_TYPE:
+ size += 2;
+ break;
+
+ case DATA_TYPE:
+ size += 4;
+ break;
+
+ default:
+ BFD_FAIL ();
+ return FALSE;
+ }
}
return TRUE;
elf32_arm_output_arch_local_syms (bfd *output_bfd,
struct bfd_link_info *info,
void *finfo,
- bfd_boolean (*func) (void *, const char *,
- Elf_Internal_Sym *,
- asection *,
- struct elf_link_hash_entry *))
+ int (*func) (void *, const char *,
+ Elf_Internal_Sym *,
+ asection *,
+ struct elf_link_hash_entry *))
{
output_arch_syminfo osi;
struct elf32_arm_link_hash_table *htab;
return 0;
}
+/* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
+
+static unsigned long
+offset_prel31 (unsigned long addr, bfd_vma offset)
+{
+ return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
+}
+
+/* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
+ relocations. */
+
+static void
+copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
+{
+ unsigned long first_word = bfd_get_32 (output_bfd, from);
+ unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
+
+ /* High bit of first word is supposed to be zero. */
+ if ((first_word & 0x80000000ul) == 0)
+ first_word = offset_prel31 (first_word, offset);
+
+ /* If the high bit of the second word is clear, and the bit pattern is not
+ 0x1 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry.  */
+ if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
+ second_word = offset_prel31 (second_word, offset);
+
+ bfd_put_32 (output_bfd, first_word, to);
+ bfd_put_32 (output_bfd, second_word, to + 4);
+}
+
+/* Data for make_branch_to_a8_stub(). */
+
+struct a8_branch_to_stub_data
+{
+ asection *writing_section;
+ bfd_byte *contents;
+};
+
+/* Helper to insert branches to Cortex-A8 erratum stubs in the right
+ places for a particular section. */
+
+static bfd_boolean
+make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
+ void *in_arg)
+{
+ struct elf32_arm_stub_hash_entry *stub_entry;
+ struct a8_branch_to_stub_data *data;
+ bfd_byte *contents;
+ unsigned long branch_insn;
+ bfd_vma veneered_insn_loc, veneer_entry_loc;
+ bfd_signed_vma branch_offset;
+ bfd *abfd;
+ unsigned int index;
+
+ stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
+ data = (struct a8_branch_to_stub_data *) in_arg;
+
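+ /* Only Cortex-A8 erratum stubs belonging to the section being written
+ are of interest here; the A8 stub types are assumed to be the ones
+ numbered from arm_stub_a8_veneer_b_cond onwards.  */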
+ if (stub_entry->target_section != data->writing_section
+ || stub_entry->stub_type < arm_stub_a8_veneer_b_cond)
+ return TRUE;
+
+ contents = data->contents;
+
+ veneered_insn_loc = stub_entry->target_section->output_section->vma
+ + stub_entry->target_section->output_offset
+ + stub_entry->target_value;
+
+ veneer_entry_loc = stub_entry->stub_sec->output_section->vma
+ + stub_entry->stub_sec->output_offset
+ + stub_entry->stub_offset;
+
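+ /* For a BLX veneer the branch offset is encoded relative to the
+ word-aligned PC (Align(PC, 4)), so align the address of the veneered
+ instruction down before computing the offset.  */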
+ if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
+ veneered_insn_loc &= ~3u;
+
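+ /* A Thumb branch reads the PC as the address of the branch instruction
+ plus 4, hence the -4 below.  */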
+ branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
+
+ abfd = stub_entry->target_section->owner;
+ index = stub_entry->target_value;
+
+ /* We attempt to avoid this condition by setting stubs_always_after_branch
+ in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
+ This check is just to be on the safe side... */
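+ (The test rejects a stub that shares a 4KB region with the branch
+ reaching it, since that could presumably recreate the conditions the
+ workaround is meant to break up.)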
+ if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
+ {
+ (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
+ "allocated in unsafe location"), abfd);
+ return FALSE;
+ }
+
+ switch (stub_entry->stub_type)
+ {
+ case arm_stub_a8_veneer_b:
+ case arm_stub_a8_veneer_b_cond:
+ branch_insn = 0xf0009000;
+ goto jump24;
+
+ case arm_stub_a8_veneer_blx:
+ branch_insn = 0xf000e800;
+ goto jump24;
+
+ case arm_stub_a8_veneer_bl:
+ {
+ unsigned int i1, j1, i2, j2, s;
+
+ branch_insn = 0xf000d000;
+
+ jump24:
+ if (branch_offset < -16777216 || branch_offset > 16777214)
+ {
+ /* There's not much we can do apart from complain if this
+ happens. */
+ (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
+ "of range (input file too large)"), abfd);
+ return FALSE;
+ }
+
+ /* i1 = not(j1 eor s), so:
+ not i1 = j1 eor s
+ j1 = (not i1) eor s. */
+
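+ /* BRANCH_INSN holds the first halfword of the Thumb-2 branch in its
+ upper 16 bits and the second halfword in its lower 16 bits: imm10 and
+ S go in bits 16-25 and 26, imm11, J2 and J1 in bits 0-10, 11 and 13.
+ The two halfwords are written out separately below.  */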
+ branch_insn |= (branch_offset >> 1) & 0x7ff;
+ branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
+ i2 = (branch_offset >> 22) & 1;
+ i1 = (branch_offset >> 23) & 1;
+ s = (branch_offset >> 24) & 1;
+ j1 = (!i1) ^ s;
+ j2 = (!i2) ^ s;
+ branch_insn |= j2 << 11;
+ branch_insn |= j1 << 13;
+ branch_insn |= s << 26;
+ }
+ break;
+
+ default:
+ BFD_FAIL ();
+ return FALSE;
+ }
+
+ bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[index]);
+ bfd_put_16 (abfd, branch_insn & 0xffff, &contents[index + 2]);
+
+ return TRUE;
+}
/* Do code byteswapping. Return FALSE afterwards so that the section is
written out as normal. */
asection *sec,
bfd_byte *contents)
{
- int mapcount, errcount;
+ unsigned int mapcount, errcount;
_arm_elf_section_data *arm_data;
struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
elf32_arm_section_map *map;
bfd_vma end;
bfd_vma offset = sec->output_section->vma + sec->output_offset;
bfd_byte tmp;
- int i;
+ unsigned int i;
/* If this section has not been allocated an _arm_elf_section_data
structure then we cannot record anything. */
}
}
+ if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
+ {
+ arm_unwind_table_edit *edit_node
+ = arm_data->u.exidx.unwind_edit_list;
+ /* Now, sec->size is the size of the section we will write.  The original
+ size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
+ markers) was sec->rawsize.  (If we performed no edits, rawsize will be
+ zero and we should use size instead.)  */
+ bfd_byte *edited_contents = bfd_malloc (sec->size);
+ unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
+ unsigned int in_index, out_index;
+ bfd_vma add_to_offsets = 0;
+
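+ /* Walk the input entries and the edit list in parallel.  ADD_TO_OFFSETS
+ records how far a copied entry has moved towards the start of the
+ section compared with where its relocations were applied: each deleted
+ input entry moves later entries up by 8 bytes, so their place-relative
+ (PREL31) offsets must grow by 8, while each inserted EXIDX_CANTUNWIND
+ entry moves them down by 8 and the offsets shrink accordingly.  */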
+ for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
+ {
+ if (edit_node)
+ {
+ unsigned int edit_index = edit_node->index;
+
+ if (in_index < edit_index && in_index * 8 < input_size)
+ {
+ copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
+ contents + in_index * 8, add_to_offsets);
+ out_index++;
+ in_index++;
+ }
+ else if (in_index == edit_index
+ || (in_index * 8 >= input_size
+ && edit_index == UINT_MAX))
+ {
+ switch (edit_node->type)
+ {
+ case DELETE_EXIDX_ENTRY:
+ in_index++;
+ add_to_offsets += 8;
+ break;
+
+ case INSERT_EXIDX_CANTUNWIND_AT_END:
+ {
+ asection *text_sec = edit_node->linked_section;
+ bfd_vma text_offset = text_sec->output_section->vma
+ + text_sec->output_offset
+ + text_sec->size;
+ bfd_vma exidx_offset = offset + out_index * 8;
+ unsigned long prel31_offset;
+
+ /* Note: this is meant to be equivalent to an
+ R_ARM_PREL31 relocation. These synthetic
+ EXIDX_CANTUNWIND markers are not relocated by the
+ usual BFD method. */
+ prel31_offset = (text_offset - exidx_offset)
+ & 0x7ffffffful;
+
+ /* First address we can't unwind. */
+ bfd_put_32 (output_bfd, prel31_offset,
+ &edited_contents[out_index * 8]);
+
+ /* Code for EXIDX_CANTUNWIND. */
+ bfd_put_32 (output_bfd, 0x1,
+ &edited_contents[out_index * 8 + 4]);
+
+ out_index++;
+ add_to_offsets -= 8;
+ }
+ break;
+ }
+
+ edit_node = edit_node->next;
+ }
+ }
+ else
+ {
+ /* No more edits, copy remaining entries verbatim. */
+ copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
+ contents + in_index * 8, add_to_offsets);
+ out_index++;
+ in_index++;
+ }
+ }
+
+ if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
+ bfd_set_section_contents (output_bfd, sec->output_section,
+ edited_contents,
+ (file_ptr) sec->output_offset, sec->size);
+
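+ /* Returning TRUE indicates the section has already been written, so
+ the generic code does not write out the unedited contents as well.  */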
+ return TRUE;
+ }
+
+ /* Fix code to point to Cortex-A8 erratum stubs. */
+ if (globals->fix_cortex_a8)
+ {
+ struct a8_branch_to_stub_data data;
+
+ data.writing_section = sec;
+ data.contents = contents;
+
+ bfd_hash_traverse (&globals->stub_hash_table, make_branch_to_a8_stub,
+ &data);
+ }
+
if (mapcount == 0)
return FALSE;
#define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
#define bfd_elf32_close_and_cleanup elf32_arm_close_and_cleanup
#define bfd_elf32_bfd_free_cached_info elf32_arm_bfd_free_cached_info
+#define bfd_elf32_bfd_final_link elf32_arm_final_link
#define elf_backend_get_symbol_type elf32_arm_get_symbol_type
#define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
#define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
#undef elf_backend_obj_attrs_section_type
#define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
+#define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
#include "elf32-target.h"