/* AVR-specific support for 32-bit ELF
- Copyright 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
- Free Software Foundation, Inc.
+ Copyright (C) 1999-2015 Free Software Foundation, Inc.
This file is part of BFD, the Binary File Descriptor library.
#include "elf-bfd.h"
#include "elf/avr.h"
#include "elf32-avr.h"
+#include "bfd_stdint.h"
/* Enable debugging printout at stdout with this variable. */
static bfd_boolean debug_relax = FALSE;
/* Enable debugging printout at stdout with this variable. */
static bfd_boolean debug_stubs = FALSE;
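+
+/* Forward declaration, needed by the R_AVR_DIFF* entries in the HOWTO
+ table below. */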
+static bfd_reloc_status_type
+bfd_elf_avr_diff_reloc (bfd *, arelent *, asymbol *, void *,
+ asection *, bfd *, char **);
+
/* Hash table initialization and handling. Code is taken from the hppa port
and adapted to the needs of AVR. */
/* We use two hash tables to hold information for linking avr objects.
- The first is the elf32_avr_link_hash_tablse which is derived from the
+ The first is the elf32_avr_link_hash_table which is derived from the
- stanard ELF linker hash table. We use this as a place to attach the other
+ standard ELF linker hash table. We use this as a place to attach the other
hash table and some static information.
/* Various hash macros and functions. */
#define avr_link_hash_table(p) \
/* PR 3874: Check that we have an AVR style hash table before using it. */\
- ((p)->hash->table.newfunc != elf32_avr_link_hash_newfunc ? NULL : \
- ((struct elf32_avr_link_hash_table *) ((p)->hash)))
+ (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
+ == AVR_ELF_DATA ? ((struct elf32_avr_link_hash_table *) ((p)->hash)) : NULL)
#define avr_stub_hash_entry(ent) \
((struct elf32_avr_stub_hash_entry *)(ent))
{
HOWTO (R_AVR_NONE, /* type */
0, /* rightshift */
- 2, /* size (0 = byte, 1 = short, 2 = long) */
- 32, /* bitsize */
+ 3, /* size (0 = byte, 1 = short, 2 = long) */
+ 0, /* bitsize */
FALSE, /* pc_relative */
0, /* bitpos */
- complain_overflow_bitfield, /* complain_on_overflow */
+ complain_overflow_dont, /* complain_on_overflow */
bfd_elf_generic_reloc, /* special_function */
"R_AVR_NONE", /* name */
FALSE, /* partial_inplace */
0xffff, /* dst_mask */
FALSE), /* pcrel_offset */
/* A low 8 bit absolute relocation of 24 bit program memory address.
- For LDI command. Will be changed when linker stubs are needed. */
+ For LDI command. Will be changed when linker stubs are needed. */
HOWTO (R_AVR_LO8_LDI_GS, /* type */
1, /* rightshift */
1, /* size (0 = byte, 1 = short, 2 = long) */
0xffff, /* dst_mask */
FALSE), /* pcrel_offset */
/* A low 8 bit absolute relocation of 24 bit program memory address.
- For LDI command. Will be changed when linker stubs are needed. */
+ For LDI command. Will be changed when linker stubs are needed. */
HOWTO (R_AVR_HI8_LDI_GS, /* type */
9, /* rightshift */
1, /* size (0 = byte, 1 = short, 2 = long) */
FALSE, /* partial_inplace */
0xffff, /* src_mask */
0xffff, /* dst_mask */
- FALSE) /* pcrel_offset */
+ FALSE), /* pcrel_offset */
+ /* 8 bit offset. */
+ HOWTO (R_AVR_8, /* type */
+ 0, /* rightshift */
+ 0, /* size (0 = byte, 1 = short, 2 = long) */
+ 8, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield,/* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "R_AVR_8", /* name */
+ FALSE, /* partial_inplace */
+ 0x000000ff, /* src_mask */
+ 0x000000ff, /* dst_mask */
+ FALSE), /* pcrel_offset */
+ /* lo8-part to use in .byte lo8(sym). */
+ HOWTO (R_AVR_8_LO8, /* type */
+ 0, /* rightshift */
+ 0, /* size (0 = byte, 1 = short, 2 = long) */
+ 8, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_dont,/* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "R_AVR_8_LO8", /* name */
+ FALSE, /* partial_inplace */
+ 0xffffff, /* src_mask */
+ 0xffffff, /* dst_mask */
+ FALSE), /* pcrel_offset */
+ /* hi8-part to use in .byte hi8(sym). */
+ HOWTO (R_AVR_8_HI8, /* type */
+ 8, /* rightshift */
+ 0, /* size (0 = byte, 1 = short, 2 = long) */
+ 8, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_dont,/* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "R_AVR_8_HI8", /* name */
+ FALSE, /* partial_inplace */
+ 0xffffff, /* src_mask */
+ 0xffffff, /* dst_mask */
+ FALSE), /* pcrel_offset */
+ /* hlo8-part to use in .byte hlo8(sym). */
+ HOWTO (R_AVR_8_HLO8, /* type */
+ 16, /* rightshift */
+ 0, /* size (0 = byte, 1 = short, 2 = long) */
+ 8, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_dont,/* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "R_AVR_8_HLO8", /* name */
+ FALSE, /* partial_inplace */
+ 0xffffff, /* src_mask */
+ 0xffffff, /* dst_mask */
+ FALSE), /* pcrel_offset */
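+ /* An 8 bit difference between two symbol values, written out by the
+ assembler and kept correct during linker relaxation (see
+ bfd_elf_avr_diff_reloc and elf32_avr_adjust_diff_reloc_value). */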
+ HOWTO (R_AVR_DIFF8, /* type */
+ 0, /* rightshift */
+ 0, /* size (0 = byte, 1 = short, 2 = long) */
+ 8, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield, /* complain_on_overflow */
+ bfd_elf_avr_diff_reloc, /* special_function */
+ "R_AVR_DIFF8", /* name */
+ FALSE, /* partial_inplace */
+ 0, /* src_mask */
+ 0xff, /* dst_mask */
+ FALSE), /* pcrel_offset */
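+ /* A 16 bit difference between two symbol values. */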
+ HOWTO (R_AVR_DIFF16, /* type */
+ 0, /* rightshift */
+ 1, /* size (0 = byte, 1 = short, 2 = long) */
+ 16, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield, /* complain_on_overflow */
+ bfd_elf_avr_diff_reloc,/* special_function */
+ "R_AVR_DIFF16", /* name */
+ FALSE, /* partial_inplace */
+ 0, /* src_mask */
+ 0xffff, /* dst_mask */
+ FALSE), /* pcrel_offset */
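+ /* A 32 bit difference between two symbol values. */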
+ HOWTO (R_AVR_DIFF32, /* type */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 32, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield, /* complain_on_overflow */
+ bfd_elf_avr_diff_reloc,/* special_function */
+ "R_AVR_DIFF32", /* name */
+ FALSE, /* partial_inplace */
+ 0, /* src_mask */
+ 0xffffffff, /* dst_mask */
+ FALSE), /* pcrel_offset */
+ /* 7 bit immediate for LDS/STS in Tiny core. */
+ HOWTO (R_AVR_LDS_STS_16, /* type */
+ 0, /* rightshift */
+ 1, /* size (0 = byte, 1 = short, 2 = long) */
+ 7, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_dont,/* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "R_AVR_LDS_STS_16", /* name */
+ FALSE, /* partial_inplace */
+ 0xffff, /* src_mask */
+ 0xffff, /* dst_mask */
+ FALSE), /* pcrel_offset */
+
+ HOWTO (R_AVR_PORT6, /* type */
+ 0, /* rightshift */
+ 0, /* size (0 = byte, 1 = short, 2 = long) */
+ 6, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_dont,/* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "R_AVR_PORT6", /* name */
+ FALSE, /* partial_inplace */
+ 0xffffff, /* src_mask */
+ 0xffffff, /* dst_mask */
+ FALSE), /* pcrel_offset */
+ HOWTO (R_AVR_PORT5, /* type */
+ 0, /* rightshift */
+ 0, /* size (0 = byte, 1 = short, 2 = long) */
+ 5, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_dont,/* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ "R_AVR_PORT5", /* name */
+ FALSE, /* partial_inplace */
+ 0xffffff, /* src_mask */
+ 0xffffff, /* dst_mask */
+ FALSE) /* pcrel_offset */
};
/* Map BFD reloc types to AVR ELF reloc types. */
{ BFD_RELOC_AVR_CALL, R_AVR_CALL },
{ BFD_RELOC_AVR_LDI, R_AVR_LDI },
{ BFD_RELOC_AVR_6, R_AVR_6 },
- { BFD_RELOC_AVR_6_ADIW, R_AVR_6_ADIW }
+ { BFD_RELOC_AVR_6_ADIW, R_AVR_6_ADIW },
+ { BFD_RELOC_8, R_AVR_8 },
+ { BFD_RELOC_AVR_8_LO, R_AVR_8_LO8 },
+ { BFD_RELOC_AVR_8_HI, R_AVR_8_HI8 },
+ { BFD_RELOC_AVR_8_HLO, R_AVR_8_HLO8 },
+ { BFD_RELOC_AVR_DIFF8, R_AVR_DIFF8 },
+ { BFD_RELOC_AVR_DIFF16, R_AVR_DIFF16 },
+ { BFD_RELOC_AVR_DIFF32, R_AVR_DIFF32 },
+ { BFD_RELOC_AVR_LDS_STS_16, R_AVR_LDS_STS_16 },
+ { BFD_RELOC_AVR_PORT6, R_AVR_PORT6 },
+ { BFD_RELOC_AVR_PORT5, R_AVR_PORT5 }
};
/* Meant to be filled one day with the wrap around address for the
instruction. This option could be switched off by a linker switch. */
static int avr_replace_call_ret_sequences = 1;
\f
+
+/* Per-section relaxation related information for avr. */
+
+struct avr_relax_info
+{
+ /* Track the avr property records that apply to this section. */
+
+ struct
+ {
+ /* Number of records in the list. */
+ unsigned count;
+
+ /* How many records worth of space have we allocated. */
+ unsigned allocated;
+
+ /* The records, only COUNT records are initialised. */
+ struct avr_property_record *items;
+ } records;
+};
+
+/* Per section data, specialised for avr. */
+
+struct elf_avr_section_data
+{
+ /* The standard data must appear first. */
+ struct bfd_elf_section_data elf;
+
+ /* Relaxation related information. */
+ struct avr_relax_info relax_info;
+};
+
+/* Possibly initialise avr specific data for new section SEC from ABFD. */
+
+static bfd_boolean
+elf_avr_new_section_hook (bfd *abfd, asection *sec)
+{
+ if (!sec->used_by_bfd)
+ {
+ struct elf_avr_section_data *sdata;
+ bfd_size_type amt = sizeof (*sdata);
+
+ sdata = bfd_zalloc (abfd, amt);
+ if (sdata == NULL)
+ return FALSE;
+ sec->used_by_bfd = sdata;
+ }
+
+ return _bfd_elf_new_section_hook (abfd, sec);
+}
+
+/* Return a pointer to the relaxation information for SEC. */
+
+static struct avr_relax_info *
+get_avr_relax_info (asection *sec)
+{
+ struct elf_avr_section_data *section_data;
+
+ /* No info available if no section or if it is an output section. */
+ if (!sec || sec == sec->output_section)
+ return NULL;
+
+ section_data = (struct elf_avr_section_data *) elf_section_data (sec);
+ return &section_data->relax_info;
+}
+
+/* Initialise the per section relaxation information for SEC. */
+
+static void
+init_avr_relax_info (asection *sec)
+{
+ struct avr_relax_info *relax_info = get_avr_relax_info (sec);
+
+ relax_info->records.count = 0;
+ relax_info->records.allocated = 0;
+ relax_info->records.items = NULL;
+}
+
/* Initialize an entry in the stub hash table. */
static struct bfd_hash_entry *
return _bfd_elf_link_hash_newfunc (entry, table, string);
}
+/* Free the derived linker hash table. */
+
+static void
+elf32_avr_link_hash_table_free (bfd *obfd)
+{
+ struct elf32_avr_link_hash_table *htab
+ = (struct elf32_avr_link_hash_table *) obfd->link.hash;
+
+ /* Free the address mapping table. */
+ if (htab->amt_stub_offsets != NULL)
+ free (htab->amt_stub_offsets);
+ if (htab->amt_destination_addr != NULL)
+ free (htab->amt_destination_addr);
+
+ bfd_hash_table_free (&htab->bstab);
+ _bfd_elf_link_hash_table_free (obfd);
+}
+
/* Create the derived linker hash table. The AVR ELF port uses the derived
hash table to keep information specific to the AVR ELF linker (without
using static variables). */
struct elf32_avr_link_hash_table *htab;
bfd_size_type amt = sizeof (*htab);
- htab = bfd_malloc (amt);
+ htab = bfd_zmalloc (amt);
if (htab == NULL)
return NULL;
if (!_bfd_elf_link_hash_table_init (&htab->etab, abfd,
elf32_avr_link_hash_newfunc,
- sizeof (struct elf_link_hash_entry)))
+ sizeof (struct elf_link_hash_entry),
+ AVR_ELF_DATA))
{
free (htab);
return NULL;
/* Init the stub hash table too. */
if (!bfd_hash_table_init (&htab->bstab, stub_hash_newfunc,
sizeof (struct elf32_avr_stub_hash_entry)))
- return NULL;
-
- htab->stub_bfd = NULL;
- htab->stub_sec = NULL;
-
- /* Initialize the address mapping table. */
- htab->amt_stub_offsets = NULL;
- htab->amt_destination_addr = NULL;
- htab->amt_entry_cnt = 0;
- htab->amt_max_entry_cnt = 0;
+ {
+ _bfd_elf_link_hash_table_free (abfd);
+ return NULL;
+ }
+ htab->etab.root.hash_table_free = elf32_avr_link_hash_table_free;
return &htab->etab.root;
}
-/* Free the derived linker hash table. */
-
-static void
-elf32_avr_link_hash_table_free (struct bfd_link_hash_table *btab)
-{
- struct elf32_avr_link_hash_table *htab
- = (struct elf32_avr_link_hash_table *) btab;
-
- /* Free the address mapping table. */
- if (htab->amt_stub_offsets != NULL)
- free (htab->amt_stub_offsets);
- if (htab->amt_destination_addr != NULL)
- free (htab->amt_destination_addr);
-
- bfd_hash_table_free (&htab->bstab);
- _bfd_generic_link_hash_table_free (btab);
-}
-
/* Calculates the effective distance of a pc relative jump/call. */
static int
unsigned int r_type;
r_type = ELF32_R_TYPE (dst->r_info);
- BFD_ASSERT (r_type < (unsigned int) R_AVR_max);
- cache_ptr->howto = &elf_avr_howto_table[r_type];
-}
-
-/* Look through the relocs for a section during the first phase.
- Since we don't do .gots or .plts, we just need to consider the
- virtual table relocs for gc. */
-
-static bfd_boolean
-elf32_avr_check_relocs (bfd *abfd,
- struct bfd_link_info *info,
- asection *sec,
- const Elf_Internal_Rela *relocs)
-{
- Elf_Internal_Shdr *symtab_hdr;
- struct elf_link_hash_entry **sym_hashes;
- const Elf_Internal_Rela *rel;
- const Elf_Internal_Rela *rel_end;
-
- if (info->relocatable)
- return TRUE;
-
- symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
- sym_hashes = elf_sym_hashes (abfd);
-
- rel_end = relocs + sec->reloc_count;
- for (rel = relocs; rel < rel_end; rel++)
+ if (r_type >= (unsigned int) R_AVR_max)
{
- struct elf_link_hash_entry *h;
- unsigned long r_symndx;
-
- r_symndx = ELF32_R_SYM (rel->r_info);
- if (r_symndx < symtab_hdr->sh_info)
- h = NULL;
- else
- {
- h = sym_hashes[r_symndx - symtab_hdr->sh_info];
- while (h->root.type == bfd_link_hash_indirect
- || h->root.type == bfd_link_hash_warning)
- h = (struct elf_link_hash_entry *) h->root.u.i.link;
- }
+ _bfd_error_handler (_("%B: invalid AVR reloc number: %d"), abfd, r_type);
+ r_type = 0;
}
-
- return TRUE;
+ cache_ptr->howto = &elf_avr_howto_table[r_type];
}
static bfd_boolean
return 0x020000;
}
+/* Perform a diff relocation. Nothing to do, as the difference value is already
+ written into the section's contents. */
+
+static bfd_reloc_status_type
+bfd_elf_avr_diff_reloc (bfd *abfd ATTRIBUTE_UNUSED,
+ arelent *reloc_entry ATTRIBUTE_UNUSED,
+ asymbol *symbol ATTRIBUTE_UNUSED,
+ void *data ATTRIBUTE_UNUSED,
+ asection *input_section ATTRIBUTE_UNUSED,
+ bfd *output_bfd ATTRIBUTE_UNUSED,
+ char **error_message ATTRIBUTE_UNUSED)
+{
+ return bfd_reloc_ok;
+}
+
+
/* Perform a single relocation. By default we use the standard BFD
routines, but a few relocs, we have to do them ourselves. */
bfd_put_16 (input_bfd, (bfd_vma) srel &0x00ffff, contents);
break;
+ case R_AVR_DIFF8:
+ case R_AVR_DIFF16:
+ case R_AVR_DIFF32:
+ /* Nothing to do here, as contents already contains the diff value. */
+ r = bfd_reloc_ok;
+ break;
+
+ case R_AVR_LDS_STS_16:
+ contents += rel->r_offset;
+ srel = (bfd_signed_vma) relocation + rel->r_addend;
+ if ((srel & 0xFFFF) < 0x40 || (srel & 0xFFFF) > 0xbf)
+ return bfd_reloc_outofrange;
+ srel = srel & 0x7f;
+ x = bfd_get_16 (input_bfd, contents);
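+ /* Scatter the 7-bit address into the 16-bit opcode: address bits 3..0
+ go to opcode bits 3..0, bits 5..4 to opcode bits 10..9, and bit 6 to
+ opcode bit 8. */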
+ x |= (srel & 0x0f) | ((srel & 0x30) << 5) | ((srel & 0x40) << 2);
+ bfd_put_16 (input_bfd, x, contents);
+ break;
+
+ case R_AVR_PORT6:
+ contents += rel->r_offset;
+ srel = (bfd_signed_vma) relocation + rel->r_addend;
+ if ((srel & 0xffff) > 0x3f)
+ return bfd_reloc_outofrange;
+ x = bfd_get_16 (input_bfd, contents);
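+ /* Insert the 6-bit I/O address into the IN/OUT style opcode: address
+ bits 3..0 go to opcode bits 3..0 and bits 5..4 to opcode bits 10..9. */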
+ x = (x & 0xf9f0) | ((srel & 0x30) << 5) | (srel & 0x0f);
+ bfd_put_16 (input_bfd, x, contents);
+ break;
+
+ case R_AVR_PORT5:
+ contents += rel->r_offset;
+ srel = (bfd_signed_vma) relocation + rel->r_addend;
+ if ((srel & 0xffff) > 0x1f)
+ return bfd_reloc_outofrange;
+ x = bfd_get_16 (input_bfd, contents);
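+ /* Insert the 5-bit I/O address into the SBI/CBI/SBIC/SBIS style opcode:
+ address bits 4..0 go to opcode bits 7..3. */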
+ x = (x & 0xff07) | ((srel & 0x1f) << 3);
+ bfd_put_16 (input_bfd, x, contents);
+ break;
+
default:
r = _bfd_final_link_relocate (howto, input_bfd, input_section,
contents, rel->r_offset,
Elf_Internal_Rela * relend;
struct elf32_avr_link_hash_table * htab = avr_link_hash_table (info);
+ if (htab == NULL)
+ return FALSE;
+
symtab_hdr = & elf_tdata (input_bfd)->symtab_hdr;
sym_hashes = elf_sym_hashes (input_bfd);
relend = relocs + input_section->reloc_count;
r_type = ELF32_R_TYPE (rel->r_info);
r_symndx = ELF32_R_SYM (rel->r_info);
- howto = elf_avr_howto_table + ELF32_R_TYPE (rel->r_info);
+ howto = elf_avr_howto_table + r_type;
h = NULL;
sym = NULL;
sec = NULL;
}
else
{
- bfd_boolean unresolved_reloc, warned;
+ bfd_boolean unresolved_reloc, warned, ignored;
RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
r_symndx, symtab_hdr, sym_hashes,
h, sec, relocation,
- unresolved_reloc, warned);
+ unresolved_reloc, warned, ignored);
name = h->root.root.string;
}
- if (sec != NULL && elf_discarded_section (sec))
- {
- /* For relocs against symbols from removed linkonce sections,
- or sections discarded by a linker script, we just want the
- section contents zeroed. Avoid any special processing. */
- _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
- rel->r_info = 0;
- rel->r_addend = 0;
- continue;
- }
+ if (sec != NULL && discarded_section (sec))
+ RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
+ rel, 1, relend, howto, 0, contents);
if (info->relocatable)
continue;
case bfd_mach_avr6:
val = E_AVR_MACH_AVR6;
break;
+
+ case bfd_mach_avrxmega1:
+ val = E_AVR_MACH_XMEGA1;
+ break;
+
+ case bfd_mach_avrxmega2:
+ val = E_AVR_MACH_XMEGA2;
+ break;
+
+ case bfd_mach_avrxmega3:
+ val = E_AVR_MACH_XMEGA3;
+ break;
+
+ case bfd_mach_avrxmega4:
+ val = E_AVR_MACH_XMEGA4;
+ break;
+
+ case bfd_mach_avrxmega5:
+ val = E_AVR_MACH_XMEGA5;
+ break;
+
+ case bfd_mach_avrxmega6:
+ val = E_AVR_MACH_XMEGA6;
+ break;
+
+ case bfd_mach_avrxmega7:
+ val = E_AVR_MACH_XMEGA7;
+ break;
+
+ case bfd_mach_avrtiny:
+ val = E_AVR_MACH_AVRTINY;
+ break;
}
elf_elfheader (abfd)->e_machine = EM_AVR;
elf_elfheader (abfd)->e_flags &= ~ EF_AVR_MACH;
elf_elfheader (abfd)->e_flags |= val;
- elf_elfheader (abfd)->e_flags |= EF_AVR_LINKRELAX_PREPARED;
}
/* Set the right machine number. */
case E_AVR_MACH_AVR6:
e_set = bfd_mach_avr6;
break;
+
+ case E_AVR_MACH_XMEGA1:
+ e_set = bfd_mach_avrxmega1;
+ break;
+
+ case E_AVR_MACH_XMEGA2:
+ e_set = bfd_mach_avrxmega2;
+ break;
+
+ case E_AVR_MACH_XMEGA3:
+ e_set = bfd_mach_avrxmega3;
+ break;
+
+ case E_AVR_MACH_XMEGA4:
+ e_set = bfd_mach_avrxmega4;
+ break;
+
+ case E_AVR_MACH_XMEGA5:
+ e_set = bfd_mach_avrxmega5;
+ break;
+
+ case E_AVR_MACH_XMEGA6:
+ e_set = bfd_mach_avrxmega6;
+ break;
+
+ case E_AVR_MACH_XMEGA7:
+ e_set = bfd_mach_avrxmega7;
+ break;
+
+ case E_AVR_MACH_AVRTINY:
+ e_set = bfd_mach_avrtiny;
+ break;
}
}
return bfd_default_set_arch_mach (abfd, bfd_arch_avr,
e_set);
}
+/* Returns whether the relocation type passed is a diff reloc. */
+
+static bfd_boolean
+elf32_avr_is_diff_reloc (Elf_Internal_Rela *irel)
+{
+ return (ELF32_R_TYPE (irel->r_info) == R_AVR_DIFF8
+ || ELF32_R_TYPE (irel->r_info) == R_AVR_DIFF16
+ || ELF32_R_TYPE (irel->r_info) == R_AVR_DIFF32);
+}
+
+/* Reduce the diff value written in the section by count if the shrinked
+ insn address happens to fall between the two symbols for which this
+ diff reloc was emitted. */
+
+static void
+elf32_avr_adjust_diff_reloc_value (bfd *abfd,
+ struct bfd_section *isec,
+ Elf_Internal_Rela *irel,
+ bfd_vma symval,
+ bfd_vma shrinked_insn_address,
+ int count)
+{
+ unsigned char *reloc_contents = NULL;
+ unsigned char *isec_contents = elf_section_data (isec)->this_hdr.contents;
+ if (isec_contents == NULL)
+ {
+ if (! bfd_malloc_and_get_section (abfd, isec, &isec_contents))
+ return;
+
+ elf_section_data (isec)->this_hdr.contents = isec_contents;
+ }
+
+ reloc_contents = isec_contents + irel->r_offset;
+
+ /* Read value written in object file. */
+ bfd_vma x = 0;
+ switch (ELF32_R_TYPE (irel->r_info))
+ {
+ case R_AVR_DIFF8:
+ {
+ x = *reloc_contents;
+ break;
+ }
+ case R_AVR_DIFF16:
+ {
+ x = bfd_get_16 (abfd, reloc_contents);
+ break;
+ }
+ case R_AVR_DIFF32:
+ {
+ x = bfd_get_32 (abfd, reloc_contents);
+ break;
+ }
+ default:
+ {
+ BFD_FAIL();
+ }
+ }
+
+ /* For a diff reloc sym1 - sym2 the diff at assembly time (x) is written
+ into the object file at the reloc offset. sym2's logical value is
+ symval (<start_of_section>) + reloc addend. Compute the start and end
+ addresses and check if the shrinked insn falls between sym1 and sym2. */
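+ /* For example: if the two symbols are 0x10 bytes apart (x == 0x10) and
+ COUNT == 2 bytes are deleted at an address between them, the stored
+ difference must become 0x10 - 2 == 0x0e. */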
+
+ bfd_vma end_address = symval + irel->r_addend;
+ bfd_vma start_address = end_address - x;
+
+ /* Reduce the diff value by count bytes and write it back into section
+ contents. */
+
+ if (shrinked_insn_address >= start_address
+ && shrinked_insn_address <= end_address)
+ {
+ switch (ELF32_R_TYPE (irel->r_info))
+ {
+ case R_AVR_DIFF8:
+ {
+ *reloc_contents = (x - count);
+ break;
+ }
+ case R_AVR_DIFF16:
+ {
+ bfd_put_16 (abfd, (x - count) & 0xFFFF, reloc_contents);
+ break;
+ }
+ case R_AVR_DIFF32:
+ {
+ bfd_put_32 (abfd, (x - count) & 0xFFFFFFFF, reloc_contents);
+ break;
+ }
+ default:
+ {
+ BFD_FAIL();
+ }
+ }
+
+ }
+}
/* Delete some bytes from a section while changing the size of an instruction.
The parameter "addr" denotes the section-relative offset pointing just
unsigned int sec_shndx;
bfd_byte *contents;
Elf_Internal_Rela *irel, *irelend;
- Elf_Internal_Rela *irelalign;
Elf_Internal_Sym *isym;
Elf_Internal_Sym *isymbuf = NULL;
bfd_vma toaddr;
struct elf_link_hash_entry **sym_hashes;
struct elf_link_hash_entry **end_hashes;
unsigned int symcount;
+ struct avr_relax_info *relax_info;
+ struct avr_property_record *prop_record = NULL;
symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
sec_shndx = _bfd_elf_section_from_bfd_section (abfd, sec);
contents = elf_section_data (sec)->this_hdr.contents;
+ relax_info = get_avr_relax_info (sec);
- /* The deletion must stop at the next ALIGN reloc for an aligment
- power larger than the number of bytes we are deleting. */
-
- irelalign = NULL;
toaddr = sec->size;
+ if (relax_info->records.count > 0)
+ {
+ /* There should be no property record within the range of deleted
+ bytes; however, there might be a property record for ADDR, since that
+ is how we handle alignment directives.
+ Find the next (if any) property record after the deleted bytes. */
+ unsigned int i;
+
+ for (i = 0; i < relax_info->records.count; ++i)
+ {
+ bfd_vma offset = relax_info->records.items [i].offset;
+
+ BFD_ASSERT (offset <= addr || offset >= (addr + count));
+ if (offset >= (addr + count))
+ {
+ prop_record = &relax_info->records.items [i];
+ toaddr = offset;
+ break;
+ }
+ }
+ }
+
irel = elf_section_data (sec)->relocs;
irelend = irel + sec->reloc_count;
if (toaddr - addr - count > 0)
memmove (contents + addr, contents + addr + count,
(size_t) (toaddr - addr - count));
- sec->size -= count;
+ if (prop_record == NULL)
+ sec->size -= count;
+ else
+ {
+ /* Use the property record to fill in the bytes we've opened up. */
+ int fill = 0;
+ switch (prop_record->type)
+ {
+ case RECORD_ORG_AND_FILL:
+ fill = prop_record->data.org.fill;
+ /* Fall through. */
+ case RECORD_ORG:
+ break;
+ case RECORD_ALIGN_AND_FILL:
+ fill = prop_record->data.align.fill;
+ /* Fall through. */
+ case RECORD_ALIGN:
+ prop_record->data.align.preceding_deleted += count;
+ break;
+ };
+ memset (contents + toaddr - count, fill, count);
+
+ /* Adjust the TOADDR to avoid moving symbols located at the address
+ of the property record, which has not moved. */
+ toaddr -= count;
+ }
/* Adjust all the reloc addresses. */
for (irel = elf_section_data (sec)->relocs; irel < irelend; irel++)
{
bfd_vma old_reloc_address;
- bfd_vma shrinked_insn_address;
old_reloc_address = (sec->output_section->vma
+ sec->output_offset + irel->r_offset);
- shrinked_insn_address = (sec->output_section->vma
- + sec->output_offset + addr - count);
/* Get the new reloc address. */
if ((irel->r_offset > addr
bfd_vma symval;
bfd_vma shrinked_insn_address;
+ if (isec->reloc_count == 0)
+ continue;
+
shrinked_insn_address = (sec->output_section->vma
+ sec->output_offset + addr - count);
- irelend = elf_section_data (isec)->relocs + isec->reloc_count;
- for (irel = elf_section_data (isec)->relocs;
+ irel = elf_section_data (isec)->relocs;
+ /* PR 12161: Read in the relocs for this section if necessary. */
+ if (irel == NULL)
+ irel = _bfd_elf_link_read_relocs (abfd, isec, NULL, NULL, TRUE);
+
+ for (irelend = irel + isec->reloc_count;
irel < irelend;
irel++)
{
if (symval <= shrinked_insn_address
&& (symval + irel->r_addend) > shrinked_insn_address)
{
+ if (elf32_avr_is_diff_reloc (irel))
+ {
+ elf32_avr_adjust_diff_reloc_value (abfd, isec, irel,
+ symval,
+ shrinked_insn_address,
+ count);
+ }
+
irel->r_addend -= count;
if (debug_relax)
isymend = isym + symtab_hdr->sh_info;
for (; isym < isymend; isym++)
{
- if (isym->st_shndx == sec_shndx
- && isym->st_value > addr
- && isym->st_value < toaddr)
- isym->st_value -= count;
+ if (isym->st_shndx == sec_shndx)
+ {
+ if (isym->st_value > addr
+ && isym->st_value <= toaddr)
+ isym->st_value -= count;
+
+ if (isym->st_value <= addr
+ && isym->st_value + isym->st_size > addr)
+ {
+ /* If this assert fires then we have a symbol that ends
+ part way through an instruction. Does that make
+ sense? */
+ BFD_ASSERT (isym->st_value + isym->st_size >= addr + count);
+ isym->st_size -= count;
+ }
+ }
}
}
struct elf_link_hash_entry *sym_hash = *sym_hashes;
if ((sym_hash->root.type == bfd_link_hash_defined
|| sym_hash->root.type == bfd_link_hash_defweak)
- && sym_hash->root.u.def.section == sec
- && sym_hash->root.u.def.value > addr
- && sym_hash->root.u.def.value < toaddr)
+ && sym_hash->root.u.def.section == sec)
{
- sym_hash->root.u.def.value -= count;
+ if (sym_hash->root.u.def.value > addr
+ && sym_hash->root.u.def.value <= toaddr)
+ sym_hash->root.u.def.value -= count;
+
+ if (sym_hash->root.u.def.value <= addr
+ && (sym_hash->root.u.def.value + sym_hash->size > addr))
+ {
+ /* If this assert fires then we have a symbol that ends
+ part way through an instruction. Does that make
+ sense? */
+ BFD_ASSERT (sym_hash->root.u.def.value + sym_hash->size
+ >= addr + count);
+ sym_hash->size -= count;
+ }
}
}
return TRUE;
}
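+
+/* Return the local symbols for INPUT_BFD, reading them from the file and
+ caching them on the symtab header so that they are only read once. */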
+static Elf_Internal_Sym *
+retrieve_local_syms (bfd *input_bfd)
+{
+ Elf_Internal_Shdr *symtab_hdr;
+ Elf_Internal_Sym *isymbuf;
+ size_t locsymcount;
+
+ symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
+ locsymcount = symtab_hdr->sh_info;
+
+ isymbuf = (Elf_Internal_Sym *) symtab_hdr->contents;
+ if (isymbuf == NULL && locsymcount != 0)
+ isymbuf = bfd_elf_get_elf_syms (input_bfd, symtab_hdr, locsymcount, 0,
+ NULL, NULL, NULL);
+
+ /* Save the symbols for this input file so they won't be read again. */
+ if (isymbuf && isymbuf != (Elf_Internal_Sym *) symtab_hdr->contents)
+ symtab_hdr->contents = (unsigned char *) isymbuf;
+
+ return isymbuf;
+}
+
+/* Get the input section for a given symbol index.
+ If the symbol is:
+ . a section symbol, return the section;
+ . a common symbol, return the common section;
+ . an undefined symbol, return the undefined section;
+ . an indirect symbol, follow the links;
+ . an absolute value, return the absolute section. */
+
+static asection *
+get_elf_r_symndx_section (bfd *abfd, unsigned long r_symndx)
+{
+ Elf_Internal_Shdr *symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
+ asection *target_sec = NULL;
+ if (r_symndx < symtab_hdr->sh_info)
+ {
+ Elf_Internal_Sym *isymbuf;
+ unsigned int section_index;
+
+ isymbuf = retrieve_local_syms (abfd);
+ section_index = isymbuf[r_symndx].st_shndx;
+
+ if (section_index == SHN_UNDEF)
+ target_sec = bfd_und_section_ptr;
+ else if (section_index == SHN_ABS)
+ target_sec = bfd_abs_section_ptr;
+ else if (section_index == SHN_COMMON)
+ target_sec = bfd_com_section_ptr;
+ else
+ target_sec = bfd_section_from_elf_index (abfd, section_index);
+ }
+ else
+ {
+ unsigned long indx = r_symndx - symtab_hdr->sh_info;
+ struct elf_link_hash_entry *h = elf_sym_hashes (abfd)[indx];
+
+ while (h->root.type == bfd_link_hash_indirect
+ || h->root.type == bfd_link_hash_warning)
+ h = (struct elf_link_hash_entry *) h->root.u.i.link;
+
+ switch (h->root.type)
+ {
+ case bfd_link_hash_defined:
+ case bfd_link_hash_defweak:
+ target_sec = h->root.u.def.section;
+ break;
+ case bfd_link_hash_common:
+ target_sec = bfd_com_section_ptr;
+ break;
+ case bfd_link_hash_undefined:
+ case bfd_link_hash_undefweak:
+ target_sec = bfd_und_section_ptr;
+ break;
+ default: /* New indirect warning. */
+ target_sec = bfd_und_section_ptr;
+ break;
+ }
+ }
+ return target_sec;
+}
+
+/* Get the section-relative offset for a symbol number. */
+
+static bfd_vma
+get_elf_r_symndx_offset (bfd *abfd, unsigned long r_symndx)
+{
+ Elf_Internal_Shdr *symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
+ bfd_vma offset = 0;
+
+ if (r_symndx < symtab_hdr->sh_info)
+ {
+ Elf_Internal_Sym *isymbuf;
+ isymbuf = retrieve_local_syms (abfd);
+ offset = isymbuf[r_symndx].st_value;
+ }
+ else
+ {
+ unsigned long indx = r_symndx - symtab_hdr->sh_info;
+ struct elf_link_hash_entry *h =
+ elf_sym_hashes (abfd)[indx];
+
+ while (h->root.type == bfd_link_hash_indirect
+ || h->root.type == bfd_link_hash_warning)
+ h = (struct elf_link_hash_entry *) h->root.u.i.link;
+ if (h->root.type == bfd_link_hash_defined
+ || h->root.type == bfd_link_hash_defweak)
+ offset = h->root.u.def.value;
+ }
+ return offset;
+}
+
+/* Iterate over the property records in R_LIST, and copy each record into
+ the list of records within the relaxation information for the section to
+ which the record applies. */
+
+static void
+avr_elf32_assign_records_to_sections (struct avr_property_record_list *r_list)
+{
+ unsigned int i;
+
+ for (i = 0; i < r_list->record_count; ++i)
+ {
+ struct avr_relax_info *relax_info;
+
+ relax_info = get_avr_relax_info (r_list->records [i].section);
+ BFD_ASSERT (relax_info != NULL);
+
+ if (relax_info->records.count
+ == relax_info->records.allocated)
+ {
+ /* Allocate more space. */
+ bfd_size_type size;
+
+ relax_info->records.allocated += 10;
+ size = (sizeof (struct avr_property_record)
+ * relax_info->records.allocated);
+ relax_info->records.items
+ = bfd_realloc (relax_info->records.items, size);
+ }
+
+ memcpy (&relax_info->records.items [relax_info->records.count],
+ &r_list->records [i],
+ sizeof (struct avr_property_record));
+ relax_info->records.count++;
+ }
+}
+
+/* Compare two STRUCT AVR_PROPERTY_RECORD in AP and BP, used as the
+ ordering callback from QSORT. */
+
+static int
+avr_property_record_compare (const void *ap, const void *bp)
+{
+ const struct avr_property_record *a
+ = (struct avr_property_record *) ap;
+ const struct avr_property_record *b
+ = (struct avr_property_record *) bp;
+
+ if (a->offset != b->offset)
+ return (a->offset - b->offset);
+
+ if (a->section != b->section)
+ return (bfd_get_section_vma (a->section->owner, a->section)
+ - bfd_get_section_vma (b->section->owner, b->section));
+
+ return (a->type - b->type);
+}
+
+/* Load all of the avr property sections from all of the bfd objects
+ referenced from LINK_INFO. All of the records within each property
+ section are assigned to the STRUCT AVR_RELAX_INFO within the section
+ specific data of the appropriate section. */
+
+static void
+avr_load_all_property_sections (struct bfd_link_info *link_info)
+{
+ bfd *abfd;
+ asection *sec;
+
+ /* Initialize the per-section relaxation info. */
+ for (abfd = link_info->input_bfds; abfd != NULL; abfd = abfd->link.next)
+ for (sec = abfd->sections; sec != NULL; sec = sec->next)
+ {
+ init_avr_relax_info (sec);
+ }
+
+ /* Load the descriptor tables from .avr.prop sections. */
+ for (abfd = link_info->input_bfds; abfd != NULL; abfd = abfd->link.next)
+ {
+ struct avr_property_record_list *r_list;
+
+ r_list = avr_elf32_load_property_records (abfd);
+ if (r_list != NULL)
+ avr_elf32_assign_records_to_sections (r_list);
+
+ free (r_list);
+ }
+
+ /* Now, for every section, ensure that the descriptor list in the
+ relaxation data is sorted by ascending offset within the section. */
+ for (abfd = link_info->input_bfds; abfd != NULL; abfd = abfd->link.next)
+ for (sec = abfd->sections; sec != NULL; sec = sec->next)
+ {
+ struct avr_relax_info *relax_info = get_avr_relax_info (sec);
+ if (relax_info && relax_info->records.count > 0)
+ {
+ unsigned int i;
+
+ qsort (relax_info->records.items,
+ relax_info->records.count,
+ sizeof (struct avr_property_record),
+ avr_property_record_compare);
+
+ /* For debug purposes, list all the descriptors. */
+ for (i = 0; i < relax_info->records.count; ++i)
+ {
+ switch (relax_info->records.items [i].type)
+ {
+ case RECORD_ORG:
+ break;
+ case RECORD_ORG_AND_FILL:
+ break;
+ case RECORD_ALIGN:
+ break;
+ case RECORD_ALIGN_AND_FILL:
+ break;
+ };
+ }
+ }
+ }
+}
+
/* This function handles relaxing for the avr.
Many important relaxing opportunities within functions are already
realized by the compiler itself.
Elf_Internal_Rela *irel, *irelend;
bfd_byte *contents = NULL;
Elf_Internal_Sym *isymbuf = NULL;
- static asection *last_input_section = NULL;
- static Elf_Internal_Rela *last_reloc = NULL;
struct elf32_avr_link_hash_table *htab;
+ static bfd_boolean relaxation_initialised = FALSE;
+
+ if (!relaxation_initialised)
+ {
+ relaxation_initialised = TRUE;
+
+ /* Load entries from the .avr.prop sections. */
+ avr_load_all_property_sections (link_info);
+ }
+
+ /* If 'shrinkable' is FALSE, do not shrink by deleting bytes while
+ relaxing. Such shrinking can cause issues for sections such
+ as .vectors and .jumptables. Instead, the unused bytes should be
+ filled with nop instructions. */
+ bfd_boolean shrinkable = TRUE;
+
+ if (!strcmp (sec->name,".vectors")
+ || !strcmp (sec->name,".jumptables"))
+ shrinkable = FALSE;
if (link_info->relocatable)
(*link_info->callbacks->einfo)
if (internal_relocs == NULL)
goto error_return;
- if (sec != last_input_section)
- last_reloc = NULL;
-
- last_input_section = sec;
-
/* Walk through the relocs looking for relaxing opportunities. */
irelend = internal_relocs + sec->reloc_count;
for (irel = internal_relocs; irel < irelend; irel++)
bfd_vma symval;
if ( ELF32_R_TYPE (irel->r_info) != R_AVR_13_PCREL
- && ELF32_R_TYPE (irel->r_info) != R_AVR_7_PCREL
- && ELF32_R_TYPE (irel->r_info) != R_AVR_CALL)
+ && ELF32_R_TYPE (irel->r_info) != R_AVR_7_PCREL
+ && ELF32_R_TYPE (irel->r_info) != R_AVR_CALL)
continue;
/* Get the section contents if we haven't done so already. */
/* Compute the distance from this insn to the branch target. */
gap = value - dot;
- /* If the distance is within -4094..+4098 inclusive, then we can
- relax this jump/call. +4098 because the call/jump target
- will be closer after the relaxation. */
- if ((int) gap >= -4094 && (int) gap <= 4098)
+ /* Check if the gap falls in the range that can be accommodated
+ in 13 bits signed (it is 12 bits when encoded, as we deal with
+ word addressing). */
+ if (!shrinkable && ((int) gap >= -4096 && (int) gap <= 4095))
+ distance_short_enough = 1;
+ /* If shrinkable, then we can check for a range of distance which
+ is two bytes farther in both directions, because the call
+ or jump target will be closer by two bytes after the
+ relaxation. */
+ else if (shrinkable && ((int) gap >= -4094 && (int) gap <= 4097))
distance_short_enough = 1;
/* Here we handle the wrap-around case. E.g. for a 16k device
irel->r_info = ELF32_R_INFO (ELF32_R_SYM (irel->r_info),
R_AVR_13_PCREL);
- /* Check for the vector section. There we don't want to
- modify the ordering! */
-
- if (!strcmp (sec->name,".vectors")
- || !strcmp (sec->name,".jumptables"))
+ /* We should not modify the ordering if 'shrinkable' is
+ FALSE. */
+ if (!shrinkable)
{
/* Let's insert a nop. */
bfd_put_8 (abfd, 0x00, contents + irel->r_offset + 2);
if ((0x95 == next_insn_msb) && (0x08 == next_insn_lsb))
{
/* The next insn is a ret. We possibly could delete
- this ret. First we need to check for preceeding
+ this ret. First we need to check for preceding
sbis/sbic/sbrs or cpse "skip" instructions. */
- int there_is_preceeding_non_skip_insn = 1;
+ int there_is_preceding_non_skip_insn = 1;
bfd_vma address_of_ret;
address_of_ret = dot + insn_size;
printf ("found jmp / ret sequence at address 0x%x\n",
(int) dot);
- /* We have to make sure that there is a preceeding insn. */
+ /* We have to make sure that there is a preceding insn. */
if (irel->r_offset >= 2)
{
- unsigned char preceeding_msb;
- unsigned char preceeding_lsb;
- preceeding_msb =
+ unsigned char preceding_msb;
+ unsigned char preceding_lsb;
+
+ preceding_msb =
bfd_get_8 (abfd, contents + irel->r_offset - 1);
- preceeding_lsb =
+ preceding_lsb =
bfd_get_8 (abfd, contents + irel->r_offset - 2);
/* sbic. */
- if (0x99 == preceeding_msb)
- there_is_preceeding_non_skip_insn = 0;
+ if (0x99 == preceding_msb)
+ there_is_preceding_non_skip_insn = 0;
/* sbis. */
- if (0x9b == preceeding_msb)
- there_is_preceeding_non_skip_insn = 0;
+ if (0x9b == preceding_msb)
+ there_is_preceding_non_skip_insn = 0;
/* sbrc */
- if ((0xfc == (preceeding_msb & 0xfe)
- && (0x00 == (preceeding_lsb & 0x08))))
- there_is_preceeding_non_skip_insn = 0;
+ if ((0xfc == (preceding_msb & 0xfe)
+ && (0x00 == (preceding_lsb & 0x08))))
+ there_is_preceding_non_skip_insn = 0;
/* sbrs */
- if ((0xfe == (preceeding_msb & 0xfe)
- && (0x00 == (preceeding_lsb & 0x08))))
- there_is_preceeding_non_skip_insn = 0;
+ if ((0xfe == (preceding_msb & 0xfe)
+ && (0x00 == (preceding_lsb & 0x08))))
+ there_is_preceding_non_skip_insn = 0;
/* cpse */
- if (0x10 == (preceeding_msb & 0xfc))
- there_is_preceeding_non_skip_insn = 0;
+ if (0x10 == (preceding_msb & 0xfc))
+ there_is_preceding_non_skip_insn = 0;
- if (there_is_preceeding_non_skip_insn == 0)
+ if (there_is_preceding_non_skip_insn == 0)
if (debug_relax)
- printf ("preceeding skip insn prevents deletion of"
- " ret insn at addr 0x%x in section %s\n",
+ printf ("preceding skip insn prevents deletion of"
+ " ret insn at Addy 0x%x in section %s\n",
(int) dot + 2, sec->name);
}
else
{
/* There is no previous instruction. */
- there_is_preceeding_non_skip_insn = 0;
+ there_is_preceding_non_skip_insn = 0;
}
- if (there_is_preceeding_non_skip_insn)
+ if (there_is_preceding_non_skip_insn)
{
/* We now only have to make sure that there is no
local label defined at the address of the ret
irel->r_offset + insn_size;
Elf_Internal_Sym *isym, *isymend;
unsigned int sec_shndx;
+ struct bfd_section *isec;
sec_shndx =
_bfd_elf_section_from_bfd_section (abfd, sec);
}
}
}
+
/* Now we check for relocations pointing to ret. */
- {
- Elf_Internal_Rela *rel;
- Elf_Internal_Rela *relend;
+ for (isec = abfd->sections; isec && deleting_ret_is_safe; isec = isec->next)
+ {
+ Elf_Internal_Rela *rel;
+ Elf_Internal_Rela *relend;
- relend = elf_section_data (sec)->relocs
- + sec->reloc_count;
+ rel = elf_section_data (isec)->relocs;
+ if (rel == NULL)
+ rel = _bfd_elf_link_read_relocs (abfd, isec, NULL, NULL, TRUE);
- for (rel = elf_section_data (sec)->relocs;
- rel < relend; rel++)
- {
- bfd_vma reloc_target = 0;
+ relend = rel + isec->reloc_count;
- /* Read this BFD's local symbols if we haven't
- done so already. */
- if (isymbuf == NULL && symtab_hdr->sh_info != 0)
- {
- isymbuf = (Elf_Internal_Sym *)
- symtab_hdr->contents;
- if (isymbuf == NULL)
- isymbuf = bfd_elf_get_elf_syms
- (abfd,
- symtab_hdr,
- symtab_hdr->sh_info, 0,
- NULL, NULL, NULL);
- if (isymbuf == NULL)
+ for (; rel && rel < relend; rel++)
+ {
+ bfd_vma reloc_target = 0;
+
+ /* Read this BFD's local symbols if we haven't
+ done so already. */
+ if (isymbuf == NULL && symtab_hdr->sh_info != 0)
+ {
+ isymbuf = (Elf_Internal_Sym *)
+ symtab_hdr->contents;
+ if (isymbuf == NULL)
+ isymbuf = bfd_elf_get_elf_syms
+ (abfd,
+ symtab_hdr,
+ symtab_hdr->sh_info, 0,
+ NULL, NULL, NULL);
+ if (isymbuf == NULL)
+ break;
+ }
+
+ /* Get the value of the symbol referred to
+ by the reloc. */
+ if (ELF32_R_SYM (rel->r_info)
+ < symtab_hdr->sh_info)
+ {
+ /* A local symbol. */
+ asection *sym_sec;
+
+ isym = isymbuf
+ + ELF32_R_SYM (rel->r_info);
+ sym_sec = bfd_section_from_elf_index
+ (abfd, isym->st_shndx);
+ symval = isym->st_value;
+
+ /* If the reloc is absolute, it will not
+ have a symbol or section associated
+ with it. */
+
+ if (sym_sec)
+ {
+ symval +=
+ sym_sec->output_section->vma
+ + sym_sec->output_offset;
+ reloc_target = symval + rel->r_addend;
+ }
+ else
+ {
+ reloc_target = symval + rel->r_addend;
+ /* Reference symbol is absolute. */
+ }
+ }
+ /* else ... reference symbol is extern. */
+
+ if (address_of_ret == reloc_target)
+ {
+ deleting_ret_is_safe = 0;
+ if (debug_relax)
+ printf ("ret from "
+ "rjmp/jmp ret sequence at address"
+ " 0x%x could not be deleted. ret"
+ " is target of a relocation.\n",
+ (int) address_of_ret);
break;
- }
-
- /* Get the value of the symbol referred to
- by the reloc. */
- if (ELF32_R_SYM (rel->r_info)
- < symtab_hdr->sh_info)
- {
- /* A local symbol. */
- asection *sym_sec;
-
- isym = isymbuf
- + ELF32_R_SYM (rel->r_info);
- sym_sec = bfd_section_from_elf_index
- (abfd, isym->st_shndx);
- symval = isym->st_value;
-
- /* If the reloc is absolute, it will not
- have a symbol or section associated
- with it. */
-
- if (sym_sec)
- {
- symval +=
- sym_sec->output_section->vma
- + sym_sec->output_offset;
- reloc_target = symval + rel->r_addend;
- }
- else
- {
- reloc_target = symval + rel->r_addend;
- /* Reference symbol is absolute. */
- }
- }
- /* else ... reference symbol is extern. */
-
- if (address_of_ret == reloc_target)
- {
- deleting_ret_is_safe = 0;
- if (debug_relax)
- printf ("ret from "
- "rjmp/jmp ret sequence at address"
- " 0x%x could not be deleted. ret"
- " is target of a relocation.\n",
- (int) address_of_ret);
- }
- }
- }
+ }
+ }
+ }
if (deleting_ret_is_safe)
{
break;
}
}
-
}
}
break;
}
}
+ if (!*again)
+ {
+ /* Look through all the property records in this section to see if
+ there are any alignment records that can be moved. */
+ struct avr_relax_info *relax_info;
+
+ relax_info = get_avr_relax_info (sec);
+ if (relax_info->records.count > 0)
+ {
+ unsigned int i;
+
+ for (i = 0; i < relax_info->records.count; ++i)
+ {
+ switch (relax_info->records.items [i].type)
+ {
+ case RECORD_ORG:
+ case RECORD_ORG_AND_FILL:
+ break;
+ case RECORD_ALIGN:
+ case RECORD_ALIGN_AND_FILL:
+ {
+ struct avr_property_record *record;
+ unsigned long bytes_to_align;
+ int count = 0;
+
+ /* Look for alignment directives that have had enough
+ bytes deleted before them, such that the directive
+ can be moved backwards and still maintain the
+ required alignment. */
+ record = &relax_info->records.items [i];
+ bytes_to_align
+ = (unsigned long) (1 << record->data.align.bytes);
+ while (record->data.align.preceding_deleted >=
+ bytes_to_align)
+ {
+ record->data.align.preceding_deleted
+ -= bytes_to_align;
+ count += bytes_to_align;
+ }
+
+ if (count > 0)
+ {
+ bfd_vma addr = record->offset;
+
+ /* We can delete COUNT bytes and this alignment
+ directive will still be correctly aligned.
+ First move the alignment directive, then delete
+ the bytes. */
+ record->offset -= count;
+ elf32_avr_relax_delete_bytes (abfd, sec,
+ addr - count,
+ count);
+ *again = TRUE;
+ }
+ }
+ break;
+ }
+ }
+ }
+ }
+
if (contents != NULL
&& elf_section_data (sec)->this_hdr.contents != contents)
{
static bfd_boolean
avr_mark_stub_not_to_be_necessary (struct bfd_hash_entry *bh,
- void *in_arg)
+ void *in_arg ATTRIBUTE_UNUSED)
{
struct elf32_avr_stub_hash_entry *hsh;
- struct elf32_avr_link_hash_table *htab;
- htab = in_arg;
hsh = avr_stub_hash_entry (bh);
hsh->is_actually_needed = FALSE;
asection *section;
asection **input_list, **list;
bfd_size_type amt;
- struct elf32_avr_link_hash_table *htab = avr_link_hash_table(info);
+ struct elf32_avr_link_hash_table *htab = avr_link_hash_table (info);
if (htab == NULL || htab->no_stubs)
return 0;
/* Count the number of input BFDs and find the top input section id. */
for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
input_bfd != NULL;
- input_bfd = input_bfd->link_next)
+ input_bfd = input_bfd->link.next)
{
bfd_count += 1;
for (section = input_bfd->sections;
export stubs. */
for (bfd_indx = 0;
input_bfd != NULL;
- input_bfd = input_bfd->link_next, bfd_indx++)
+ input_bfd = input_bfd->link.next, bfd_indx++)
{
Elf_Internal_Shdr *symtab_hdr;
bfd_hash_traverse (&htab->bstab, avr_mark_stub_not_to_be_necessary, htab);
for (input_bfd = info->input_bfds, bfd_indx = 0;
input_bfd != NULL;
- input_bfd = input_bfd->link_next, bfd_indx++)
+ input_bfd = input_bfd->link.next, bfd_indx++)
{
Elf_Internal_Shdr *symtab_hdr;
asection *section;
return TRUE;
}
+/* Callback used by QSORT to order relocations AP and BP. */
+
+static int
+internal_reloc_compare (const void *ap, const void *bp)
+{
+ const Elf_Internal_Rela *a = (const Elf_Internal_Rela *) ap;
+ const Elf_Internal_Rela *b = (const Elf_Internal_Rela *) bp;
+
+ if (a->r_offset != b->r_offset)
+ return (a->r_offset - b->r_offset);
+
+ /* We don't need to sort on these criteria for correctness,
+ but enforcing a more strict ordering prevents unstable qsort
+ from behaving differently with different implementations.
+ Without the code below we get correct but different results
+ on Solaris 2.7 and 2.8. We would like to always produce the
+ same results no matter the host. */
+
+ if (a->r_info != b->r_info)
+ return (a->r_info - b->r_info);
+
+ return (a->r_addend - b->r_addend);
+}
+
+/* Return true if ADDRESS is within the vma range of SECTION from ABFD. */
+
+static bfd_boolean
+avr_is_section_for_address (bfd *abfd, asection *section, bfd_vma address)
+{
+ bfd_vma vma;
+ bfd_size_type size;
+
+ vma = bfd_get_section_vma (abfd, section);
+ if (address < vma)
+ return FALSE;
+
+ size = section->size;
+ if (address >= vma + size)
+ return FALSE;
+
+ return TRUE;
+}
+
+/* Data structure used by AVR_FIND_SECTION_FOR_ADDRESS. */
+
+struct avr_find_section_data
+{
+ /* The address we're looking for. */
+ bfd_vma address;
+
+ /* The section we've found. */
+ asection *section;
+};
+
+/* Helper function to locate the section holding a certain virtual memory
+ address. This is called via bfd_map_over_sections. The DATA is an
+ instance of STRUCT AVR_FIND_SECTION_DATA, the address field of which
+ has been set to the address to search for, and the section field has
+ been set to NULL. If SECTION from ABFD contains ADDRESS then the
+ section field in DATA will be set to SECTION. As an optimisation, if
+ the section field is already non-null then this function does not
+ perform any checks, and just returns. */
+
+static void
+avr_find_section_for_address (bfd *abfd,
+ asection *section, void *data)
+{
+ struct avr_find_section_data *fs_data
+ = (struct avr_find_section_data *) data;
+
+ /* Return if already found. */
+ if (fs_data->section != NULL)
+ return;
+
+ /* If this section isn't part of the addressable code content, skip it. */
+ if ((bfd_get_section_flags (abfd, section) & SEC_ALLOC) == 0
+ && (bfd_get_section_flags (abfd, section) & SEC_CODE) == 0)
+ return;
+
+ if (avr_is_section_for_address (abfd, section, fs_data->address))
+ fs_data->section = section;
+}
+
+/* Load all of the property records from SEC, a section from ABFD. Return
+ a STRUCT AVR_PROPERTY_RECORD_LIST containing all the records. The
+ memory for the returned structure, and all of the records pointed to by
+ the structure are allocated with a single call to malloc, so only the
+ pointer returned needs to be free'd. */
+
+static struct avr_property_record_list *
+avr_elf32_load_records_from_section (bfd *abfd, asection *sec)
+{
+ char *contents = NULL, *ptr;
+ bfd_size_type size, mem_size;
+ bfd_byte version, flags;
+ uint16_t record_count, i;
+ struct avr_property_record_list *r_list = NULL;
+ Elf_Internal_Rela *internal_relocs = NULL, *rel, *rel_end;
+ struct avr_find_section_data fs_data;
+
+ fs_data.section = NULL;
+
+ size = bfd_get_section_size (sec);
+ contents = bfd_malloc (size);
+ bfd_get_section_contents (abfd, sec, contents, 0, size);
+ ptr = contents;
+
+ /* Load the relocations for the '.avr.prop' section if there are any, and
+ sort them. */
+ internal_relocs = (_bfd_elf_link_read_relocs
+ (abfd, sec, NULL, NULL, FALSE));
+ if (internal_relocs)
+ qsort (internal_relocs, sec->reloc_count,
+ sizeof (Elf_Internal_Rela), internal_reloc_compare);
+
+ /* There is a header at the start of the property record section SEC, the
+ format of this header is:
+ uint8_t : version number
+ uint8_t : flags
+ uint16_t : record counter
+ */
+
+ /* Check that we have at least a header's worth of bytes. */
+ if (size < AVR_PROPERTY_SECTION_HEADER_SIZE)
+ goto load_failed;
+
+ version = *((bfd_byte *) ptr);
+ ptr++;
+ flags = *((bfd_byte *) ptr);
+ ptr++;
+ record_count = *((uint16_t *) ptr);
+ ptr += 2;
+ BFD_ASSERT (ptr - contents == AVR_PROPERTY_SECTION_HEADER_SIZE);
+
+ /* Now allocate space for the list structure, and all of the list
+ elements in a single block. */
+ mem_size = sizeof (struct avr_property_record_list)
+ + sizeof (struct avr_property_record) * record_count;
+ r_list = bfd_malloc (mem_size);
+ if (r_list == NULL)
+ goto load_failed;
+
+ r_list->version = version;
+ r_list->flags = flags;
+ r_list->section = sec;
+ r_list->record_count = record_count;
+ r_list->records = (struct avr_property_record *) (&r_list [1]);
+ size -= AVR_PROPERTY_SECTION_HEADER_SIZE;
+
+ /* Check that we understand the version number. There is only one
+ version number right now; anything else is an error. */
+ if (r_list->version != AVR_PROPERTY_RECORDS_VERSION)
+ goto load_failed;
+
+ rel = internal_relocs;
+ rel_end = rel + sec->reloc_count;
+ for (i = 0; i < record_count; ++i)
+ {
+ bfd_vma address;
+
+ /* Each entry is a 32-bit address, followed by a single byte type.
+ After that is the type specific data. We must take care to
+ ensure that we don't read beyond the end of the section data. */
+ if (size < 5)
+ goto load_failed;
+
+ r_list->records [i].section = NULL;
+ r_list->records [i].offset = 0;
+
+ if (rel)
+ {
+ /* The offset of the address within the .avr.prop section. */
+ size_t offset = ptr - contents;
+
+ while (rel < rel_end && rel->r_offset < offset)
+ ++rel;
+
+ if (rel == rel_end)
+ rel = NULL;
+ else if (rel->r_offset == offset)
+ {
+ /* Find section and section offset. */
+ unsigned long r_symndx;
+
+ asection * rel_sec;
+ bfd_vma sec_offset;
+
+ r_symndx = ELF32_R_SYM (rel->r_info);
+ rel_sec = get_elf_r_symndx_section (abfd, r_symndx);
+ sec_offset = get_elf_r_symndx_offset (abfd, r_symndx)
+ + rel->r_addend;
+
+ r_list->records [i].section = rel_sec;
+ r_list->records [i].offset = sec_offset;
+ }
+ }
+
+ address = *((uint32_t *) ptr);
+ ptr += 4;
+ size -= 4;
+
+ if (r_list->records [i].section == NULL)
+ {
+ /* Try to find section and offset from address. */
+ if (fs_data.section != NULL
+ && !avr_is_section_for_address (abfd, fs_data.section,
+ address))
+ fs_data.section = NULL;
+
+ if (fs_data.section == NULL)
+ {
+ fs_data.address = address;
+ bfd_map_over_sections (abfd, avr_find_section_for_address,
+ &fs_data);
+ }
+
+ if (fs_data.section == NULL)
+ {
+ fprintf (stderr, "Failed to find matching section.\n");
+ goto load_failed;
+ }
+
+ r_list->records [i].section = fs_data.section;
+ r_list->records [i].offset
+ = address - bfd_get_section_vma (abfd, fs_data.section);
+ }
+
+ r_list->records [i].type = *((bfd_byte *) ptr);
+ ptr += 1;
+ size -= 1;
+
+ switch (r_list->records [i].type)
+ {
+ case RECORD_ORG:
+ /* Nothing else to load. */
+ break;
+ case RECORD_ORG_AND_FILL:
+ /* Just a 4-byte fill to load. */
+ if (size < 4)
+ goto load_failed;
+ r_list->records [i].data.org.fill = *((uint32_t *) ptr);
+ ptr += 4;
+ size -= 4;
+ break;
+ case RECORD_ALIGN:
+ /* Just a 4-byte alignment to load. */
+ if (size < 4)
+ goto load_failed;
+ r_list->records [i].data.align.bytes = *((uint32_t *) ptr);
+ ptr += 4;
+ size -= 4;
+ /* Just initialise the PRECEDING_DELETED field; it is used during
+ linker relaxation. */
+ r_list->records [i].data.align.preceding_deleted = 0;
+ break;
+ case RECORD_ALIGN_AND_FILL:
+ /* A 4-byte alignment, and a 4-byte fill to load. */
+ if (size < 8)
+ goto load_failed;
+ r_list->records [i].data.align.bytes = *((uint32_t *) ptr);
+ ptr += 4;
+ r_list->records [i].data.align.fill = *((uint32_t *) ptr);
+ ptr += 4;
+ size -= 8;
+ /* Just initialise the PRECEDING_DELETED field; it is used during
+ linker relaxation. */
+ r_list->records [i].data.align.preceding_deleted = 0;
+ break;
+ default:
+ goto load_failed;
+ }
+ }
+
+ free (contents);
+ free (internal_relocs);
+ return r_list;
+
+ load_failed:
+ free (internal_relocs);
+ free (contents);
+ free (r_list);
+ return NULL;
+}
+
+/* Load all of the property records from ABFD. See
+ AVR_ELF32_LOAD_RECORDS_FROM_SECTION for details of the return value. */
+
+struct avr_property_record_list *
+avr_elf32_load_property_records (bfd *abfd)
+{
+ asection *sec;
+
+ /* Find the '.avr.prop' section and load the contents into memory. */
+ sec = bfd_get_section_by_name (abfd, AVR_PROPERTY_RECORD_SECTION_NAME);
+ if (sec == NULL)
+ return NULL;
+ return avr_elf32_load_records_from_section (abfd, sec);
+}
+
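+/* Return a human readable name for the type of the property record REC. */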
+const char *
+avr_elf32_property_record_name (struct avr_property_record *rec)
+{
+ const char *str;
+
+ switch (rec->type)
+ {
+ case RECORD_ORG:
+ str = "ORG";
+ break;
+ case RECORD_ORG_AND_FILL:
+ str = "ORG+FILL";
+ break;
+ case RECORD_ALIGN:
+ str = "ALIGN";
+ break;
+ case RECORD_ALIGN_AND_FILL:
+ str = "ALIGN+FILL";
+ break;
+ default:
+ str = "unknown";
+ }
+
+ return str;
+}
+
+
#define ELF_ARCH bfd_arch_avr
+#define ELF_TARGET_ID AVR_ELF_DATA
#define ELF_MACHINE_CODE EM_AVR
#define ELF_MACHINE_ALT1 EM_AVR_OLD
#define ELF_MAXPAGESIZE 1
-#define TARGET_LITTLE_SYM bfd_elf32_avr_vec
+#define TARGET_LITTLE_SYM avr_elf32_vec
#define TARGET_LITTLE_NAME "elf32-avr"
#define bfd_elf32_bfd_link_hash_table_create elf32_avr_link_hash_table_create
-#define bfd_elf32_bfd_link_hash_table_free elf32_avr_link_hash_table_free
#define elf_info_to_howto avr_info_to_howto_rela
#define elf_info_to_howto_rel NULL
#define elf_backend_relocate_section elf32_avr_relocate_section
-#define elf_backend_check_relocs elf32_avr_check_relocs
#define elf_backend_can_gc_sections 1
#define elf_backend_rela_normal 1
#define elf_backend_final_write_processing \
#define bfd_elf32_bfd_relax_section elf32_avr_relax_section
#define bfd_elf32_bfd_get_relocated_section_contents \
elf32_avr_get_relocated_section_contents
+#define bfd_elf32_new_section_hook elf_avr_new_section_hook
#include "elf32-target.h"