// arm.cc -- arm target support for gold.
-// Copyright 2009, 2010 Free Software Foundation, Inc.
+// Copyright (C) 2009-2017 Free Software Foundation, Inc.
// This file also contains borrowed and adapted code from
#include "gc.h"
#include "attributes.h"
#include "arm-reloc-property.h"
+#include "nacl.h"
namespace
{
template<bool big_endian>
class Output_data_plt_arm;
+template<bool big_endian>
+class Output_data_plt_arm_short;
+
+template<bool big_endian>
+class Output_data_plt_arm_long;
+
template<bool big_endian>
class Stub_table;
//
// This is a very simple port of gold for ARM-EABI. It is intended for
// supporting Android only for the time being.
-//
+//
// TODOs:
// - Implement all static relocation types documented in arm-reloc.def.
// - Make PLTs more flexible for different architecture features like
// Ideally we would like to avoid using global variables but this is used
// in very many places and sometimes in loops. If we use a function
-// returning a static instance of Arm_reloc_property_table, it will very
+// returning a static instance of Arm_reloc_property_table, it will be very
// slow in a threaded environment since the static instance needs to be
// locked. The pointer below is initialized in the
// Target::do_select_as_default_target() hook so that we do not spend time
// compilation time and generate a representation of it in PODs only. That
// way we can avoid initialization when the linker starts.
-Arm_reloc_property_table *arm_reloc_property_table = NULL;
+Arm_reloc_property_table* arm_reloc_property_table = NULL;
// Instruction template class. This class is similar to the insn_sequence
// struct in bfd/elf32-arm.c.
enum Type
{
THUMB16_TYPE = 1,
- // THUMB16_SPECIAL_TYPE is used by sub-classes of Stub for instruction
+ // THUMB16_SPECIAL_TYPE is used by sub-classes of Stub for instruction
// templates with class-specific semantics. Currently this is used
// only by the Cortex_a8_stub class for handling condition codes in
// conditional branches.
static const Insn_template
thumb16_insn(uint32_t data)
- { return Insn_template(data, THUMB16_TYPE, elfcpp::R_ARM_NONE, 0); }
+ { return Insn_template(data, THUMB16_TYPE, elfcpp::R_ARM_NONE, 0); }
// A Thumb conditional branch, in which the proper condition is inserted
// when we build the stub.
static const Insn_template
thumb16_bcond_insn(uint32_t data)
- { return Insn_template(data, THUMB16_SPECIAL_TYPE, elfcpp::R_ARM_NONE, 1); }
+ { return Insn_template(data, THUMB16_SPECIAL_TYPE, elfcpp::R_ARM_NONE, 1); }
static const Insn_template
thumb32_insn(uint32_t data)
- { return Insn_template(data, THUMB32_TYPE, elfcpp::R_ARM_NONE, 0); }
+ { return Insn_template(data, THUMB32_TYPE, elfcpp::R_ARM_NONE, 0); }
static const Insn_template
thumb32_b_insn(uint32_t data, int reloc_addend)
{
return Insn_template(data, THUMB32_TYPE, elfcpp::R_ARM_THM_JUMP24,
reloc_addend);
- }
+ }
static const Insn_template
arm_insn(uint32_t data)
static const Insn_template
data_word(unsigned data, unsigned int r_type, int reloc_addend)
- { return Insn_template(data, DATA_TYPE, r_type, reloc_addend); }
+ { return Insn_template(data, DATA_TYPE, r_type, reloc_addend); }
// Accessors. This class is used for read-only objects so no modifiers
// are provided.
arm_stub_cortex_a8_first = arm_stub_a8_veneer_b_cond,
// Last Cortex-A8 stub type.
arm_stub_cortex_a8_last = arm_stub_a8_veneer_blx,
-
+
// Last stub type.
arm_stub_type_last = arm_stub_v4_veneer_bx
} Stub_type;
unsigned
alignment() const
{ return this->alignment_; }
-
+
// Return whether entry point is in thumb mode.
bool
entry_in_thumb_mode() const
// as possible.
Stub_template(const Stub_template&);
Stub_template& operator=(const Stub_template&);
-
+
// Stub type.
Stub_type type_;
// Points to an array of Insn_templates.
bool entry_in_thumb_mode_;
// A table of reloc instruction indices and offsets. We can find these by
// looking at the instruction templates but we pre-compute and then stash
- // them here for speed.
+ // them here for speed.
std::vector<Reloc> relocs_;
};
void
set_offset(section_offset_type offset)
{ this->offset_ = offset; }
-
+
// Return the relocation target address of the i-th relocation in the
// stub. This must be defined in a child class.
Arm_address
else
this->do_fixed_endian_write<false>(view, view_size);
}
-
+
// This must be overridden if a child class uses the THUMB16_SPECIAL_TYPE
// instruction template.
virtual uint16_t
// Whether this is equal to another key K.
bool
- eq(const Key& k) const
+ eq(const Key& k) const
{
return ((this->stub_type_ == k.stub_type_)
&& (this->r_sym_ == k.r_sym_)
// Name of key. This is mainly for debugging.
std::string
- name() const;
+ name() const ATTRIBUTE_UNUSED;
private:
// Stub type.
// If this is a local symbol, this is the index in the defining object.
// Otherwise, it is invalid_index for a global symbol.
unsigned int r_sym_;
- // If r_sym_ is invalid index. This points to a global symbol.
- // Otherwise, this points a relobj. We used the unsized and target
- // independent Symbol and Relobj classes instead of Sized_symbol<32> and
- // Arm_relobj. This is done to avoid making the stub class a template
+ // If r_sym_ is an invalid index, this points to a global symbol.
+ // Otherwise, it points to a relobj. We used the unsized and target
+ // independent Symbol and Relobj classes instead of Sized_symbol<32> and
+ // Arm_relobj, in order to avoid making the stub class a template
// as most of the stub machinery is endianness-neutral. However, it
// may require a bit of casting done by users of this class.
union
// Cortex-A8 stub class. We need a Cortex-A8 stub to redirect any 32-bit
// THUMB branch that meets the following conditions:
-//
+//
// 1. The branch straddles a page boundary, i.e. the lower 12 bits of the
// branch address are 0xffe (a standalone check is sketched further below).
// 2. The branch target address is in the same page as the first word of the
{
if (this->stub_template()->type() == arm_stub_a8_veneer_b_cond)
{
- // The conditional branch veneer has two relocations.
- gold_assert(i < 2);
+ // The conditional branch veneer has two relocations.
+ gold_assert(i < 2);
return i == 0 ? this->source_address_ + 4 : this->destination_address_;
}
else
{
- // All other Cortex-A8 stubs have only one relocation.
- gold_assert(i == 0);
- return this->destination_address_;
+ // All other Cortex-A8 stubs have only one relocation.
+ gold_assert(i == 0);
+ return this->destination_address_;
}
}
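// A minimal standalone sketch (not gold code) of condition 1 in the
// Cortex-A8 comment above: a 32-bit Thumb branch straddles a 4K page
// boundary exactly when the low 12 bits of its address are 0xffe, so
// that its two halfwords land in different pages.

static inline bool
sketch_thumb32_branch_straddles_page(unsigned int branch_address)
{
  // 0xffe is the last halfword-aligned offset inside a 4K page.
  return (branch_address & 0xfffU) == 0xffeU;
}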
private:
// Constructor and destructor are protected since we only return a single
// instance created in Stub_factory::get_instance().
-
+
Stub_factory();
// A Stub_factory may not be copied since it is a singleton.
Stub_factory(const Stub_factory&);
Stub_factory& operator=(Stub_factory&);
-
+
// Stub templates. These are initialized in the constructor.
const Stub_template* stub_templates_[arm_stub_type_last+1];
};
current_data_size() const
{ return this->current_data_size_for_child(); }
- // Add a STUB with using KEY. Caller is reponsible for avoid adding
- // if already a STUB with the same key has been added.
+ // Add a STUB using KEY. The caller is responsible for avoiding addition
+ // if a STUB with the same key has already been added.
void
add_reloc_stub(Reloc_stub* stub, const Reloc_stub::Key& key)
{
}
// Add a Cortex-A8 STUB that fixes up a THUMB branch at ADDRESS.
- // Caller is reponsible for avoid adding if already a STUB with the same
- // address has been added.
+ // The caller is responsible for avoiding addition if a STUB with the same
+ // address has already been added.
void
add_cortex_a8_stub(Arm_address address, Cortex_a8_stub* stub)
{
// needing the Cortex-A8 workaround.
void
finalize_stubs();
-
+
// Apply Cortex-A8 workaround to an address range.
void
apply_cortex_a8_workaround_to_address_range(Target_arm<big_endian>*,
// Write out section contents.
void
do_write(Output_file*);
-
+
// Return the required alignment.
uint64_t
do_addralign() const
void
set_final_data_size()
{ this->set_data_size(this->current_data_size()); }
-
+
private:
// Relocate one stub.
void
this->do_fixed_endian_write<false>(of);
}
+ // Write to a map file.
+ void
+ do_print_to_mapfile(Mapfile* mapfile) const
+ { mapfile->print_output_data(this, _("** ARM cantunwind")); }
+
private:
// Implement do_write for a given endianness.
template<bool big_endian>
void inline
do_fixed_endian_write(Output_file*);
-
+
// The object containing the section pointed to by this.
Relobj* relobj_;
// The section index of the section pointed to by this.
// During EXIDX coverage fix-up, we compact an EXIDX section. The
// offset map is used to map an input section offset within the EXIDX section
-// to the output offset from the start of this EXIDX section.
+// to the output offset from the start of this EXIDX section.
typedef std::map<section_offset_type, section_offset_type>
Arm_exidx_section_offset_map;
const Arm_exidx_section_offset_map& section_offset_map,
uint32_t deleted_bytes);
+ // Build output contents.
+ void
+ build_contents(const unsigned char*, section_size_type);
+
// Return the original EXIDX input section.
const Arm_exidx_input_section&
exidx_input_section() const
const Arm_exidx_input_section& exidx_input_section_;
// Section offset map.
const Arm_exidx_section_offset_map& section_offset_map_;
+ // Merged section contents. We need to build the merged section
+ // and save it here to avoid accessing the original EXIDX section when
+ // we cannot lock the sections' object.
+ unsigned char* section_contents_;
};
// A class to wrap an ordinary input section containing executable code.
public:
Arm_input_section(Relobj* relobj, unsigned int shndx)
: Output_relaxed_input_section(relobj, shndx, 1),
- original_addralign_(1), original_size_(0), stub_table_(NULL)
+ original_addralign_(1), original_size_(0), stub_table_(NULL),
+ original_contents_(NULL)
{ }
~Arm_input_section()
- { }
+ { delete[] this->original_contents_; }
// Initialize.
void
init();
-
+
// Whether this is a stub table owner.
bool
is_stub_table_owner() const
bool
do_output_offset(const Relobj* object, unsigned int shndx,
section_offset_type offset,
- section_offset_type* poutput) const
+ section_offset_type* poutput) const
{
if ((object == this->relobj())
&& (shndx == this->shndx())
uint32_t original_size_;
// Stub table.
Stub_table<big_endian>* stub_table_;
+ // Original section contents. We have to make a copy here since the file
+ // containing the original section may not be locked when we need to access
+ // the contents.
+ unsigned char* original_contents_;
};
// Arm_exidx_fixup class. This is used to define a number of methods
~Arm_exidx_fixup()
{ delete this->section_offset_map_; }
- // Process an EXIDX section for entry merging. Return number of bytes to
- // be deleted in output. If parts of the input EXIDX section are merged
- // a heap allocated Arm_exidx_section_offset_map is store in the located
- // PSECTION_OFFSET_MAP. The caller owns the map and is reponsible for
- // releasing it.
+ // Process an EXIDX section for entry merging. SECTION_CONTENTS points
+ // to the EXIDX contents and SECTION_SIZE is the size of the contents.
+ // Return the number of bytes to be deleted in the output. If parts of the
+ // input EXIDX section are merged, a heap-allocated
+ // Arm_exidx_section_offset_map is stored in the location pointed to by
+ // PSECTION_OFFSET_MAP. The caller owns the map and is responsible for
+ // releasing it.
template<bool big_endian>
uint32_t
process_exidx_section(const Arm_exidx_input_section* exidx_input_section,
+ const unsigned char* section_contents,
+ section_size_type section_size,
Arm_exidx_section_offset_map** psection_offset_map);
-
+
// Append an EXIDX_CANTUNWIND entry pointing at the end of the last
// input section, if there is not one already.
void
public:
typedef std::vector<std::pair<Relobj*, unsigned int> > Text_section_list;
+ // We need to force SHF_LINK_ORDER in a SHT_ARM_EXIDX section.
Arm_output_section(const char* name, elfcpp::Elf_Word type,
elfcpp::Elf_Xword flags)
- : Output_section(name, type, flags)
+ : Output_section(name, type,
+ (type == elfcpp::SHT_ARM_EXIDX
+ ? flags | elfcpp::SHF_LINK_ORDER
+ : flags))
{
if (type == elfcpp::SHT_ARM_EXIDX)
this->set_always_keeps_input_sections();
~Arm_output_section()
{ }
-
+
// Group input sections for stub generation.
void
- group_sections(section_size_type, bool, Target_arm<big_endian>*);
+ group_sections(section_size_type, bool, Target_arm<big_endian>*, const Task*);
// Downcast a base pointer to an Arm_output_section pointer. This is
// not type-safe but we only use Arm_output_section not the base class.
fix_exidx_coverage(Layout* layout,
const Text_section_list& sorted_text_section,
Symbol_table* symtab,
- bool merge_exidx_entries);
+ bool merge_exidx_entries,
+ const Task* task);
// Link an EXIDX section into its corresponding text section.
void
Input_section_list::const_iterator,
Input_section_list::const_iterator,
Target_arm<big_endian>*,
- std::vector<Output_relaxed_input_section*>*);
+ std::vector<Output_relaxed_input_section*>*,
+ const Task* task);
};
// Arm_exidx_input_section class. This represents an EXIDX input section.
static_cast<section_offset_type>(-1);
Arm_exidx_input_section(Relobj* relobj, unsigned int shndx,
- unsigned int link, uint32_t size, uint32_t addralign)
+ unsigned int link, uint32_t size,
+ uint32_t addralign, uint32_t text_size)
: relobj_(relobj), shndx_(shndx), link_(link), size_(size),
- addralign_(addralign), has_errors_(false)
+ addralign_(addralign), text_size_(text_size), has_errors_(false)
{ }
~Arm_exidx_input_section()
{ }
-
+
// Accessors: This is a read-only class.
// Return the object containing this EXIDX input section.
size() const
{ return this->size_; }
- // Reutnr address alignment of EXIDX input section.
+ // Return address alignment of EXIDX input section.
uint32_t
addralign() const
{ return this->addralign_; }
+ // Return size of the associated text input section.
+ uint32_t
+ text_size() const
+ { return this->text_size_; }
+
// Whether there are any errors in the EXIDX input section.
bool
has_errors() const
uint32_t size_;
// Address alignment of this. For ARM, a 32-bit value is sufficient.
uint32_t addralign_;
+ // Size of associated text section.
+ uint32_t text_size_;
// Whether this has any errors.
bool has_errors_;
};
// Arm_relobj class.
template<bool big_endian>
-class Arm_relobj : public Sized_relobj<32, big_endian>
+class Arm_relobj : public Sized_relobj_file<32, big_endian>
{
public:
static const Arm_address invalid_address = static_cast<Arm_address>(-1);
Arm_relobj(const std::string& name, Input_file* input_file, off_t offset,
- const typename elfcpp::Ehdr<32, big_endian>& ehdr)
- : Sized_relobj<32, big_endian>(name, input_file, offset, ehdr),
+ const typename elfcpp::Ehdr<32, big_endian>& ehdr)
+ : Sized_relobj_file<32, big_endian>(name, input_file, offset, ehdr),
stub_tables_(), local_symbol_is_thumb_function_(),
attributes_section_data_(NULL), mapping_symbols_info_(),
section_has_cortex_a8_workaround_(NULL), exidx_section_map_(),
~Arm_relobj()
{ delete this->attributes_section_data_; }
-
+
// Return the stub table of the SHNDX-th section if there is one.
Stub_table<big_endian>*
stub_table(unsigned int shndx) const
gold_assert(r_sym < this->local_symbol_is_thumb_function_.size());
return this->local_symbol_is_thumb_function_[r_sym];
}
-
+
// Scan all relocation sections for stub generation.
void
scan_sections_for_stubs(Target_arm<big_endian>*, const Symbol_table*,
|| (p1.first == p2.first && p1.second < p2.second));
}
};
-
+
// We only care about the first character of a mapping symbol, so
// we only store that instead of the whole symbol name.
typedef std::map<Mapping_symbol_position, char,
// Whether a section contains any Cortex-A8 workaround.
bool
section_has_cortex_a8_workaround(unsigned int shndx) const
- {
+ {
return (this->section_has_cortex_a8_workaround_ != NULL
&& (*this->section_has_cortex_a8_workaround_)[shndx]);
}
-
+
// Mark a section that has Cortex-A8 workaround.
void
mark_section_for_cortex_a8_workaround(unsigned int shndx)
void
set_output_local_symbol_count_needs_update()
{ this->output_local_symbol_count_needs_update_ = true; }
-
+
// Update output local symbol count at the end of relaxation.
void
update_output_local_symbol_count();
bool
merge_flags_and_attributes() const
{ return this->merge_flags_and_attributes_; }
-
+
// Export list of EXIDX section indices.
void
get_exidx_shndx_list(std::vector<unsigned int>* list) const
if (p->second->shndx() == p->first)
list->push_back(p->first);
}
- // Sort list to make result independent of implementation of map.
+ // Sort list to make result independent of implementation of map.
std::sort(list->begin(), list->end());
}
do_setup()
{
// Call parent's setup method.
- Sized_relobj<32, big_endian>::do_setup();
+ Sized_relobj_file<32, big_endian>::do_setup();
// Initialize look-up tables.
Stub_table_list empty_stub_table_list(this->shnum(), NULL);
// Count the local symbols.
void
do_count_local_symbols(Stringpool_template<char>*,
- Stringpool_template<char>*);
+ Stringpool_template<char>*);
void
- do_relocate_sections(const Symbol_table* symtab, const Layout* layout,
- const unsigned char* pshdrs,
- typename Sized_relobj<32, big_endian>::Views* pivews);
+ do_relocate_sections(
+ const Symbol_table* symtab, const Layout* layout,
+ const unsigned char* pshdrs, Output_file* of,
+ typename Sized_relobj_file<32, big_endian>::Views* pivews);
// Read the symbol information.
void
bool
section_needs_reloc_stub_scanning(const elfcpp::Shdr<32, big_endian>&,
const Relobj::Output_sections&,
- const Symbol_table *, const unsigned char*);
+ const Symbol_table*, const unsigned char*);
// Whether a section is a scannable text section.
bool
section_is_scannable(const elfcpp::Shdr<32, big_endian>&, unsigned int,
- const Output_section*, const Symbol_table *);
+ const Output_section*, const Symbol_table*);
// Whether a section needs to be scanned for the Cortex-A8 erratum.
bool
section_needs_cortex_a8_stub_scanning(const elfcpp::Shdr<32, big_endian>&,
unsigned int, Output_section*,
- const Symbol_table *);
+ const Symbol_table*);
// Scan a section for the Cortex-A8 erratum.
void
Target_arm<big_endian>*);
// Find the linked text section of an EXIDX section by looking at the
- // first reloction of the EXIDX section. PSHDR points to the section
+ // first relocation of the EXIDX section. PSHDR points to the section
// headers of a relocation section and PSYMS points to the local symbols.
// PSHNDX points to a location storing the text section index if found.
// Return whether we can find the linked section.
: Sized_dynobj<32, big_endian>(name, input_file, offset, ehdr),
processor_specific_flags_(0), attributes_section_data_(NULL)
{ }
-
+
~Arm_dynobj()
{ delete this->attributes_section_data_; }
{ }
// Accessors: This is a read-only class.
-
+
// Return the relocation stub associated with this relocation if there is
// one.
const Reloc_stub*
reloc_stub() const
- { return this->reloc_stub_; }
-
+ { return this->reloc_stub_; }
+
// Return the relocation type.
unsigned int
r_type() const
// relocation that needs to be applied in a static link.
void
add_static_reloc(unsigned int got_offset, unsigned int r_type,
- Sized_relobj<32, big_endian>* relobj, unsigned int index)
+ Sized_relobj_file<32, big_endian>* relobj,
+ unsigned int index)
{
this->static_relocs_.push_back(Static_reloc(got_offset, r_type, relobj,
index));
// Same as the above but for a local symbol in OBJECT with INDEX.
void
add_tls_gd32_with_static_reloc(unsigned int got_type,
- Sized_relobj<32, big_endian>* object,
+ Sized_relobj_file<32, big_endian>* object,
unsigned int index);
protected:
{ this->u_.global.symbol = gsym; }
Static_reloc(unsigned int got_offset, unsigned int r_type,
- Sized_relobj<32, big_endian>* relobj, unsigned int index)
+ Sized_relobj_file<32, big_endian>* relobj, unsigned int index)
: got_offset_(got_offset), r_type_(r_type), symbol_is_global_(false)
{
this->u_.local.relobj = relobj;
}
// For a relocation against a local symbol, the defining object.
- Sized_relobj<32, big_endian>*
+ Sized_relobj_file<32, big_endian>*
relobj() const
{
gold_assert(!this->symbol_is_global_);
struct
{
// For a local symbol, the defining object.
- Sized_relobj<32, big_endian>* relobj;
+ Sized_relobj_file<32, big_endian>* relobj;
// For a local symbol, the symbol index.
unsigned int index;
} local;
std::vector<Static_reloc> static_relocs_;
};
-// The ARM target has many relocation types with odd-sizes or incontigious
+// The ARM target has many relocation types with odd sizes or noncontiguous
// bits. The default handling of relocatable relocation cannot process these
// relocations. So we have to extend the default code.
-template<bool big_endian, int sh_type, typename Classify_reloc>
+template<bool big_endian, typename Classify_reloc>
class Arm_scan_relocatable_relocs :
- public Default_scan_relocatable_relocs<sh_type, Classify_reloc>
+ public Default_scan_relocatable_relocs<Classify_reloc>
{
public:
// Return the strategy to use for a local symbol which is a section
inline Relocatable_relocs::Reloc_strategy
local_section_strategy(unsigned int r_type, Relobj*)
{
- if (sh_type == elfcpp::SHT_RELA)
+ if (Classify_reloc::sh_type == elfcpp::SHT_RELA)
return Relocatable_relocs::RELOC_ADJUST_FOR_SECTION_RELA;
else
{
case elfcpp::R_ARM_TARGET1:
case elfcpp::R_ARM_TARGET2:
gold_unreachable();
- // Relocations that write full 32 bits.
+ // Relocations that write full 32 bits and
+ // have alignment of 1.
case elfcpp::R_ARM_ABS32:
case elfcpp::R_ARM_REL32:
case elfcpp::R_ARM_SBREL32:
case elfcpp::R_ARM_TLS_LDO32:
case elfcpp::R_ARM_TLS_IE32:
case elfcpp::R_ARM_TLS_LE32:
- return Relocatable_relocs::RELOC_ADJUST_FOR_SECTION_4;
+ return Relocatable_relocs::RELOC_ADJUST_FOR_SECTION_4_UNALIGNED;
default:
// For all other static relocations, return RELOC_SPECIAL.
return Relocatable_relocs::RELOC_SPECIAL;
}
};
-// Utilities for manipulating integers of up to 32-bits
-
-namespace utils
-{
- // Sign extend an n-bit unsigned integer stored in an uint32_t into
- // an int32_t. NO_BITS must be between 1 to 32.
- template<int no_bits>
- static inline int32_t
- sign_extend(uint32_t bits)
- {
- gold_assert(no_bits >= 0 && no_bits <= 32);
- if (no_bits == 32)
- return static_cast<int32_t>(bits);
- uint32_t mask = (~((uint32_t) 0)) >> (32 - no_bits);
- bits &= mask;
- uint32_t top_bit = 1U << (no_bits - 1);
- int32_t as_signed = static_cast<int32_t>(bits);
- return (bits & top_bit) ? as_signed + (-top_bit * 2) : as_signed;
- }
-
- // Detects overflow of an NO_BITS integer stored in a uint32_t.
- template<int no_bits>
- static inline bool
- has_overflow(uint32_t bits)
- {
- gold_assert(no_bits >= 0 && no_bits <= 32);
- if (no_bits == 32)
- return false;
- int32_t max = (1 << (no_bits - 1)) - 1;
- int32_t min = -(1 << (no_bits - 1));
- int32_t as_signed = static_cast<int32_t>(bits);
- return as_signed > max || as_signed < min;
- }
-
- // Detects overflow of an NO_BITS integer stored in a uint32_t when it
- // fits in the given number of bits as either a signed or unsigned value.
- // For example, has_signed_unsigned_overflow<8> would check
- // -128 <= bits <= 255
- template<int no_bits>
- static inline bool
- has_signed_unsigned_overflow(uint32_t bits)
- {
- gold_assert(no_bits >= 2 && no_bits <= 32);
- if (no_bits == 32)
- return false;
- int32_t max = static_cast<int32_t>((1U << no_bits) - 1);
- int32_t min = -(1 << (no_bits - 1));
- int32_t as_signed = static_cast<int32_t>(bits);
- return as_signed > max || as_signed < min;
- }
-
- // Select bits from A and B using bits in MASK. For each n in [0..31],
- // the n-th bit in the result is chosen from the n-th bits of A and B.
- // A zero selects A and a one selects B.
- static inline uint32_t
- bit_select(uint32_t a, uint32_t b, uint32_t mask)
- { return (a & ~mask) | (b & mask); }
-};
-
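// A minimal standalone sketch (not part of gold) showing, with concrete
// values, the two operations the relocation code below uses most often:
// sign-extending an N-bit field and splicing bits into an instruction
// word. The removed utils helpers provided these, and the Bits<N>
// templates used elsewhere in this patch (sign_extend32, has_overflow32,
// bit_select32, ...) are assumed to keep the same semantics. Names here
// are illustrative stand-ins only.

#include <cassert>
#include <cstdint>

// Sign-extend the low NO_BITS bits of BITS (assumes 0 < NO_BITS < 32).
template<int no_bits>
static inline int32_t
sketch_sign_extend32(uint32_t bits)
{
  const uint32_t sign_bit = 1U << (no_bits - 1);
  bits &= (1U << no_bits) - 1;
  return static_cast<int32_t>(bits ^ sign_bit) - static_cast<int32_t>(sign_bit);
}

// For each bit, a 0 in MASK selects the bit from A and a 1 selects it from B.
static inline uint32_t
sketch_bit_select32(uint32_t a, uint32_t b, uint32_t mask)
{ return (a & ~mask) | (b & mask); }

int
main()
{
  // A 16-bit immediate of 0xfffc is the addend -4 once sign-extended.
  assert(sketch_sign_extend32<16>(0xfffcU) == -4);
  // 0x800 is the sign bit of a 12-bit field, so it reads back as -2048.
  assert(sketch_sign_extend32<12>(0x800U) == -2048);
  // Splice a new 12-bit immediate into an instruction, keeping other bits.
  assert(sketch_bit_select32(0xe59f0000U, 0x00000123U, 0x00000fffU)
         == 0xe59f0123U);
  return 0;
}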
template<bool big_endian>
class Target_arm : public Sized_target<32, big_endian>
{
// When we are relocating a stub, we pass this as the relocation number.
static const size_t fake_relnum_for_stubs = static_cast<size_t>(-1);
- Target_arm()
- : Sized_target<32, big_endian>(&arm_info),
- got_(NULL), plt_(NULL), got_plt_(NULL), rel_dyn_(NULL),
- copy_relocs_(elfcpp::R_ARM_COPY), dynbss_(NULL),
+ Target_arm(const Target::Target_info* info = &arm_info)
+ : Sized_target<32, big_endian>(info),
+ got_(NULL), plt_(NULL), got_plt_(NULL), got_irelative_(NULL),
+ rel_dyn_(NULL), rel_irelative_(NULL), copy_relocs_(elfcpp::R_ARM_COPY),
got_mod_index_offset_(-1U), tls_base_symbol_defined_(false),
stub_tables_(), stub_factory_(Stub_factory::get_instance()),
- may_use_blx_(false), should_force_pic_veneer_(false),
+ should_force_pic_veneer_(false),
arm_input_section_map_(), attributes_section_data_(NULL),
- fix_cortex_a8_(false), cortex_a8_relocs_info_()
+ fix_cortex_a8_(false), cortex_a8_relocs_info_(),
+ target1_reloc_(elfcpp::R_ARM_ABS32),
+ // This can be any reloc type but usually is R_ARM_GOT_PREL.
+ target2_reloc_(elfcpp::R_ARM_GOT_PREL)
{ }
- // Virtual function which is set to return true by a target if
- // it can use relocation types to determine if a function's
- // pointer is taken.
- virtual bool
- can_check_for_function_pointers() const
- { return true; }
-
- // Whether a section called SECTION_NAME may have function pointers to
- // sections not eligible for safe ICF folding.
- virtual bool
- section_may_have_icf_unsafe_pointers(const char* section_name) const
- {
- return (!is_prefix_of(".ARM.exidx", section_name)
- && !is_prefix_of(".ARM.extab", section_name)
- && Target::section_may_have_icf_unsafe_pointers(section_name));
- }
-
- // Whether we can use BLX.
- bool
- may_use_blx() const
- { return this->may_use_blx_; }
-
- // Set use-BLX flag.
- void
- set_may_use_blx(bool value)
- { this->may_use_blx_ = value; }
-
// Whether we force PIC branch veneers.
bool
should_force_pic_veneer() const
void
set_should_force_pic_veneer(bool value)
{ this->should_force_pic_veneer_ = value; }
-
+
// Whether we use THUMB-2 instructions.
bool
using_thumb2() const
|| arch == elfcpp::TAG_CPU_ARCH_V7
|| arch == elfcpp::TAG_CPU_ARCH_V7E_M);
}
-
- // Process the relocations to determine unreferenced sections for
+
+ // Whether we have v4T interworking instructions available.
+ bool
+ may_use_v4t_interworking() const
+ {
+ Object_attribute* attr =
+ this->get_aeabi_object_attribute(elfcpp::Tag_CPU_arch);
+ int arch = attr->int_value();
+ return (arch != elfcpp::TAG_CPU_ARCH_PRE_V4
+ && arch != elfcpp::TAG_CPU_ARCH_V4);
+ }
+
+ // Whether we have v5T interworking instructions available.
+ bool
+ may_use_v5t_interworking() const
+ {
+ Object_attribute* attr =
+ this->get_aeabi_object_attribute(elfcpp::Tag_CPU_arch);
+ int arch = attr->int_value();
+ if (parameters->options().fix_arm1176())
+ return (arch == elfcpp::TAG_CPU_ARCH_V6T2
+ || arch == elfcpp::TAG_CPU_ARCH_V7
+ || arch == elfcpp::TAG_CPU_ARCH_V6_M
+ || arch == elfcpp::TAG_CPU_ARCH_V6S_M
+ || arch == elfcpp::TAG_CPU_ARCH_V7E_M);
+ else
+ return (arch != elfcpp::TAG_CPU_ARCH_PRE_V4
+ && arch != elfcpp::TAG_CPU_ARCH_V4
+ && arch != elfcpp::TAG_CPU_ARCH_V4T);
+ }
+
+ // Process the relocations to determine unreferenced sections for
// garbage collection.
void
gc_process_relocs(Symbol_table* symtab,
Layout* layout,
- Sized_relobj<32, big_endian>* object,
+ Sized_relobj_file<32, big_endian>* object,
unsigned int data_shndx,
unsigned int sh_type,
const unsigned char* prelocs,
void
scan_relocs(Symbol_table* symtab,
Layout* layout,
- Sized_relobj<32, big_endian>* object,
+ Sized_relobj_file<32, big_endian>* object,
unsigned int data_shndx,
unsigned int sh_type,
const unsigned char* prelocs,
uint64_t
do_dynsym_value(const Symbol*) const;
+ // Return the plt address for globals. Since we have irelative plt entries,
+ // address calculation is not as straightforward as plt_address + plt_offset.
+ uint64_t
+ do_plt_address_for_global(const Symbol* gsym) const
+ { return this->plt_section()->address_for_global(gsym); }
+
+ // Return the plt address for locals. Since we have irelative plt entries,
+ // address calculation is not as straightforward as plt_address + plt_offset.
+ uint64_t
+ do_plt_address_for_local(const Relobj* relobj, unsigned int symndx) const
+ { return this->plt_section()->address_for_local(relobj, symndx); }
+
// Relocate a section.
void
relocate_section(const Relocate_info<32, big_endian>*,
void
scan_relocatable_relocs(Symbol_table* symtab,
Layout* layout,
- Sized_relobj<32, big_endian>* object,
+ Sized_relobj_file<32, big_endian>* object,
unsigned int data_shndx,
unsigned int sh_type,
const unsigned char* prelocs,
const unsigned char* plocal_symbols,
Relocatable_relocs*);
- // Relocate a section during a relocatable link.
+ // Scan the relocs for --emit-relocs.
+ void
+ emit_relocs_scan(Symbol_table* symtab,
+ Layout* layout,
+ Sized_relobj_file<32, big_endian>* object,
+ unsigned int data_shndx,
+ unsigned int sh_type,
+ const unsigned char* prelocs,
+ size_t reloc_count,
+ Output_section* output_section,
+ bool needs_special_offset_handling,
+ size_t local_symbol_count,
+ const unsigned char* plocal_syms,
+ Relocatable_relocs* rr);
+
+ // Emit relocations for a section.
void
- relocate_for_relocatable(const Relocate_info<32, big_endian>*,
- unsigned int sh_type,
- const unsigned char* prelocs,
- size_t reloc_count,
- Output_section* output_section,
- off_t offset_in_output_section,
- const Relocatable_relocs*,
- unsigned char* view,
- Arm_address view_address,
- section_size_type view_size,
- unsigned char* reloc_view,
- section_size_type reloc_view_size);
+ relocate_relocs(const Relocate_info<32, big_endian>*,
+ unsigned int sh_type,
+ const unsigned char* prelocs,
+ size_t reloc_count,
+ Output_section* output_section,
+ typename elfcpp::Elf_types<32>::Elf_Off
+ offset_in_output_section,
+ unsigned char* view,
+ Arm_address view_address,
+ section_size_type view_size,
+ unsigned char* reloc_view,
+ section_size_type reloc_view_size);
// Perform target-specific processing in a relocatable link. This is
// only used if we use the relocation strategy RELOC_SPECIAL.
const unsigned char* preloc_in,
size_t relnum,
Output_section* output_section,
- off_t offset_in_output_section,
+ typename elfcpp::Elf_types<32>::Elf_Off
+ offset_in_output_section,
unsigned char* view,
typename elfcpp::Elf_types<32>::Elf_Addr
view_address,
section_size_type view_size,
unsigned char* preloc_out);
-
+
// Return whether SYM is defined by the ABI.
bool
- do_is_defined_by_abi(Symbol* sym) const
+ do_is_defined_by_abi(const Symbol* sym) const
{ return strcmp(sym->name(), "__tls_get_addr") == 0; }
// Return whether there is a GOT section.
// Return the size of the GOT section.
section_size_type
- got_size()
+ got_size() const
{
gold_assert(this->got_ != NULL);
return this->got_->data_size();
}
+ // Return the number of entries in the GOT.
+ unsigned int
+ got_entry_count() const
+ {
+ if (!this->has_got_section())
+ return 0;
+ return this->got_size() / 4;
+ }
+
+ // Return the number of entries in the PLT.
+ unsigned int
+ plt_entry_count() const;
+
+ // Return the offset of the first non-reserved PLT entry.
+ unsigned int
+ first_plt_entry_offset() const;
+
+ // Return the size of each PLT entry.
+ unsigned int
+ plt_entry_size() const;
+
+ // Get the section to use for IRELATIVE relocations, create it if necessary.
+ Reloc_section*
+ rel_irelative_section(Layout*);
+
// Map platform-specific reloc types
- static unsigned int
- get_real_reloc_type (unsigned int r_type);
+ unsigned int
+ get_real_reloc_type(unsigned int r_type) const;
//
// Methods to support stub-generations.
//
-
+
// Return the stub factory
const Stub_factory&
stub_factory() const
bool, const unsigned char*, Arm_address,
section_size_type);
- // Relocate a stub.
+ // Relocate a stub.
void
relocate_stub(Stub*, const Relocate_info<32, big_endian>*,
Output_section*, unsigned char*, Arm_address,
section_size_type);
-
+
// Get the default ARM target.
static Target_arm<big_endian>*
default_target()
unsigned char*, Arm_address);
protected:
+ // Make the PLT-generator object.
+ Output_data_plt_arm<big_endian>*
+ make_data_plt(Layout* layout,
+ Arm_output_data_got<big_endian>* got,
+ Output_data_space* got_plt,
+ Output_data_space* got_irelative)
+ { return this->do_make_data_plt(layout, got, got_plt, got_irelative); }
+
// Make an ELF object.
Object*
do_make_elf_object(const std::string&, Input_file*, off_t,
{ return new Arm_output_section<big_endian>(name, type, flags); }
void
- do_adjust_elf_header(unsigned char* view, int len) const;
+ do_adjust_elf_header(unsigned char* view, int len);
// We only need to generate stubs, and hence perform relaxation if we are
// not doing relocatable linking.
{ return !parameters->options().relocatable(); }
bool
- do_relax(int, const Input_objects*, Symbol_table*, Layout*);
+ do_relax(int, const Input_objects*, Symbol_table*, Layout*, const Task*);
// Determine whether an object attribute tag takes an integer, a
// string or both.
// as the default.
gold_assert(arm_reloc_property_table == NULL);
arm_reloc_property_table = new Arm_reloc_property_table();
+ if (parameters->options().user_set_target1_rel())
+ {
+ // FIXME: This is not strictly compatible with ld, which allows both
+ // --target1-abs and --target1-rel to be given.
+ if (parameters->options().user_set_target1_abs())
+ gold_error(_("Cannot use both --target1-abs and --target1-rel."));
+ else
+ this->target1_reloc_ = elfcpp::R_ARM_REL32;
+ }
+ // We don't need to handle --target1-abs because target1_reloc_ is set
+ // to elfcpp::R_ARM_ABS32 in the member initializer list.
+
+ if (parameters->options().user_set_target2())
+ {
+ const char* target2 = parameters->options().target2();
+ if (strcmp(target2, "rel") == 0)
+ this->target2_reloc_ = elfcpp::R_ARM_REL32;
+ else if (strcmp(target2, "abs") == 0)
+ this->target2_reloc_ = elfcpp::R_ARM_ABS32;
+ else if (strcmp(target2, "got-rel") == 0)
+ this->target2_reloc_ = elfcpp::R_ARM_GOT_PREL;
+ else
+ gold_unreachable();
+ }
+ }
+
+ // Virtual function which is set to return true by a target if
+ // it can use relocation types to determine if a function's
+ // pointer is taken.
+ virtual bool
+ do_can_check_for_function_pointers() const
+ { return true; }
+
+ // Whether a section called SECTION_NAME may have function pointers to
+ // sections not eligible for safe ICF folding.
+ virtual bool
+ do_section_may_have_icf_unsafe_pointers(const char* section_name) const
+ {
+ return (!is_prefix_of(".ARM.exidx", section_name)
+ && !is_prefix_of(".ARM.extab", section_name)
+ && Target::do_section_may_have_icf_unsafe_pointers(section_name));
+ }
+
+ virtual void
+ do_define_standard_symbols(Symbol_table*, Layout*);
+
+ virtual Output_data_plt_arm<big_endian>*
+ do_make_data_plt(Layout* layout,
+ Arm_output_data_got<big_endian>* got,
+ Output_data_space* got_plt,
+ Output_data_space* got_irelative)
+ {
+ gold_assert(got_plt != NULL && got_irelative != NULL);
+ if (parameters->options().long_plt())
+ return new Output_data_plt_arm_long<big_endian>(
+ layout, got, got_plt, got_irelative);
+ else
+ return new Output_data_plt_arm_short<big_endian>(
+ layout, got, got_plt, got_irelative);
}
private:
: issued_non_pic_error_(false)
{ }
+ static inline int
+ get_reference_flags(unsigned int r_type);
+
inline void
local(Symbol_table* symtab, Layout* layout, Target_arm* target,
- Sized_relobj<32, big_endian>* object,
+ Sized_relobj_file<32, big_endian>* object,
unsigned int data_shndx,
Output_section* output_section,
const elfcpp::Rel<32, big_endian>& reloc, unsigned int r_type,
- const elfcpp::Sym<32, big_endian>& lsym);
+ const elfcpp::Sym<32, big_endian>& lsym,
+ bool is_discarded);
inline void
global(Symbol_table* symtab, Layout* layout, Target_arm* target,
- Sized_relobj<32, big_endian>* object,
+ Sized_relobj_file<32, big_endian>* object,
unsigned int data_shndx,
Output_section* output_section,
const elfcpp::Rel<32, big_endian>& reloc, unsigned int r_type,
inline bool
local_reloc_may_be_function_pointer(Symbol_table* , Layout* , Target_arm* ,
- Sized_relobj<32, big_endian>* ,
- unsigned int ,
- Output_section* ,
- const elfcpp::Rel<32, big_endian>& ,
+ Sized_relobj_file<32, big_endian>* ,
unsigned int ,
- const elfcpp::Sym<32, big_endian>&);
+ Output_section* ,
+ const elfcpp::Rel<32, big_endian>& ,
+ unsigned int ,
+ const elfcpp::Sym<32, big_endian>&);
inline bool
global_reloc_may_be_function_pointer(Symbol_table* , Layout* , Target_arm* ,
- Sized_relobj<32, big_endian>* ,
- unsigned int ,
- Output_section* ,
- const elfcpp::Rel<32, big_endian>& ,
+ Sized_relobj_file<32, big_endian>* ,
+ unsigned int ,
+ Output_section* ,
+ const elfcpp::Rel<32, big_endian>& ,
unsigned int , Symbol*);
private:
static void
- unsupported_reloc_local(Sized_relobj<32, big_endian>*,
+ unsupported_reloc_local(Sized_relobj_file<32, big_endian>*,
unsigned int r_type);
static void
- unsupported_reloc_global(Sized_relobj<32, big_endian>*,
+ unsupported_reloc_global(Sized_relobj_file<32, big_endian>*,
unsigned int r_type, Symbol*);
void
if (sym->is_undefined() && !parameters->options().shared())
return false;
+ if (sym->type() == elfcpp::STT_GNU_IFUNC)
+ return true;
+
return (!parameters->doing_static_link()
&& (sym->type() == elfcpp::STT_FUNC
|| sym->type() == elfcpp::STT_ARM_TFUNC)
inline bool
possible_function_pointer_reloc(unsigned int r_type);
+ // Whether a plt entry is needed for ifunc.
+ bool
+ reloc_needs_plt_for_ifunc(Sized_relobj_file<32, big_endian>*,
+ unsigned int r_type);
+
// Whether we have issued an error about a non-PIC compilation.
bool issued_non_pic_error_;
};
// Return whether the static relocation needs to be applied.
inline bool
should_apply_static_reloc(const Sized_symbol<32>* gsym,
- int ref_flags,
+ unsigned int r_type,
bool is_32bit,
Output_section* output_section);
// Do a relocation. Return false if the caller should not issue
// any warnings about this relocation.
inline bool
- relocate(const Relocate_info<32, big_endian>*, Target_arm*,
- Output_section*, size_t relnum,
- const elfcpp::Rel<32, big_endian>&,
- unsigned int r_type, const Sized_symbol<32>*,
- const Symbol_value<32>*,
- unsigned char*, Arm_address,
- section_size_type);
+ relocate(const Relocate_info<32, big_endian>*, unsigned int,
+ Target_arm*, Output_section*, size_t, const unsigned char*,
+ const Sized_symbol<32>*, const Symbol_value<32>*,
+ unsigned char*, Arm_address, section_size_type);
// Return whether we want to pass flag NON_PIC_REF for this
// reloc. This means the relocation type accesses a symbol not via
// GOT or PLT.
static inline bool
- reloc_is_non_pic (unsigned int r_type)
+ reloc_is_non_pic(unsigned int r_type)
{
switch (r_type)
{
// Do a TLS relocation.
inline typename Arm_relocate_functions<big_endian>::Status
relocate_tls(const Relocate_info<32, big_endian>*, Target_arm<big_endian>*,
- size_t, const elfcpp::Rel<32, big_endian>&, unsigned int,
+ size_t, const elfcpp::Rel<32, big_endian>&, unsigned int,
const Sized_symbol<32>*, const Symbol_value<32>*,
unsigned char*, elfcpp::Elf_types<32>::Elf_Addr,
section_size_type);
};
- // A class which returns the size required for a relocation type,
- // used while scanning relocs during a relocatable link.
- class Relocatable_size_for_reloc
+ // A class for inquiring about properties of a relocation,
+ // used while scanning relocs during a relocatable link and
+ // garbage collection.
+ class Classify_reloc :
+ public gold::Default_classify_reloc<elfcpp::SHT_REL, 32, big_endian>
{
public:
- unsigned int
+ typedef typename Reloc_types<elfcpp::SHT_REL, 32, big_endian>::Reloc
+ Reltype;
+
+ // Return the explicit addend of the relocation (return 0 for SHT_REL).
+ static typename elfcpp::Elf_types<32>::Elf_Swxword
+ get_r_addend(const Reltype*)
+ { return 0; }
+
+ // Return the size of the addend of the relocation (only used for SHT_REL).
+ static unsigned int
get_size_for_reloc(unsigned int, Relobj*);
};
return this->got_plt_;
}
+ // Create the PLT section.
+ void
+ make_plt_section(Symbol_table* symtab, Layout* layout);
+
// Create a PLT entry for a global symbol.
void
make_plt_entry(Symbol_table*, Layout*, Symbol*);
+ // Create a PLT entry for a local STT_GNU_IFUNC symbol.
+ void
+ make_local_ifunc_plt_entry(Symbol_table*, Layout*,
+ Sized_relobj_file<32, big_endian>* relobj,
+ unsigned int local_sym_index);
+
// Define the _TLS_MODULE_BASE_ symbol in the TLS segment.
void
define_tls_base_symbol(Symbol_table*, Layout*);
// Create a GOT entry for the TLS module index.
unsigned int
got_mod_index_entry(Symbol_table* symtab, Layout* layout,
- Sized_relobj<32, big_endian>* object);
+ Sized_relobj_file<32, big_endian>* object);
// Get the PLT section.
const Output_data_plt_arm<big_endian>*
// Add a potential copy relocation.
void
copy_reloc(Symbol_table* symtab, Layout* layout,
- Sized_relobj<32, big_endian>* object,
+ Sized_relobj_file<32, big_endian>* object,
unsigned int shndx, Output_section* output_section,
Symbol* sym, const elfcpp::Rel<32, big_endian>& reloc)
{
+ unsigned int r_type = elfcpp::elf_r_type<32>(reloc.get_r_info());
this->copy_relocs_.copy_reloc(symtab, layout,
symtab->get_sized_symbol<32>(sym),
- object, shndx, output_section, reloc,
+ object, shndx, output_section,
+ r_type, reloc.get_r_offset(), 0,
this->rel_dyn_section(layout));
}
static std::string
tag_cpu_name_value(unsigned int);
+ // Query attributes object to see if integer divide instructions may be
+ // present in an object.
+ static bool
+ attributes_accept_div(int arch, int profile,
+ const Object_attribute* div_attr);
+
+ // Query attributes object to see if integer divide instructions are
+ // forbidden to be in the object. This is not the inverse of
+ // attributes_accept_div.
+ static bool
+ attributes_forbid_div(const Object_attribute* div_attr);
+
// Merge object attributes from input object and those in the output.
void
merge_object_attributes(const char*, const Attributes_section_data*);
// Group input sections for stub generation.
void
- group_sections(Layout*, section_size_type, bool);
+ group_sections(Layout*, section_size_type, bool, const Task*);
// Scan a relocation for stub generation.
void
// Fix .ARM.exidx section coverage.
void
fix_exidx_coverage(Layout*, const Input_objects*,
- Arm_output_section<big_endian>*, Symbol_table*);
+ Arm_output_section<big_endian>*, Symbol_table*,
+ const Task*);
// Functors for STL set.
struct output_section_address_less_than
static const Target::Target_info arm_info;
// The types of GOT entries needed for this platform.
+ // These values are exposed to the ABI in an incremental link.
+ // Do not renumber existing values without changing the version
+ // number of the .gnu_incremental_inputs section.
enum Got_type
{
GOT_TYPE_STANDARD = 0, // GOT entry for a regular symbol
Arm_input_section<big_endian>*,
Section_id_hash>
Arm_input_section_map;
-
+
// Map output addresses to relocs for Cortex-A8 erratum.
typedef Unordered_map<Arm_address, const Cortex_a8_reloc*>
Cortex_a8_relocs_info;
Output_data_plt_arm<big_endian>* plt_;
// The GOT PLT section.
Output_data_space* got_plt_;
+ // The GOT section for IRELATIVE relocations.
+ Output_data_space* got_irelative_;
// The dynamic reloc section.
Reloc_section* rel_dyn_;
+ // The section to use for IRELATIVE relocs.
+ Reloc_section* rel_irelative_;
// Relocs saved to avoid a COPY reloc.
Copy_relocs<elfcpp::SHT_REL, 32, big_endian> copy_relocs_;
- // Space for variables copied with a COPY reloc.
- Output_data_space* dynbss_;
// Offset of the GOT entry for the TLS module index.
unsigned int got_mod_index_offset_;
// True if the _TLS_MODULE_BASE_ symbol has been defined.
Stub_table_list stub_tables_;
// Stub factory.
const Stub_factory &stub_factory_;
- // Whether we can use BLX.
- bool may_use_blx_;
// Whether we force PIC branch veneers.
bool should_force_pic_veneer_;
// Map for locating Arm_input_sections.
bool fix_cortex_a8_;
// Map addresses to relocs for Cortex-A8 erratum.
Cortex_a8_relocs_info cortex_a8_relocs_info_;
+ // What R_ARM_TARGET1 maps to. It can be R_ARM_REL32 or R_ARM_ABS32.
+ unsigned int target1_reloc_;
+ // What R_ARM_TARGET2 maps to. It should be one of R_ARM_REL32, R_ARM_ABS32
+ // and R_ARM_GOT_PREL.
+ unsigned int target2_reloc_;
};
template<bool big_endian>
false, // has_resolve
false, // has_code_fill
true, // is_default_stack_executable
+ false, // can_icf_inline_merge_sections
'\0', // wrap_char
"/usr/lib/libc.so.1", // dynamic_linker
0x8000, // default_text_segment_address
0x1000, // abi_pagesize (overridable by -z max-page-size)
0x1000, // common_pagesize (overridable by -z common-page-size)
+ false, // isolate_execinstr
+ 0, // rosegment_gap
elfcpp::SHN_UNDEF, // small_common_shndx
elfcpp::SHN_UNDEF, // large_common_shndx
0, // small_common_section_flags
0, // large_common_section_flags
".ARM.attributes", // attributes_section
- "aeabi" // attributes_vendor
+ "aeabi", // attributes_vendor
+ "_start", // entry_symbol_name
+ 32, // hash_entry_size
};
// Arm relocate functions class
typedef enum
{
STATUS_OKAY, // No error during relocation.
- STATUS_OVERFLOW, // Relocation oveflow.
+ STATUS_OVERFLOW, // Relocation overflow.
STATUS_BAD_RELOC // Relocation cannot be applied.
} Status;
// Encoding of imm16 argument for movt and movw ARM instructions
// from ARM ARM:
- //
+ //
// imm16 := imm4 | imm12
//
- // f e d c b a 9 8 7 6 5 4 3 2 1 0 f e d c b a 9 8 7 6 5 4 3 2 1 0
+ // f e d c b a 9 8 7 6 5 4 3 2 1 0 f e d c b a 9 8 7 6 5 4 3 2 1 0
// +-------+---------------+-------+-------+-----------------------+
// | | |imm4 | |imm12 |
// +-------+---------------+-------+-------+-----------------------+
{
// According to the Elf ABI for ARM Architecture the immediate
// field is sign-extended to form the addend.
- return utils::sign_extend<16>(((val >> 4) & 0xf000) | (val & 0xfff));
+ return Bits<16>::sign_extend32(((val >> 4) & 0xf000) | (val & 0xfff));
}
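// A standalone worked example (not gold code) of the imm16 packing shown
// in the diagram above: imm4 occupies instruction bits [19:16] and imm12
// bits [11:0]. The helpers below mirror the extract/insert pair but use
// hypothetical names and plain integer types.

#include <cassert>
#include <cstdint>

static inline int32_t
sketch_extract_arm_imm16(uint32_t insn)
{
  // Reassemble imm4:imm12 and sign-extend from 16 bits, as the EABI
  // requires when forming the addend.
  uint32_t imm16 = ((insn >> 4) & 0xf000U) | (insn & 0x0fffU);
  return static_cast<int32_t>(static_cast<int16_t>(imm16));
}

static inline uint32_t
sketch_insert_arm_imm16(uint32_t insn, uint32_t x)
{
  uint32_t result = insn & 0xfff0f000U;  // clear imm4 and imm12
  result |= x & 0x0fffU;                 // imm16[11:0]  -> imm12
  result |= (x & 0xf000U) << 4;          // imm16[15:12] -> imm4
  return result;
}

int
main()
{
  // movw r0, #0xbeef assembles to 0xe30b0eef: imm4 = 0xb, imm12 = 0xeef.
  uint32_t insn = sketch_insert_arm_imm16(0xe3000000U, 0xbeefU);
  assert(insn == 0xe30b0eefU);
  assert((sketch_extract_arm_imm16(insn) & 0xffff) == 0xbeef);
  return 0;
}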
// Insert X into VAL based on the ARM instruction encoding described
// Encoding of imm16 argument for movt and movw Thumb2 instructions
// from ARM ARM:
- //
+ //
// imm16 := imm4 | i | imm3 | imm8
//
- // f e d c b a 9 8 7 6 5 4 3 2 1 0 f e d c b a 9 8 7 6 5 4 3 2 1 0
+ // f e d c b a 9 8 7 6 5 4 3 2 1 0 f e d c b a 9 8 7 6 5 4 3 2 1 0
// +---------+-+-----------+-------++-+-----+-------+---------------+
// | |i| |imm4 || |imm3 | |imm8 |
// +---------+-+-----------+-------++-+-----+-------+---------------+
{
// According to the Elf ABI for ARM Architecture the immediate
// field is sign-extended to form the addend.
- return utils::sign_extend<16>(((val >> 4) & 0xf000)
- | ((val >> 15) & 0x0800)
- | ((val >> 4) & 0x0700)
- | (val & 0x00ff));
+ return Bits<16>::sign_extend32(((val >> 4) & 0xf000)
+ | ((val >> 15) & 0x0800)
+ | ((val >> 4) & 0x0700)
+ | (val & 0x00ff));
}
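// A standalone worked example (not gold code) of the Thumb2 imm16 packing
// shown above. In the combined 32-bit value (upper halfword << 16 | lower
// halfword), imm4 sits at bits [19:16], i at bit 26, imm3 at bits [14:12]
// and imm8 at bits [7:0]. Names below are illustrative only.

#include <cassert>
#include <cstdint>

static inline int32_t
sketch_extract_thumb_imm16(uint32_t val)
{
  uint32_t imm16 = (((val >> 4) & 0xf000U)    // imm4 -> imm16[15:12]
                    | ((val >> 15) & 0x0800U) // i    -> imm16[11]
                    | ((val >> 4) & 0x0700U)  // imm3 -> imm16[10:8]
                    | (val & 0x00ffU));       // imm8 -> imm16[7:0]
  // Sign-extend from 16 bits to form the addend.
  return static_cast<int32_t>(static_cast<int16_t>(imm16));
}

static inline uint32_t
sketch_insert_thumb_imm16(uint32_t val, uint32_t x)
{
  uint32_t result = val & 0xfbf08f00U;  // clear i, imm4, imm3 and imm8
  result |= (x & 0xf000U) << 4;         // imm16[15:12] -> imm4
  result |= (x & 0x0800U) << 15;        // imm16[11]    -> i
  result |= (x & 0x0700U) << 4;         // imm16[10:8]  -> imm3
  result |= x & 0x00ffU;                // imm16[7:0]   -> imm8
  return result;
}

int
main()
{
  // Thumb2 movw r0, #0xbeef assembles to the halfwords f64b 60ef.
  uint32_t insn = sketch_insert_thumb_imm16(0xf2400000U, 0xbeefU);
  assert(insn == 0xf64b60efU);
  assert((sketch_extract_thumb_imm16(insn) & 0xffff) == 0xbeef);
  return 0;
}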
// Insert X into VAL based on the Thumb2 instruction encoding
// Handle ARM long branches.
static typename This::Status
arm_branch_common(unsigned int, const Relocate_info<32, big_endian>*,
- unsigned char *, const Sized_symbol<32>*,
+ unsigned char*, const Sized_symbol<32>*,
const Arm_relobj<big_endian>*, unsigned int,
const Symbol_value<32>*, Arm_address, Arm_address, bool);
// Handle THUMB long branches.
static typename This::Status
thumb_branch_common(unsigned int, const Relocate_info<32, big_endian>*,
- unsigned char *, const Sized_symbol<32>*,
+ unsigned char*, const Sized_symbol<32>*,
const Arm_relobj<big_endian>*, unsigned int,
const Symbol_value<32>*, Arm_address, Arm_address, bool);
uint32_t i1 = j1 ^ s ? 0 : 1;
uint32_t i2 = j2 ^ s ? 0 : 1;
- return utils::sign_extend<25>((s << 24) | (i1 << 23) | (i2 << 22)
- | (upper << 12) | (lower << 1));
+ return Bits<25>::sign_extend32((s << 24) | (i1 << 23) | (i2 << 22)
+ | (upper << 12) | (lower << 1));
}
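// A standalone worked example (not gold code) of the offset reconstruction
// above. In the Thumb-2 B.W/BL encoding the sign bit S lives in bit 10 of
// the upper halfword, J1 and J2 in bits 13 and 11 of the lower halfword,
// and I1 = NOT(J1 XOR S), I2 = NOT(J2 XOR S); the branch offset is the
// 25-bit value S:I1:I2:imm10:imm11:0, sign-extended.

#include <cassert>
#include <cstdint>

static inline int32_t
sketch_thumb32_branch_offset(uint16_t upper_insn, uint16_t lower_insn)
{
  uint32_t s = (upper_insn >> 10) & 1U;
  uint32_t upper = upper_insn & 0x3ffU;   // imm10
  uint32_t lower = lower_insn & 0x7ffU;   // imm11
  uint32_t j1 = (lower_insn >> 13) & 1U;
  uint32_t j2 = (lower_insn >> 11) & 1U;
  uint32_t i1 = j1 ^ s ? 0 : 1;           // NOT(J1 XOR S)
  uint32_t i2 = j2 ^ s ? 0 : 1;           // NOT(J2 XOR S)
  uint32_t bits = ((s << 24) | (i1 << 23) | (i2 << 22)
                   | (upper << 12) | (lower << 1));
  // Sign-extend the 25-bit result.
  return static_cast<int32_t>(bits ^ 0x1000000U) - 0x1000000;
}

int
main()
{
  // f000 b800 encodes b.w with a zero offset (target = PC = address + 4).
  assert(sketch_thumb32_branch_offset(0xf000, 0xb800) == 0);
  // f7ff bffe encodes b.w with offset -4: the classic branch-to-self.
  assert(sketch_thumb32_branch_offset(0xf7ff, 0xbffe) == -4);
  return 0;
}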
// Insert OFFSET to a 32-bit THUMB branch and return the upper instruction.
uint32_t s = offset < 0 ? 1 : 0;
uint32_t bits = static_cast<uint32_t>(offset);
return ((lower_insn & ~0x2fffU)
- | ((((bits >> 23) & 1) ^ !s) << 13)
- | ((((bits >> 22) & 1) ^ !s) << 11)
- | ((bits >> 1) & 0x7ffU));
+ | ((((bits >> 23) & 1) ^ !s) << 13)
+ | ((((bits >> 22) & 1) ^ !s) << 11)
+ | ((bits >> 1) & 0x7ffU));
}
// Return the branch offset of a 32-bit THUMB conditional branch.
uint32_t lower = (lower_insn & 0x07ffU);
uint32_t upper = (s << 8) | (j2 << 7) | (j1 << 6) | (upper_insn & 0x003fU);
- return utils::sign_extend<21>((upper << 12) | (lower << 1));
+ return Bits<21>::sign_extend32((upper << 12) | (lower << 1));
}
// Insert OFFSET to a 32-bit THUMB conditional branch and return the upper
// Insert OFFSET to a 32-bit THUMB conditional branch and return the lower
// instruction. LOWER_INSN is the original lower instruction of the branch.
- // Caller is reponsible for overflow checking.
+ // The caller is responsible for overflow checking.
static inline uint16_t
thumb32_cond_branch_lower(uint16_t lower_insn, int32_t offset)
{
// R_ARM_ABS8: S + A
static inline typename This::Status
- abs8(unsigned char *view,
- const Sized_relobj<32, big_endian>* object,
+ abs8(unsigned char* view,
+ const Sized_relobj_file<32, big_endian>* object,
const Symbol_value<32>* psymval)
{
typedef typename elfcpp::Swap<8, big_endian>::Valtype Valtype;
- typedef typename elfcpp::Swap<32, big_endian>::Valtype Reltype;
Valtype* wv = reinterpret_cast<Valtype*>(view);
Valtype val = elfcpp::Swap<8, big_endian>::readval(wv);
- Reltype addend = utils::sign_extend<8>(val);
- Reltype x = psymval->value(object, addend);
- val = utils::bit_select(val, x, 0xffU);
+ int32_t addend = Bits<8>::sign_extend32(val);
+ Arm_address x = psymval->value(object, addend);
+ val = Bits<32>::bit_select32(val, x, 0xffU);
elfcpp::Swap<8, big_endian>::writeval(wv, val);
// R_ARM_ABS8 permits signed or unsigned results.
- int signed_x = static_cast<int32_t>(x);
- return ((signed_x < -128 || signed_x > 255)
+ return (Bits<8>::has_signed_unsigned_overflow32(x)
? This::STATUS_OVERFLOW
: This::STATUS_OKAY);
}
// R_ARM_THM_ABS5: S + A
static inline typename This::Status
- thm_abs5(unsigned char *view,
- const Sized_relobj<32, big_endian>* object,
+ thm_abs5(unsigned char* view,
+ const Sized_relobj_file<32, big_endian>* object,
const Symbol_value<32>* psymval)
{
typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
Valtype val = elfcpp::Swap<16, big_endian>::readval(wv);
Reltype addend = (val & 0x7e0U) >> 6;
Reltype x = psymval->value(object, addend);
- val = utils::bit_select(val, x << 6, 0x7e0U);
+ val = Bits<32>::bit_select32(val, x << 6, 0x7e0U);
elfcpp::Swap<16, big_endian>::writeval(wv, val);
-
- // R_ARM_ABS16 permits signed or unsigned results.
- int signed_x = static_cast<int32_t>(x);
- return ((signed_x < -32768 || signed_x > 65535)
+ return (Bits<5>::has_overflow32(x)
? This::STATUS_OVERFLOW
: This::STATUS_OKAY);
}
// R_ARM_ABS12: S + A
static inline typename This::Status
- abs12(unsigned char *view,
- const Sized_relobj<32, big_endian>* object,
+ abs12(unsigned char* view,
+ const Sized_relobj_file<32, big_endian>* object,
const Symbol_value<32>* psymval)
{
typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
Reltype addend = val & 0x0fffU;
Reltype x = psymval->value(object, addend);
- val = utils::bit_select(val, x, 0x0fffU);
+ val = Bits<32>::bit_select32(val, x, 0x0fffU);
elfcpp::Swap<32, big_endian>::writeval(wv, val);
- return (utils::has_overflow<12>(x)
+ return (Bits<12>::has_overflow32(x)
? This::STATUS_OVERFLOW
: This::STATUS_OKAY);
}
// R_ARM_ABS16: S + A
static inline typename This::Status
- abs16(unsigned char *view,
- const Sized_relobj<32, big_endian>* object,
+ abs16(unsigned char* view,
+ const Sized_relobj_file<32, big_endian>* object,
const Symbol_value<32>* psymval)
{
- typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
- typedef typename elfcpp::Swap<32, big_endian>::Valtype Reltype;
- Valtype* wv = reinterpret_cast<Valtype*>(view);
- Valtype val = elfcpp::Swap<16, big_endian>::readval(wv);
- Reltype addend = utils::sign_extend<16>(val);
- Reltype x = psymval->value(object, addend);
- val = utils::bit_select(val, x, 0xffffU);
- elfcpp::Swap<16, big_endian>::writeval(wv, val);
- return (utils::has_signed_unsigned_overflow<16>(x)
+ typedef typename elfcpp::Swap_unaligned<16, big_endian>::Valtype Valtype;
+ Valtype val = elfcpp::Swap_unaligned<16, big_endian>::readval(view);
+ int32_t addend = Bits<16>::sign_extend32(val);
+ Arm_address x = psymval->value(object, addend);
+ val = Bits<32>::bit_select32(val, x, 0xffffU);
+ elfcpp::Swap_unaligned<16, big_endian>::writeval(view, val);
+
+ // R_ARM_ABS16 permits signed or unsigned results.
+ return (Bits<16>::has_signed_unsigned_overflow32(x)
? This::STATUS_OVERFLOW
: This::STATUS_OKAY);
}
// R_ARM_ABS32: (S + A) | T
static inline typename This::Status
- abs32(unsigned char *view,
- const Sized_relobj<32, big_endian>* object,
+ abs32(unsigned char* view,
+ const Sized_relobj_file<32, big_endian>* object,
const Symbol_value<32>* psymval,
Arm_address thumb_bit)
{
- typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
- Valtype* wv = reinterpret_cast<Valtype*>(view);
- Valtype addend = elfcpp::Swap<32, big_endian>::readval(wv);
+ typedef typename elfcpp::Swap_unaligned<32, big_endian>::Valtype Valtype;
+ Valtype addend = elfcpp::Swap_unaligned<32, big_endian>::readval(view);
Valtype x = psymval->value(object, addend) | thumb_bit;
- elfcpp::Swap<32, big_endian>::writeval(wv, x);
+ elfcpp::Swap_unaligned<32, big_endian>::writeval(view, x);
return This::STATUS_OKAY;
}
// R_ARM_REL32: (S + A) | T - P
static inline typename This::Status
- rel32(unsigned char *view,
- const Sized_relobj<32, big_endian>* object,
+ rel32(unsigned char* view,
+ const Sized_relobj_file<32, big_endian>* object,
const Symbol_value<32>* psymval,
Arm_address address,
Arm_address thumb_bit)
{
- typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
- Valtype* wv = reinterpret_cast<Valtype*>(view);
- Valtype addend = elfcpp::Swap<32, big_endian>::readval(wv);
+ typedef typename elfcpp::Swap_unaligned<32, big_endian>::Valtype Valtype;
+ Valtype addend = elfcpp::Swap_unaligned<32, big_endian>::readval(view);
Valtype x = (psymval->value(object, addend) | thumb_bit) - address;
- elfcpp::Swap<32, big_endian>::writeval(wv, x);
+ elfcpp::Swap_unaligned<32, big_endian>::writeval(view, x);
return This::STATUS_OKAY;
}
// R_ARM_THM_JUMP24: (S + A) | T - P
static typename This::Status
- thm_jump19(unsigned char *view, const Arm_relobj<big_endian>* object,
+ thm_jump19(unsigned char* view, const Arm_relobj<big_endian>* object,
const Symbol_value<32>* psymval, Arm_address address,
Arm_address thumb_bit);
- // R_ARM_THM_JUMP6: S + A – P
+ // R_ARM_THM_JUMP6: S + A - P
static inline typename This::Status
- thm_jump6(unsigned char *view,
- const Sized_relobj<32, big_endian>* object,
+ thm_jump6(unsigned char* view,
+ const Sized_relobj_file<32, big_endian>* object,
const Symbol_value<32>* psymval,
Arm_address address)
{
typedef typename elfcpp::Swap<16, big_endian>::Valtype Reltype;
Valtype* wv = reinterpret_cast<Valtype*>(view);
Valtype val = elfcpp::Swap<16, big_endian>::readval(wv);
- // bit[9]:bit[7:3]:’0’ (mask: 0x02f8)
+ // bit[9]:bit[7:3]:'0' (mask: 0x02f8)
Reltype addend = (((val & 0x0200) >> 3) | ((val & 0x00f8) >> 2));
Reltype x = (psymval->value(object, addend) - address);
val = (val & 0xfd07) | ((x & 0x0040) << 3) | ((val & 0x003e) << 2);
: This::STATUS_OKAY);
}
- // R_ARM_THM_JUMP8: S + A – P
+ // R_ARM_THM_JUMP8: S + A - P
static inline typename This::Status
- thm_jump8(unsigned char *view,
- const Sized_relobj<32, big_endian>* object,
+ thm_jump8(unsigned char* view,
+ const Sized_relobj_file<32, big_endian>* object,
const Symbol_value<32>* psymval,
Arm_address address)
{
typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
- typedef typename elfcpp::Swap<16, big_endian>::Valtype Reltype;
Valtype* wv = reinterpret_cast<Valtype*>(view);
Valtype val = elfcpp::Swap<16, big_endian>::readval(wv);
- Reltype addend = utils::sign_extend<8>((val & 0x00ff) << 1);
- Reltype x = (psymval->value(object, addend) - address);
- elfcpp::Swap<16, big_endian>::writeval(wv, (val & 0xff00) | ((x & 0x01fe) >> 1));
- return (utils::has_overflow<8>(x)
+ int32_t addend = Bits<8>::sign_extend32((val & 0x00ff) << 1);
+ int32_t x = (psymval->value(object, addend) - address);
+ elfcpp::Swap<16, big_endian>::writeval(wv, ((val & 0xff00)
+ | ((x & 0x01fe) >> 1)));
+ // We do a 9-bit overflow check because x is right-shifted by 1 bit.
+ return (Bits<9>::has_overflow32(x)
? This::STATUS_OVERFLOW
: This::STATUS_OKAY);
}
- // R_ARM_THM_JUMP11: S + A – P
+ // R_ARM_THM_JUMP11: S + A - P
static inline typename This::Status
- thm_jump11(unsigned char *view,
- const Sized_relobj<32, big_endian>* object,
+ thm_jump11(unsigned char* view,
+ const Sized_relobj_file<32, big_endian>* object,
const Symbol_value<32>* psymval,
Arm_address address)
{
typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
- typedef typename elfcpp::Swap<16, big_endian>::Valtype Reltype;
Valtype* wv = reinterpret_cast<Valtype*>(view);
Valtype val = elfcpp::Swap<16, big_endian>::readval(wv);
- Reltype addend = utils::sign_extend<11>((val & 0x07ff) << 1);
- Reltype x = (psymval->value(object, addend) - address);
- elfcpp::Swap<16, big_endian>::writeval(wv, (val & 0xf800) | ((x & 0x0ffe) >> 1));
- return (utils::has_overflow<11>(x)
+ int32_t addend = Bits<11>::sign_extend32((val & 0x07ff) << 1);
+ int32_t x = (psymval->value(object, addend) - address);
+ elfcpp::Swap<16, big_endian>::writeval(wv, ((val & 0xf800)
+ | ((x & 0x0ffe) >> 1)));
+ // We do a 12-bit overflow check because x is right-shifted by 1 bit.
+ return (Bits<12>::has_overflow32(x)
? This::STATUS_OVERFLOW
: This::STATUS_OKAY);
}
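+  // Illustrative note: in the Thumb jump helpers above the encoded field
+  // holds a halfword offset, so the byte offset X is range-checked one bit
+  // wider than the field itself.  For example, the 11-bit field of
+  // R_ARM_THM_JUMP11 covers even byte offsets of roughly -2048..+2046,
+  // i.e. a reach of about +/-2KB.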
// R_ARM_GOT_PREL: GOT(S) + A - P
static inline typename This::Status
- got_prel(unsigned char *view,
+ got_prel(unsigned char* view,
Arm_address got_entry,
Arm_address address)
{
  // R_ARM_PREL31: ((S + A) | T) - P
static inline typename This::Status
- prel31(unsigned char *view,
- const Sized_relobj<32, big_endian>* object,
+ prel31(unsigned char* view,
+ const Sized_relobj_file<32, big_endian>* object,
const Symbol_value<32>* psymval,
Arm_address address,
Arm_address thumb_bit)
{
- typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
- Valtype* wv = reinterpret_cast<Valtype*>(view);
- Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
- Valtype addend = utils::sign_extend<31>(val);
+ typedef typename elfcpp::Swap_unaligned<32, big_endian>::Valtype Valtype;
+ Valtype val = elfcpp::Swap_unaligned<32, big_endian>::readval(view);
+ Valtype addend = Bits<31>::sign_extend32(val);
Valtype x = (psymval->value(object, addend) | thumb_bit) - address;
- val = utils::bit_select(val, x, 0x7fffffffU);
- elfcpp::Swap<32, big_endian>::writeval(wv, val);
- return (utils::has_overflow<31>(x) ?
- This::STATUS_OVERFLOW : This::STATUS_OKAY);
+ val = Bits<32>::bit_select32(val, x, 0x7fffffffU);
+ elfcpp::Swap_unaligned<32, big_endian>::writeval(view, val);
+ return (Bits<31>::has_overflow32(x)
+ ? This::STATUS_OVERFLOW
+ : This::STATUS_OKAY);
}
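+  // Note: prel31() deliberately preserves bit 31 of the original data word
+  // (via the 0x7fffffff mask) and only rewrites the low 31 bits with the
+  // computed offset, matching the PREL31 encoding used in EXIDX entries.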
  // R_ARM_MOVW_ABS_NC: (S + A) | T (relative address base is 0)
// R_ARM_MOVW_BREL: ((S + A) | T) - B(S)
static inline typename This::Status
movw(unsigned char* view,
- const Sized_relobj<32, big_endian>* object,
+ const Sized_relobj_file<32, big_endian>* object,
const Symbol_value<32>* psymval,
Arm_address relative_address_base,
Arm_address thumb_bit,
- relative_address_base);
val = This::insert_val_arm_movw_movt(val, x);
elfcpp::Swap<32, big_endian>::writeval(wv, val);
- return ((check_overflow && utils::has_overflow<16>(x))
+ return ((check_overflow && Bits<16>::has_overflow32(x))
? This::STATUS_OVERFLOW
: This::STATUS_OKAY);
}
// R_ARM_MOVT_BREL: S + A - B(S)
static inline typename This::Status
movt(unsigned char* view,
- const Sized_relobj<32, big_endian>* object,
+ const Sized_relobj_file<32, big_endian>* object,
const Symbol_value<32>* psymval,
Arm_address relative_address_base)
{
// R_ARM_THM_MOVW_BREL_NC: ((S + A) | T) - B(S)
// R_ARM_THM_MOVW_BREL: ((S + A) | T) - B(S)
static inline typename This::Status
- thm_movw(unsigned char *view,
- const Sized_relobj<32, big_endian>* object,
+ thm_movw(unsigned char* view,
+ const Sized_relobj_file<32, big_endian>* object,
const Symbol_value<32>* psymval,
Arm_address relative_address_base,
Arm_address thumb_bit,
val = This::insert_val_thumb_movw_movt(val, x);
elfcpp::Swap<16, big_endian>::writeval(wv, val >> 16);
elfcpp::Swap<16, big_endian>::writeval(wv + 1, val & 0xffff);
- return ((check_overflow && utils::has_overflow<16>(x))
- ? This::STATUS_OVERFLOW
+ return ((check_overflow && Bits<16>::has_overflow32(x))
+ ? This::STATUS_OVERFLOW
: This::STATUS_OKAY);
}
// R_ARM_THM_MOVT_BREL: S + A - B(S)
static inline typename This::Status
thm_movt(unsigned char* view,
- const Sized_relobj<32, big_endian>* object,
+ const Sized_relobj_file<32, big_endian>* object,
const Symbol_value<32>* psymval,
Arm_address relative_address_base)
{
// R_ARM_THM_ALU_PREL_11_0: ((S + A) | T) - Pa (Thumb32)
static inline typename This::Status
thm_alu11(unsigned char* view,
- const Sized_relobj<32, big_endian>* object,
+ const Sized_relobj_file<32, big_endian>* object,
const Symbol_value<32>* psymval,
Arm_address address,
Arm_address thumb_bit)
elfcpp::Swap<16, big_endian>::writeval(wv, insn >> 16);
elfcpp::Swap<16, big_endian>::writeval(wv + 1, insn & 0xffff);
return ((val > 0xfff) ?
- This::STATUS_OVERFLOW : This::STATUS_OKAY);
+ This::STATUS_OVERFLOW : This::STATUS_OKAY);
}
// R_ARM_THM_PC8: S + A - Pa (Thumb)
static inline typename This::Status
thm_pc8(unsigned char* view,
- const Sized_relobj<32, big_endian>* object,
+ const Sized_relobj_file<32, big_endian>* object,
const Symbol_value<32>* psymval,
Arm_address address)
{
// R_ARM_THM_PC12: S + A - Pa (Thumb32)
static inline typename This::Status
thm_pc12(unsigned char* view,
- const Sized_relobj<32, big_endian>* object,
+ const Sized_relobj_file<32, big_endian>* object,
const Symbol_value<32>* psymval,
Arm_address address)
{
elfcpp::Swap<16, big_endian>::writeval(wv, insn >> 16);
elfcpp::Swap<16, big_endian>::writeval(wv + 1, insn & 0xffff);
return ((val > 0xfff) ?
- This::STATUS_OVERFLOW : This::STATUS_OKAY);
+ This::STATUS_OVERFLOW : This::STATUS_OKAY);
}
// R_ARM_V4BX
static inline typename This::Status
v4bx(const Relocate_info<32, big_endian>* relinfo,
- unsigned char *view,
+ unsigned char* view,
const Arm_relobj<big_endian>* object,
const Arm_address address,
const bool is_interworking)
// R_ARM_ALU_SB_G2: ((S + A) | T) - B(S)
static inline typename This::Status
arm_grp_alu(unsigned char* view,
- const Sized_relobj<32, big_endian>* object,
+ const Sized_relobj_file<32, big_endian>* object,
const Symbol_value<32>* psymval,
const int group,
Arm_address address,
// R_ARM_LDR_SB_G2: S + A - B(S)
static inline typename This::Status
arm_grp_ldr(unsigned char* view,
- const Sized_relobj<32, big_endian>* object,
+ const Sized_relobj_file<32, big_endian>* object,
const Symbol_value<32>* psymval,
const int group,
Arm_address address)
// R_ARM_LDRS_SB_G2: S + A - B(S)
static inline typename This::Status
arm_grp_ldrs(unsigned char* view,
- const Sized_relobj<32, big_endian>* object,
+ const Sized_relobj_file<32, big_endian>* object,
const Symbol_value<32>* psymval,
const int group,
Arm_address address)
// R_ARM_LDC_SB_G2: S + A - B(S)
static inline typename This::Status
arm_grp_ldc(unsigned char* view,
- const Sized_relobj<32, big_endian>* object,
+ const Sized_relobj_file<32, big_endian>* object,
const Symbol_value<32>* psymval,
const int group,
Arm_address address)
Arm_relocate_functions<big_endian>::arm_branch_common(
unsigned int r_type,
const Relocate_info<32, big_endian>* relinfo,
- unsigned char *view,
+ unsigned char* view,
const Sized_symbol<32>* gsym,
const Arm_relobj<big_endian>* object,
unsigned int r_sym,
typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
Valtype* wv = reinterpret_cast<Valtype*>(view);
Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
-
+
bool insn_is_b = (((val >> 28) & 0xf) <= 0xe)
- && ((val & 0x0f000000UL) == 0x0a000000UL);
+ && ((val & 0x0f000000UL) == 0x0a000000UL);
bool insn_is_uncond_bl = (val & 0xff000000UL) == 0xeb000000UL;
bool insn_is_cond_bl = (((val >> 28) & 0xf) < 0xe)
&& ((val & 0x0f000000UL) == 0x0b000000UL);
elfcpp::Swap<32, big_endian>::writeval(wv, val);
return This::STATUS_OKAY;
}
-
- Valtype addend = utils::sign_extend<26>(val << 2);
+
+ Valtype addend = Bits<26>::sign_extend32(val << 2);
Valtype branch_target = psymval->value(object, addend);
int32_t branch_offset = branch_target - address;
// We need a stub if the branch offset is too large or if we need
// to switch mode.
- bool may_use_blx = arm_target->may_use_blx();
+ bool may_use_blx = arm_target->may_use_v5t_interworking();
Reloc_stub* stub = NULL;
if (!parameters->options().relocatable()
- && (utils::has_overflow<26>(branch_offset)
+ && (Bits<26>::has_overflow32(branch_offset)
|| ((thumb_bit != 0)
&& !(may_use_blx && r_type == elfcpp::R_ARM_CALL))))
{
thumb_bit = stub->stub_template()->entry_in_thumb_mode() ? 1 : 0;
branch_target = stub_table->address() + stub->offset() + addend;
branch_offset = branch_target - address;
- gold_assert(!utils::has_overflow<26>(branch_offset));
+ gold_assert(!Bits<26>::has_overflow32(branch_offset));
}
}
val = (val & 0xffffff) | 0xfa000000 | ((branch_offset & 2) << 23);
}
- val = utils::bit_select(val, (branch_offset >> 2), 0xffffffUL);
+ val = Bits<32>::bit_select32(val, (branch_offset >> 2), 0xffffffUL);
elfcpp::Swap<32, big_endian>::writeval(wv, val);
- return (utils::has_overflow<26>(branch_offset)
- ? This::STATUS_OVERFLOW : This::STATUS_OKAY);
+ return (Bits<26>::has_overflow32(branch_offset)
+ ? This::STATUS_OVERFLOW
+ : This::STATUS_OKAY);
}
// Relocate THUMB long branches. This handles relocation types
Arm_relocate_functions<big_endian>::thumb_branch_common(
unsigned int r_type,
const Relocate_info<32, big_endian>* relinfo,
- unsigned char *view,
+ unsigned char* view,
const Sized_symbol<32>* gsym,
const Arm_relobj<big_endian>* object,
unsigned int r_sym,
// into account.
bool is_bl_insn = (lower_insn & 0x1000U) == 0x1000U;
bool is_blx_insn = (lower_insn & 0x1000U) == 0x0000U;
-
+
// Check that the instruction is valid.
if (r_type == elfcpp::R_ARM_THM_CALL)
{
gold_warning(_("%s: Thumb BLX instruction targets "
"thumb function '%s'."),
object->name().c_str(),
- (gsym ? gsym->name() : "(local)"));
+ (gsym ? gsym->name() : "(local)"));
// Convert BLX to BL.
lower_insn |= 0x1000U;
}
}
return This::STATUS_OKAY;
}
-
+
int32_t addend = This::thumb32_branch_offset(upper_insn, lower_insn);
Arm_address branch_target = psymval->value(object, addend);
// For BLX, bit 1 of target address comes from bit 1 of base address.
- bool may_use_blx = arm_target->may_use_blx();
+ bool may_use_blx = arm_target->may_use_v5t_interworking();
if (thumb_bit == 0 && may_use_blx)
- branch_target = utils::bit_select(branch_target, address, 0x2);
+ branch_target = Bits<32>::bit_select32(branch_target, address, 0x2);
int32_t branch_offset = branch_target - address;
// to switch mode.
bool thumb2 = arm_target->using_thumb2();
if (!parameters->options().relocatable()
- && ((!thumb2 && utils::has_overflow<23>(branch_offset))
- || (thumb2 && utils::has_overflow<25>(branch_offset))
+ && ((!thumb2 && Bits<23>::has_overflow32(branch_offset))
+ || (thumb2 && Bits<25>::has_overflow32(branch_offset))
|| ((thumb_bit == 0)
&& (((r_type == elfcpp::R_ARM_THM_CALL) && !may_use_blx)
|| r_type == elfcpp::R_ARM_THM_JUMP24))))
gold_assert(stub != NULL);
thumb_bit = stub->stub_template()->entry_in_thumb_mode() ? 1 : 0;
branch_target = stub_table->address() + stub->offset() + addend;
- if (thumb_bit == 0 && may_use_blx)
- branch_target = utils::bit_select(branch_target, address, 0x2);
+ if (thumb_bit == 0 && may_use_blx)
+ branch_target = Bits<32>::bit_select32(branch_target, address, 0x2);
branch_offset = branch_target - address;
}
}
elfcpp::Swap<16, big_endian>::writeval(wv, upper_insn);
elfcpp::Swap<16, big_endian>::writeval(wv + 1, lower_insn);
- gold_assert(!utils::has_overflow<25>(branch_offset));
+ gold_assert(!Bits<25>::has_overflow32(branch_offset));
return ((thumb2
- ? utils::has_overflow<25>(branch_offset)
- : utils::has_overflow<23>(branch_offset))
+ ? Bits<25>::has_overflow32(branch_offset)
+ : Bits<23>::has_overflow32(branch_offset))
? This::STATUS_OVERFLOW
: This::STATUS_OKAY);
}
template<bool big_endian>
typename Arm_relocate_functions<big_endian>::Status
Arm_relocate_functions<big_endian>::thm_jump19(
- unsigned char *view,
+ unsigned char* view,
const Arm_relobj<big_endian>* object,
const Symbol_value<32>* psymval,
Arm_address address,
elfcpp::Swap<16, big_endian>::writeval(wv, upper_insn);
elfcpp::Swap<16, big_endian>::writeval(wv + 1, lower_insn);
- return (utils::has_overflow<21>(branch_offset)
+ return (Bits<21>::has_overflow32(branch_offset)
? This::STATUS_OVERFLOW
: This::STATUS_OKAY);
}
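+
+// Note: the 21-bit signed byte offset checked above corresponds to the
+// Thumb-2 conditional branch encoding, giving a reach of roughly +/-1MB.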
{
gold_assert(symtab != NULL && layout != NULL);
+ // When using -z now, we can treat .got as a relro section.
+ // Without -z now, it is modified after program startup by lazy
+ // PLT relocations.
+ bool is_got_relro = parameters->options().now();
+ Output_section_order got_order = (is_got_relro
+ ? ORDER_RELRO_LAST
+ : ORDER_DATA);
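+ // ORDER_RELRO_LAST places the .got output section at the end of the
+ // relro region, adjacent to the writable data that follows it.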
+
+ // Unlike some targets (e.g. x86), ARM does not use separate .got and
+ // .got.plt sections in output. The output .got section contains both
+ // PLT and non-PLT GOT entries.
this->got_ = new Arm_output_data_got<big_endian>(symtab, layout);
layout->add_output_section_data(".got", elfcpp::SHT_PROGBITS,
- (elfcpp::SHF_ALLOC
- | elfcpp::SHF_WRITE),
- this->got_, false, false, false, true);
+ (elfcpp::SHF_ALLOC | elfcpp::SHF_WRITE),
+ this->got_, got_order, is_got_relro);
+
// The old GNU linker creates a .got.plt section. We just
// create another set of data in the .got section. Note that we
// always create a PLT if we create a GOT, although the PLT
// might be empty.
this->got_plt_ = new Output_data_space(4, "** GOT PLT");
layout->add_output_section_data(".got", elfcpp::SHT_PROGBITS,
- (elfcpp::SHF_ALLOC
- | elfcpp::SHF_WRITE),
- this->got_plt_, false, false, false,
- false);
+ (elfcpp::SHF_ALLOC | elfcpp::SHF_WRITE),
+ this->got_plt_, got_order, is_got_relro);
// The first three entries are reserved.
this->got_plt_->set_current_data_size(3 * 4);
elfcpp::STB_LOCAL,
elfcpp::STV_HIDDEN, 0,
false, false);
+
+ // If there are any IRELATIVE relocations, they get GOT entries
+ // in .got.plt after the jump slot entries.
+ this->got_irelative_ = new Output_data_space(4, "** GOT IRELATIVE PLT");
+ layout->add_output_section_data(".got", elfcpp::SHT_PROGBITS,
+ (elfcpp::SHF_ALLOC | elfcpp::SHF_WRITE),
+ this->got_irelative_,
+ got_order, is_got_relro);
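+
+ // The GOT, PLT GOT and IRELATIVE GOT data above are all added to the
+ // same output .got section, so within .got they appear in the order they
+ // are added here: regular GOT entries first, then PLT GOT entries, then
+ // any IRELATIVE GOT entries.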
+
}
return this->got_;
}
if (this->rel_dyn_ == NULL)
{
gold_assert(layout != NULL);
+ // Create both relocation sections in the same place, so as to ensure
+ // their relative order in the output section.
this->rel_dyn_ = new Reloc_section(parameters->options().combreloc());
+ this->rel_irelative_ = new Reloc_section(false);
layout->add_output_section_data(".rel.dyn", elfcpp::SHT_REL,
- elfcpp::SHF_ALLOC, this->rel_dyn_, true,
- false, false, false);
+ elfcpp::SHF_ALLOC, this->rel_dyn_,
+ ORDER_DYNAMIC_RELOCS, false);
+ layout->add_output_section_data(".rel.dyn", elfcpp::SHT_REL,
+ elfcpp::SHF_ALLOC, this->rel_irelative_,
+ ORDER_DYNAMIC_RELOCS, false);
}
return this->rel_dyn_;
}
+
+// Get the section to use for IRELATIVE relocs, creating it if necessary. These
+// go in .rel.dyn, but only after all other dynamic relocations.  They need to
+// follow the other dynamic relocations so that they can refer to global
+// variables initialized by those relocs.
+
+template<bool big_endian>
+typename Target_arm<big_endian>::Reloc_section*
+Target_arm<big_endian>::rel_irelative_section(Layout* layout)
+{
+ if (this->rel_irelative_ == NULL)
+ {
+ // Delegate the creation to rel_dyn_section so as to ensure their order in
+ // the output section.
+ this->rel_dyn_section(layout);
+ gold_assert(this->rel_irelative_ != NULL
+ && (this->rel_dyn_->output_section()
+ == this->rel_irelative_->output_section()));
+ }
+ return this->rel_irelative_;
+}
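+
+// Note: because rel_irelative_ is created together with rel_dyn_ and added
+// to the same .rel.dyn output section right after it, the IRELATIVE
+// relocations always end up after the ordinary dynamic relocations.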
+
+
// Insn_template methods.
// Return byte size of an instruction template.
break;
case Insn_template::THUMB32_TYPE:
- if (insns[i].r_type() != elfcpp::R_ARM_NONE)
+ if (insns[i].r_type() != elfcpp::R_ARM_NONE)
this->relocs_.push_back(Reloc(i, offset));
if (i == 0)
this->entry_in_thumb_mode_ = true;
- break;
+ break;
case Insn_template::ARM_TYPE:
// Handle cases where the target is encoded within the
default:
gold_unreachable();
}
- offset += insn_size;
+ offset += insn_size;
}
this->size_ = offset;
}
elfcpp::Swap<16, big_endian>::writeval(pov, hi);
elfcpp::Swap<16, big_endian>::writeval(pov + 2, lo);
}
- break;
+ break;
case Insn_template::ARM_TYPE:
case Insn_template::DATA_TYPE:
elfcpp::Swap<32, big_endian>::writeval(pov, insns[i].data());
pov += insns[i].size();
}
gold_assert(static_cast<section_size_type>(pov - view) == view_size);
-}
+}
// Reloc_stub::Key methods.
// Determine the type of stub needed, if any, for a relocation of R_TYPE at
// LOCATION to DESTINATION.
// This code is based on the arm_type_of_stub function in
-// bfd/elf32-arm.c. We have changed the interface a liitle to keep the Stub
+// bfd/elf32-arm.c. We have changed the interface a little to keep the Stub
// class simple.
Stub_type
// This is a bit ugly but we want to avoid using a templated class for
// big- and little-endian targets.
bool may_use_blx;
- bool should_force_pic_veneer;
+ bool should_force_pic_veneer = parameters->options().pic_veneer();
bool thumb2;
bool thumb_only;
if (parameters->target().is_big_endian())
{
const Target_arm<true>* big_endian_target =
Target_arm<true>::default_target();
- may_use_blx = big_endian_target->may_use_blx();
- should_force_pic_veneer = big_endian_target->should_force_pic_veneer();
+ may_use_blx = big_endian_target->may_use_v5t_interworking();
+ should_force_pic_veneer |= big_endian_target->should_force_pic_veneer();
thumb2 = big_endian_target->using_thumb2();
thumb_only = big_endian_target->using_thumb_only();
}
{
const Target_arm<false>* little_endian_target =
Target_arm<false>::default_target();
- may_use_blx = little_endian_target->may_use_blx();
- should_force_pic_veneer = little_endian_target->should_force_pic_veneer();
+ may_use_blx = little_endian_target->may_use_v5t_interworking();
+ should_force_pic_veneer |=
+ little_endian_target->should_force_pic_veneer();
thumb2 = little_endian_target->using_thumb2();
thumb_only = little_endian_target->using_thumb_only();
}
int64_t branch_offset;
+ bool output_is_position_independent =
+ parameters->options().output_is_position_independent();
if (r_type == elfcpp::R_ARM_THM_CALL || r_type == elfcpp::R_ARM_THM_JUMP24)
{
// For THUMB BLX instruction, bit 1 of target comes from bit 1 of the
// base address (instruction address + 4).
if ((r_type == elfcpp::R_ARM_THM_CALL) && may_use_blx && !target_is_thumb)
- destination = utils::bit_select(destination, location, 0x2);
+ destination = Bits<32>::bit_select32(destination, location, 0x2);
branch_offset = static_cast<int64_t>(destination) - location;
-
+
// Handle cases where:
// - this call goes too far (different Thumb/Thumb2 max
// distance)
// Thumb to thumb.
if (!thumb_only)
{
- stub_type = (parameters->options().shared()
+ stub_type = (output_is_position_independent
|| should_force_pic_veneer)
// PIC stubs.
? ((may_use_blx
}
else
{
- stub_type = (parameters->options().shared()
+ stub_type = (output_is_position_independent
|| should_force_pic_veneer)
? arm_stub_long_branch_thumb_only_pic // PIC stub.
: arm_stub_long_branch_thumb_only; // non-PIC stub.
else
{
// Thumb to arm.
-
+
// FIXME: We should check that the input section is from an
// object that has interwork enabled.
- stub_type = (parameters->options().shared()
+ stub_type = (output_is_position_independent
|| should_force_pic_veneer)
// PIC stubs.
? ((may_use_blx
|| (r_type == elfcpp::R_ARM_JUMP24)
|| (r_type == elfcpp::R_ARM_PLT32))
{
- stub_type = (parameters->options().shared()
+ stub_type = (output_is_position_independent
|| should_force_pic_veneer)
// PIC stubs.
? (may_use_blx
if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
|| (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
{
- stub_type = (parameters->options().shared()
+ stub_type = (output_is_position_independent
|| should_force_pic_veneer)
? arm_stub_long_branch_any_arm_pic // PIC stubs.
	    : arm_stub_long_branch_any_any; // non-PIC.
{
// The instruction template sequences are declared as static
// objects and initialized first time the constructor runs.
-
+
// Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
// to reach the stub if necessary.
static const Insn_template elf32_arm_stub_long_branch_any_any[] =
{
Insn_template::arm_insn(0xe51ff004), // ldr pc, [pc, #-4]
Insn_template::data_word(0, elfcpp::R_ARM_ABS32, 0),
- // dcd R_ARM_ABS32(X)
+ // dcd R_ARM_ABS32(X)
};
-
+
// V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
// available.
static const Insn_template elf32_arm_stub_long_branch_v4t_arm_thumb[] =
Insn_template::arm_insn(0xe59fc000), // ldr ip, [pc, #0]
Insn_template::arm_insn(0xe12fff1c), // bx ip
Insn_template::data_word(0, elfcpp::R_ARM_ABS32, 0),
- // dcd R_ARM_ABS32(X)
+ // dcd R_ARM_ABS32(X)
};
-
+
// Thumb -> Thumb long branch stub. Used on M-profile architectures.
static const Insn_template elf32_arm_stub_long_branch_thumb_only[] =
{
Insn_template::thumb16_insn(0x4760), // bx ip
Insn_template::thumb16_insn(0xbf00), // nop
Insn_template::data_word(0, elfcpp::R_ARM_ABS32, 0),
- // dcd R_ARM_ABS32(X)
+ // dcd R_ARM_ABS32(X)
};
-
+
// V4T Thumb -> Thumb long branch stub. Using the stack is not
// allowed.
static const Insn_template elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
Insn_template::arm_insn(0xe59fc000), // ldr ip, [pc, #0]
Insn_template::arm_insn(0xe12fff1c), // bx ip
Insn_template::data_word(0, elfcpp::R_ARM_ABS32, 0),
- // dcd R_ARM_ABS32(X)
+ // dcd R_ARM_ABS32(X)
};
-
+
// V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
// available.
static const Insn_template elf32_arm_stub_long_branch_v4t_thumb_arm[] =
Insn_template::thumb16_insn(0x46c0), // nop
Insn_template::arm_insn(0xe51ff004), // ldr pc, [pc, #-4]
Insn_template::data_word(0, elfcpp::R_ARM_ABS32, 0),
- // dcd R_ARM_ABS32(X)
+ // dcd R_ARM_ABS32(X)
};
-
+
// V4T Thumb -> ARM short branch stub. Shorter variant of the above
// one, when the destination is close enough.
static const Insn_template elf32_arm_stub_short_branch_v4t_thumb_arm[] =
Insn_template::thumb16_insn(0x46c0), // nop
Insn_template::arm_rel_insn(0xea000000, -8), // b (X-8)
};
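+ // Note: the -8 addend on the relative branch above accounts for ARM's
+ // pipelined PC, which reads as the address of the branch plus 8 when the
+ // instruction executes.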
-
+
// ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
// blx to reach the stub if necessary.
static const Insn_template elf32_arm_stub_long_branch_any_arm_pic[] =
Insn_template::arm_insn(0xe59fc000), // ldr r12, [pc]
Insn_template::arm_insn(0xe08ff00c), // add pc, pc, ip
Insn_template::data_word(0, elfcpp::R_ARM_REL32, -4),
- // dcd R_ARM_REL32(X-4)
+ // dcd R_ARM_REL32(X-4)
};
-
+
// ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
  // blx to reach the stub if necessary.  We cannot add into pc;
  // it is not guaranteed to switch mode (different in ARMv6 and
Insn_template::arm_insn(0xe08fc00c), // add ip, pc, ip
Insn_template::arm_insn(0xe12fff1c), // bx ip
Insn_template::data_word(0, elfcpp::R_ARM_REL32, 0),
- // dcd R_ARM_REL32(X)
+ // dcd R_ARM_REL32(X)
};
-
+
// V4T ARM -> ARM long branch stub, PIC.
static const Insn_template elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
{
Insn_template::arm_insn(0xe08fc00c), // add ip, pc, ip
Insn_template::arm_insn(0xe12fff1c), // bx ip
Insn_template::data_word(0, elfcpp::R_ARM_REL32, 0),
- // dcd R_ARM_REL32(X)
+ // dcd R_ARM_REL32(X)
};
-
+
// V4T Thumb -> ARM long branch stub, PIC.
static const Insn_template elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
{
Insn_template::arm_insn(0xe59fc000), // ldr ip, [pc, #0]
Insn_template::arm_insn(0xe08cf00f), // add pc, ip, pc
Insn_template::data_word(0, elfcpp::R_ARM_REL32, -4),
- // dcd R_ARM_REL32(X)
+ // dcd R_ARM_REL32(X)
};
-
+
// Thumb -> Thumb long branch stub, PIC. Used on M-profile
// architectures.
static const Insn_template elf32_arm_stub_long_branch_thumb_only_pic[] =
Insn_template::thumb16_insn(0xbc01), // pop {r0}
Insn_template::thumb16_insn(0x4760), // bx ip
Insn_template::data_word(0, elfcpp::R_ARM_REL32, 4),
- // dcd R_ARM_REL32(X)
+ // dcd R_ARM_REL32(X)
};
-
+
// V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
// allowed.
static const Insn_template elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
Insn_template::arm_insn(0xe08fc00c), // add ip, pc, ip
Insn_template::arm_insn(0xe12fff1c), // bx ip
Insn_template::data_word(0, elfcpp::R_ARM_REL32, 0),
- // dcd R_ARM_REL32(X)
+ // dcd R_ARM_REL32(X)
};
-
+
// Cortex-A8 erratum-workaround stubs.
-
+
// Stub used for conditional branches (which may be beyond +/-1MB away,
// so we can't use a conditional branch to reach this stub).
-
+
// original code:
//
// b<cond> X
Insn_template::thumb16_bcond_insn(0xd001), // b<cond>.n true
Insn_template::thumb32_b_insn(0xf000b800, -4), // b.w after
Insn_template::thumb32_b_insn(0xf000b800, -4) // true:
- // b.w X
+ // b.w X
};
-
+
// Stub used for b.w and bl.w instructions.
-
+
static const Insn_template elf32_arm_stub_a8_veneer_b[] =
{
Insn_template::thumb32_b_insn(0xf000b800, -4) // b.w dest
};
-
+
static const Insn_template elf32_arm_stub_a8_veneer_bl[] =
{
Insn_template::thumb32_b_insn(0xf000b800, -4) // b.w dest
};
-
+
// Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
// instruction (which switches to ARM mode) to point to this stub. Jump to
// the real destination using an ARM-mode branch.
// Stub_table methods.
-// Removel all Cortex-A8 stub.
+// Remove all Cortex-A8 stubs.
template<bool big_endian>
void
// Update prev_data_size_ and prev_addralign_. These will be used
// as the current data size and address alignment for the next pass.
bool changed = size != this->prev_data_size_;
- this->prev_data_size_ = size;
+ this->prev_data_size_ = size;
if (addralign != this->prev_addralign_)
changed = true;
Relobj* relobj = this->relobj();
unsigned int shndx = this->shndx();
- // Cache these to speed up size and alignment queries. It is too slow
- // to call section_addraglin and section_size every time.
+ // We have to cache original size, alignment and contents to avoid locking
+ // the original file.
this->original_addralign_ =
convert_types<uint32_t, uint64_t>(relobj->section_addralign(shndx));
+
+ // This is not efficient but we expect only a small number of relaxed
+ // input sections for stubs.
+ section_size_type section_size;
+ const unsigned char* section_contents =
+ relobj->section_contents(shndx, &section_size, false);
this->original_size_ =
convert_types<uint32_t, uint64_t>(relobj->section_size(shndx));
+ gold_assert(this->original_contents_ == NULL);
+ this->original_contents_ = new unsigned char[section_size];
+ memcpy(this->original_contents_, section_contents, section_size);
+
// We want to make this look like the original input section after
// output sections are finalized.
Output_section* os = relobj->output_section(shndx);
Arm_input_section<big_endian>::do_write(Output_file* of)
{
// We have to write out the original section content.
- section_size_type section_size;
- const unsigned char* section_contents =
- this->relobj()->section_contents(this->shndx(), &section_size, false);
- of->write(this->offset(), section_contents, section_size);
+ gold_assert(this->original_contents_ != NULL);
+ of->write(this->offset(), this->original_contents_,
+ this->original_size_);
// If this owns a stub table and it is not empty, write it.
if (this->is_stub_table_owner() && !this->stub_table_->empty())
off_t offset = this->offset();
const section_size_type oview_size = 8;
unsigned char* const oview = of->get_output_view(offset, oview_size);
-
- typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
- Valtype* wv = reinterpret_cast<Valtype*>(oview);
Output_section* os = this->relobj_->output_section(this->shndx_);
gold_assert(os != NULL);
Arm_address output_offset =
arm_relobj->get_output_section_offset(this->shndx_);
Arm_address section_start;
+ section_size_type section_size;
+
+ // Find the end of the text section referred to by this entry.
if (output_offset != Arm_relobj<big_endian>::invalid_address)
- section_start = os->address() + output_offset;
+ {
+ section_start = os->address() + output_offset;
+ const Arm_exidx_input_section* exidx_input_section =
+ arm_relobj->exidx_input_section_by_link(this->shndx_);
+ gold_assert(exidx_input_section != NULL);
+ section_size =
+ convert_to_section_size_type(exidx_input_section->text_size());
+ }
else
{
// Currently this only happens for a relaxed section.
os->find_relaxed_input_section(this->relobj_, this->shndx_);
gold_assert(poris != NULL);
section_start = poris->address();
+ section_size = convert_to_section_size_type(poris->data_size());
}
// We always append this to the end of an EXIDX section.
- Arm_address output_address =
- section_start + this->relobj_->section_size(this->shndx_);
+ Arm_address output_address = section_start + section_size;
// Write out the entry. The first word either points to the beginning
// or after the end of a text section. The second word is the special
// EXIDX_CANTUNWIND value.
uint32_t prel31_offset = output_address - this->address();
- if (utils::has_overflow<31>(offset))
+ if (Bits<31>::has_overflow32(offset))
gold_error(_("PREL31 overflow in EXIDX_CANTUNWIND entry"));
- elfcpp::Swap<32, big_endian>::writeval(wv, prel31_offset & 0x7fffffffU);
- elfcpp::Swap<32, big_endian>::writeval(wv + 1, elfcpp::EXIDX_CANTUNWIND);
+ elfcpp::Swap_unaligned<32, big_endian>::writeval(oview,
+ prel31_offset & 0x7fffffffU);
+ elfcpp::Swap_unaligned<32, big_endian>::writeval(oview + 4,
+ elfcpp::EXIDX_CANTUNWIND);
of->write_output_view(this->offset(), oview_size, oview);
}
exidx_input_section_(exidx_input_section),
section_offset_map_(section_offset_map)
{
+ // If we retain or discard the whole EXIDX input section, we would
+ // not be here.
+ gold_assert(deleted_bytes != 0
+ && deleted_bytes != this->exidx_input_section_.size());
+
// Fix size here so that we do not need to implement set_final_data_size.
- this->set_data_size(exidx_input_section.size() - deleted_bytes);
+ uint32_t size = exidx_input_section.size() - deleted_bytes;
+ this->set_data_size(size);
this->fix_data_size();
+
+ // Allocate buffer for section contents and build contents.
+ this->section_contents_ = new unsigned char[size];
+}
+
+// Build the contents of a merged EXIDX output section.
+
+void
+Arm_exidx_merged_section::build_contents(
+ const unsigned char* original_contents,
+ section_size_type original_size)
+{
+ // Go over spans of input offsets and write only those that are not
+ // discarded.
+ section_offset_type in_start = 0;
+ section_offset_type out_start = 0;
+ section_offset_type in_max =
+ convert_types<section_offset_type>(original_size);
+ section_offset_type out_max =
+ convert_types<section_offset_type>(this->data_size());
+ for (Arm_exidx_section_offset_map::const_iterator p =
+ this->section_offset_map_.begin();
+ p != this->section_offset_map_.end();
+ ++p)
+ {
+ section_offset_type in_end = p->first;
+ gold_assert(in_end >= in_start);
+ section_offset_type out_end = p->second;
+ size_t in_chunk_size = convert_types<size_t>(in_end - in_start + 1);
+ if (out_end != -1)
+ {
+ size_t out_chunk_size =
+ convert_types<size_t>(out_end - out_start + 1);
+
+ gold_assert(out_chunk_size == in_chunk_size
+ && in_end < in_max && out_end < out_max);
+
+ memcpy(this->section_contents_ + out_start,
+ original_contents + in_start,
+ out_chunk_size);
+ out_start += out_chunk_size;
+ }
+ in_start += in_chunk_size;
+ }
}
// Given an input OBJECT, an input section index SHNDX within that
// Offset is discarded owing to EXIDX entry merging.
*poutput = -1;
}
-
+
return true;
}
void
Arm_exidx_merged_section::do_write(Output_file* of)
{
- // If we retain or discard the whole EXIDX input section, we would
- // not be here.
- gold_assert(this->data_size() != this->exidx_input_section_.size()
- && this->data_size() != 0);
-
off_t offset = this->offset();
const section_size_type oview_size = this->data_size();
unsigned char* const oview = of->get_output_view(offset, oview_size);
-
+
Output_section* os = this->relobj()->output_section(this->shndx());
gold_assert(os != NULL);
- // Get contents of EXIDX input section.
- section_size_type section_size;
- const unsigned char* section_contents =
- this->relobj()->section_contents(this->shndx(), &section_size, false);
- gold_assert(section_size == this->exidx_input_section_.size());
-
- // Go over spans of input offsets and write only those that are not
- // discarded.
- section_offset_type in_start = 0;
- section_offset_type out_start = 0;
- for(Arm_exidx_section_offset_map::const_iterator p =
- this->section_offset_map_.begin();
- p != this->section_offset_map_.end();
- ++p)
- {
- section_offset_type in_end = p->first;
- gold_assert(in_end >= in_start);
- section_offset_type out_end = p->second;
- size_t in_chunk_size = convert_types<size_t>(in_end - in_start + 1);
- if (out_end != -1)
- {
- size_t out_chunk_size =
- convert_types<size_t>(out_end - out_start + 1);
- gold_assert(out_chunk_size == in_chunk_size);
- memcpy(oview + out_start, section_contents + in_start,
- out_chunk_size);
- out_start += out_chunk_size;
- }
- in_start += in_chunk_size;
- }
-
- gold_assert(convert_to_section_size_type(out_start) == oview_size);
+ memcpy(oview, this->section_contents_, oview_size);
of->write_output_view(this->offset(), oview_size, oview);
}
}
// Process EXIDX_INPUT_SECTION for EXIDX entry merging. Return the number of
-// bytes deleted. If some entries are merged, also store a pointer to a newly
-// created Arm_exidx_section_offset_map object in *PSECTION_OFFSET_MAP. The
-// caller owns the map and is responsible for releasing it after use.
+// bytes deleted.  SECTION_CONTENTS points to the contents of the EXIDX
+// section and SECTION_SIZE is the size in bytes of those contents.
+// If some entries are merged, also store a pointer to a newly created
+// Arm_exidx_section_offset_map object in *PSECTION_OFFSET_MAP. The caller
+// owns the map and is responsible for releasing it after use.
template<bool big_endian>
uint32_t
Arm_exidx_fixup::process_exidx_section(
const Arm_exidx_input_section* exidx_input_section,
+ const unsigned char* section_contents,
+ section_size_type section_size,
Arm_exidx_section_offset_map** psection_offset_map)
{
Relobj* relobj = exidx_input_section->relobj();
unsigned shndx = exidx_input_section->shndx();
- section_size_type section_size;
- const unsigned char* section_contents =
- relobj->section_contents(shndx, &section_size, false);
if ((section_size % 8) != 0)
{
this->last_unwind_type_ = UT_NONE;
return 0;
}
-
+
uint32_t deleted_bytes = 0;
bool prev_delete_entry = false;
gold_assert(this->section_offset_map_ == NULL);
// dropping. If there is no entry (x0, y0) for an input offset x0,
// the output offset y0 of it is determined by the output offset y1 of
// the smallest input offset x1 > x0 that there is an (x1, y1) entry
- // in the map. If y1 is not -1, then y0 = y1 + x0 - x1. Othewise, y1
+ // in the map.  If y1 is not -1, then y0 = y1 + x0 - x1.  Otherwise,
// y0 is also -1.
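+      // For example (hypothetical map): entries (7, -1) and (15, 7) mean
+      // input offsets 0-7 are discarded and input offsets 8-15 map to
+      // output offsets 0-7; an input offset of 10 therefore maps to
+      // output offset 7 + 10 - 15 = 2.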
if (delete_entry != prev_delete_entry && i != 0)
this->update_offset_map(i - 1, deleted_bytes, prev_delete_entry);
prev_delete_entry = delete_entry;
}
-
+
// If section offset map is not NULL, make an entry for the end of
// section.
if (this->section_offset_map_ != NULL)
*psection_offset_map = this->section_offset_map_;
this->section_offset_map_ = NULL;
this->last_input_section_ = exidx_input_section;
-
+
// Set the first output text section so that we can link the EXIDX output
// section to it. Ignore any EXIDX input section that is completely merged.
if (this->first_output_text_section_ == NULL
Input_section_list::const_iterator end,
Input_section_list::const_iterator owner,
Target_arm<big_endian>* target,
- std::vector<Output_relaxed_input_section*>* new_relaxed_sections)
+ std::vector<Output_relaxed_input_section*>* new_relaxed_sections,
+ const Task* task)
{
// We use a different kind of relaxed section in an EXIDX section.
// The static casting from Output_relaxed_input_section to
// Arm_input_section is invalid in an EXIDX section. We are okay
- // because we should not be calling this for an EXIDX section.
+ // because we should not be calling this for an EXIDX section.
gold_assert(this->type() != elfcpp::SHT_ARM_EXIDX);
// Currently we convert ordinary input sections into relaxed sections only
  // at this point, but we may want to support creating relaxed input sections
  // earlier.  So we check here to see if the owner is already a relaxed
// section.
-
+
Arm_input_section<big_endian>* arm_input_section;
if (owner->is_relaxed_input_section())
{
else
{
gold_assert(owner->is_input_section());
- // Create a new relaxed input section.
+ // Create a new relaxed input section. We need to lock the original
+ // file.
+ Task_lock_obj<Object> tl(task, owner->relobj());
arm_input_section =
target->new_arm_input_section(owner->relobj(), owner->shndx());
new_relaxed_sections->push_back(arm_input_section);
target->new_stub_table(arm_input_section);
arm_input_section->set_stub_table(stub_table);
-
+
Input_section_list::const_iterator p = begin;
Input_section_list::const_iterator prev_p;
// size is just below GROUP_SIZE. The last input section will be converted
// into a stub table. If STUB_ALWAYS_AFTER_BRANCH is false, we also add
// input sections after the stub table, effectively doubling the group size.
-//
+//
// This is similar to the group_sections() function in elf32-arm.c but is
// implemented differently.
Arm_output_section<big_endian>::group_sections(
section_size_type group_size,
bool stubs_always_after_branch,
- Target_arm<big_endian>* target)
+ Target_arm<big_endian>* target,
+ const Task* task)
{
- // We only care about sections containing code.
- if ((this->flags() & elfcpp::SHF_EXECINSTR) == 0)
- return;
-
// States for grouping.
typedef enum
{
section_size_type section_begin_offset =
align_address(off, p->addralign());
section_size_type section_end_offset =
- section_begin_offset + p->data_size();
-
- // Check to see if we should group the previously seens sections.
+ section_begin_offset + p->data_size();
+
+ // Check to see if we should group the previously seen sections.
switch (state)
{
case NO_GROUP:
if (section_end_offset - group_begin_offset >= group_size)
{
if (stubs_always_after_branch)
- {
+ {
gold_assert(group_end != this->input_sections().end());
this->create_stub_group(group_begin, group_end, group_end,
- target, &new_relaxed_sections);
+ target, &new_relaxed_sections,
+ task);
state = NO_GROUP;
}
else
{
gold_assert(group_end != this->input_sections().end());
this->create_stub_group(group_begin, group_end, stub_table,
- target, &new_relaxed_sections);
+ target, &new_relaxed_sections, task);
state = NO_GROUP;
}
break;
default:
gold_unreachable();
- }
+ }
// If we see an input section and currently there is no group, start
- // a new one. Skip any empty sections.
+ // a new one. Skip any empty sections. We look at the data size
+ // instead of calling p->relobj()->section_size() to avoid locking.
if ((p->is_input_section() || p->is_relaxed_input_section())
- && (p->relobj()->section_size(p->shndx()) != 0))
+ && (p->data_size() != 0))
{
if (state == NO_GROUP)
{
(state == FINDING_STUB_SECTION
? group_end
: stub_table),
- target, &new_relaxed_sections);
+ target, &new_relaxed_sections, task);
}
// Convert input section into relaxed input section in a batch.
{
// We only care about plain or relaxed input sections. We also
// ignore any merged sections.
- if ((p->is_input_section() || p->is_relaxed_input_section())
- && p->data_size() != 0)
+ if (p->is_input_section() || p->is_relaxed_input_section())
list->push_back(Text_section_list::value_type(p->relobj(),
p->shndx()));
}
Layout* layout,
const Text_section_list& sorted_text_sections,
Symbol_table* symtab,
- bool merge_exidx_entries)
+ bool merge_exidx_entries,
+ const Task* task)
{
// We should only do this for the EXIDX output section.
gold_assert(this->type() == elfcpp::SHT_ARM_EXIDX);
if (!this->input_sections().empty())
gold_error(_("Found non-EXIDX input sections in EXIDX output section"));
-
+
// Go through all the known input sections and record them.
typedef Unordered_set<Section_id, Section_id_hash> Section_id_set;
typedef Unordered_map<Section_id, const Output_section::Input_section*,
continue;
}
+ // We need to access the contents of the EXIDX section, so lock the
+ // object here.
+ Task_lock_obj<Object> tl(task, exidx_relobj);
+ section_size_type exidx_size;
+ const unsigned char* exidx_contents =
+ exidx_relobj->section_contents(exidx_shndx, &exidx_size, false);
+
// Fix up coverage and append input section to output data list.
Arm_exidx_section_offset_map* section_offset_map = NULL;
uint32_t deleted_bytes =
- exidx_fixup.process_exidx_section<big_endian>(exidx_input_section,
+ exidx_fixup.process_exidx_section<big_endian>(exidx_input_section,
+ exidx_contents,
+ exidx_size,
+ &section_offset_map);
if (deleted_bytes == exidx_input_section->size())
// Some entries are merged. We need to convert this EXIDX input
// section into a relaxed section.
gold_assert(section_offset_map != NULL);
+
Arm_exidx_merged_section* merged_section =
new Arm_exidx_merged_section(*exidx_input_section,
*section_offset_map, deleted_bytes);
- this->add_relaxed_input_section(merged_section);
+ merged_section->build_contents(exidx_contents, exidx_size);
+
+ const std::string secname = exidx_relobj->section_name(exidx_shndx);
+ this->add_relaxed_input_section(layout, merged_section, secname);
arm_relobj->convert_input_section_to_relaxed_section(exidx_shndx);
// All local symbols defined in discarded portions of this input
this->add_script_input_section(*pis);
}
- processed_input_sections.insert(Section_id(exidx_relobj, exidx_shndx));
+ processed_input_sections.insert(Section_id(exidx_relobj, exidx_shndx));
}
// Insert an EXIDX_CANTUNWIND entry at the end of output if necessary.
arm_relobj->set_output_local_symbol_count_needs_update();
}
}
-
+
// Link exidx output section to the first seen output section and
// set correct entry size.
this->set_link_section(exidx_fixup.first_output_text_section());
const elfcpp::Shdr<32, big_endian>& shdr,
unsigned int shndx,
const Output_section* os,
- const Symbol_table *symtab)
+ const Symbol_table* symtab)
{
// Skip any empty sections, unallocated sections or sections whose
  // type is not SHT_PROGBITS.
Arm_relobj<big_endian>::section_needs_reloc_stub_scanning(
const elfcpp::Shdr<32, big_endian>& shdr,
const Relobj::Output_sections& out_sections,
- const Symbol_table *symtab,
+ const Symbol_table* symtab,
const unsigned char* pshdrs)
{
unsigned int sh_type = shdr.get_sh_type();
this->mapping_symbols_info_.lower_bound(section_start);
// There are no mapping symbols for this section. Treat it as a data-only
- // section. Issue a warning if section is marked as containing
- // instructions.
+ // section.
if (p == this->mapping_symbols_info_.end() || p->first.first != shndx)
- {
- if ((this->section_flags(shndx) & elfcpp::SHF_EXECINSTR) != 0)
- gold_warning(_("cannot scan executable section %u of %s for Cortex-A8 "
- "erratum because it has no mapping symbols."),
- shndx, this->name().c_str());
- return;
- }
+ return;
Arm_address output_address =
this->simple_input_section_output_address(shndx, os);
// scan. There are two reasons. First, we should look at THUMB code and
// THUMB code only. Second, we only want to look at the 4K-page boundary
// to speed up the scanning.
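+  // (Roughly, only a 32-bit Thumb-2 branch placed at a 4KB page boundary
+  // can trigger the erratum, so a span that lies entirely within a single
+  // 4KB page can be skipped.)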
-
+
while (p != this->mapping_symbols_info_.end()
&& p->first.first == shndx)
{
span_end = convert_to_section_size_type(next->first.second);
else
span_end = convert_to_section_size_type(shdr.get_sh_size());
-
+
if (((span_start + output_address) & ~0xfffUL)
!= ((span_end + output_address - 1) & ~0xfffUL))
{
}
}
- p = next;
+ p = next;
}
}
// harder because we cannot access this information. So we override the
// do_count_local_symbols method in the parent class and scan local symbols
// to mark
// THUMB functions. This is not the most efficient way but I do not want to
-// slow down other ports by calling a per symbol targer hook inside
-// Sized_relobj<size, big_endian>::do_count_local_symbols.
+// slow down other ports by calling a per symbol target hook inside
+// Sized_relobj_file<size, big_endian>::do_count_local_symbols.
template<bool big_endian>
void
{
  // We need to fix up the values of any local symbols whose type is
// STT_ARM_TFUNC.
-
+
// Ask parent to count the local symbols.
- Sized_relobj<32, big_endian>::do_count_local_symbols(pool, dynpool);
+ Sized_relobj_file<32, big_endian>::do_count_local_symbols(pool, dynpool);
const unsigned int loccount = this->local_symbol_count();
if (loccount == 0)
return;
- // Intialize the thumb function bit-vector.
+ // Initialize the thumb function bit-vector.
std::vector<bool> empty_vector(loccount, false);
this->local_symbol_is_thumb_function_.swap(empty_vector);
if (strtabshdr.get_sh_type() != elfcpp::SHT_STRTAB)
{
this->error(_("symbol table name section has wrong type: %u"),
- static_cast<unsigned int>(strtabshdr.get_sh_type()));
+ static_cast<unsigned int>(strtabshdr.get_sh_type()));
return;
}
const char* pnames =
// Skip the first dummy symbol.
psyms += sym_size;
- typename Sized_relobj<32, big_endian>::Local_values* plocal_values =
+ typename Sized_relobj_file<32, big_endian>::Local_values* plocal_values =
this->local_values();
for (unsigned int i = 1; i < loccount; ++i, psyms += sym_size)
{
const Symbol_table* symtab,
const Layout* layout,
const unsigned char* pshdrs,
- typename Sized_relobj<32, big_endian>::Views* pviews)
+ Output_file* of,
+ typename Sized_relobj_file<32, big_endian>::Views* pviews)
{
- // Call parent to relocate sections.
- Sized_relobj<32, big_endian>::do_relocate_sections(symtab, layout, pshdrs,
- pviews);
+ // Relocate the section data.
+ this->relocate_section_range(symtab, layout, pshdrs, of, pviews,
+ 1, this->shnum() - 1);
// We do not generate stubs if doing a relocatable link.
if (parameters->options().relocatable())
unsigned char* view = (*pviews)[i].view + offset;
Arm_address address = stub_table->address();
section_size_type view_size = stub_table->data_size();
-
+
stub_table->relocate_stubs(&relinfo, arm_target, os, view, address,
view_size);
}
section_address,
section_size);
}
+ // BE8 swapping
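+      // In a BE8 image the data is big-endian but instructions must be
+      // stored little-endian.  The views above were written in big-endian
+      // order, so here we walk the mapping symbols and byte-reverse each
+      // 32-bit word in ARM ('a') spans and each 16-bit halfword in Thumb
+      // ('t') spans; data ('d') spans are left untouched.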
+ if (parameters->options().be8())
+ {
+ section_size_type span_start, span_end;
+ elfcpp::Shdr<32, big_endian>
+ shdr(pshdrs + i * elfcpp::Elf_sizes<32>::shdr_size);
+ Mapping_symbol_position section_start(i, 0);
+ typename Mapping_symbols_info::const_iterator p =
+ this->mapping_symbols_info_.lower_bound(section_start);
+ unsigned char* view = (*pviews)[i].view;
+ Arm_address view_address = (*pviews)[i].address;
+ section_size_type view_size = (*pviews)[i].view_size;
+ while (p != this->mapping_symbols_info_.end()
+ && p->first.first == i)
+ {
+ typename Mapping_symbols_info::const_iterator next =
+ this->mapping_symbols_info_.upper_bound(p->first);
+
+ // Only swap arm or thumb code.
+ if ((p->second == 'a') || (p->second == 't'))
+ {
+ Output_section* os = this->output_section(i);
+ gold_assert(os != NULL);
+ Arm_address section_address =
+ this->simple_input_section_output_address(i, os);
+ span_start = convert_to_section_size_type(p->first.second);
+ if (next != this->mapping_symbols_info_.end()
+ && next->first.first == i)
+ span_end =
+ convert_to_section_size_type(next->first.second);
+ else
+ span_end =
+ convert_to_section_size_type(shdr.get_sh_size());
+ unsigned char* section_view =
+ view + (section_address - view_address);
+ uint64_t section_size = this->section_size(i);
+
+ gold_assert(section_address >= view_address
+ && ((section_address + section_size)
+ <= (view_address + view_size)));
+
+ // Set the output view for swapping.
+ unsigned char *oview = section_view + span_start;
+ unsigned int index = 0;
+ if (p->second == 'a')
+ {
+ while (index + 3 < (span_end - span_start))
+ {
+ typedef typename elfcpp::Swap<32, big_endian>
+ ::Valtype Valtype;
+ Valtype* wv =
+ reinterpret_cast<Valtype*>(oview+index);
+ uint32_t val = elfcpp::Swap<32, false>::readval(wv);
+ elfcpp::Swap<32, true>::writeval(wv, val);
+ index += 4;
+ }
+ }
+ else if (p->second == 't')
+ {
+ while (index + 1 < (span_end - span_start))
+ {
+ typedef typename elfcpp::Swap<16, big_endian>
+ ::Valtype Valtype;
+ Valtype* wv =
+ reinterpret_cast<Valtype*>(oview+index);
+ uint16_t val = elfcpp::Swap<16, false>::readval(wv);
+ elfcpp::Swap<16, true>::writeval(wv, val);
+ index += 2;
+ }
+ }
+ }
+ p = next;
+ }
+ }
}
}
-// Find the linked text section of an EXIDX section by looking the the first
+// Find the linked text section of an EXIDX section by looking at the first
// relocation. 4.4.1 of the EHABI specifications says that an EXIDX section
-// must be linked to to its associated code section via the sh_link field of
+// must be linked to its associated code section via the sh_link field of
// its section header. However, some tools are broken and the link is not
// always set. LD just drops such an EXIDX section silently, causing the
// associated code not to be unwindable.  Here we try a little bit harder to
unsigned int* pshndx)
{
elfcpp::Shdr<32, big_endian> shdr(pshdr);
-
+
// If there is no relocation, we cannot find the linked text section.
size_t reloc_size;
if (shdr.get_sh_type() == elfcpp::SHT_REL)
else
reloc_size = elfcpp::Elf_sizes<32>::rela_size;
size_t reloc_count = shdr.get_sh_size() / reloc_size;
-
+
// Get the relocations.
const unsigned char* prelocs =
- this->get_view(shdr.get_sh_offset(), shdr.get_sh_size(), true, false);
+ this->get_view(shdr.get_sh_offset(), shdr.get_sh_size(), true, false);
// Find the REL31 relocation for the first word of the first EXIDX entry.
for (size_t i = 0; i < reloc_count; ++i, prelocs += reloc_size)
// Create an Arm_exidx_input_section object for this EXIDX section.
Arm_exidx_input_section* exidx_input_section =
new Arm_exidx_input_section(this, shndx, text_shndx, shdr.get_sh_size(),
- shdr.get_sh_addralign());
+ shdr.get_sh_addralign(),
+ text_shdr.get_sh_size());
gold_assert(this->exidx_section_map_[shndx] == NULL);
this->exidx_section_map_[shndx] = exidx_input_section;
this->section_name(shndx).c_str(), shndx, text_shndx,
this->name().c_str());
exidx_input_section->set_has_errors();
- }
+ }
else if (this->exidx_section_map_[text_shndx] != NULL)
{
unsigned other_exidx_shndx =
exidx_input_section->set_has_errors();
}
else if ((text_shdr.get_sh_flags() & elfcpp::SHF_EXECINSTR) == 0)
- // I would like to make this an error but currenlty ld just ignores
+ // I would like to make this an error but currently ld just ignores
// this.
gold_warning(_("EXIDX section %s(%u) links to non-executable section "
"%s(%u) in %s"),
Arm_relobj<big_endian>::do_read_symbols(Read_symbols_data* sd)
{
// Call parent class to read symbol information.
- Sized_relobj<32, big_endian>::do_read_symbols(sd);
+ this->base_read_symbols(sd);
// If this input file is a binary file, it has no processor
// specific flags and attributes section.
std::vector<unsigned int> deferred_exidx_sections;
const size_t shdr_size = elfcpp::Elf_sizes<32>::shdr_size;
const unsigned char* pshdrs = sd->section_headers->data();
- const unsigned char *ps = pshdrs + shdr_size;
+ const unsigned char* ps = pshdrs + shdr_size;
bool must_merge_flags_and_attributes = false;
for (unsigned int i = 1; i < this->shnum(); ++i, ps += shdr_size)
{
if (shdr.get_sh_type() == elfcpp::SHT_ARM_ATTRIBUTES)
{
- gold_assert(this->attributes_section_data_ == NULL);
+ gold_assert(this->attributes_section_data_ == NULL);
section_offset_type section_offset = shdr.get_sh_offset();
section_size_type section_size =
convert_to_section_size_type(shdr.get_sh_size());
- File_view* view = this->get_lasting_view(section_offset,
- section_size, true, false);
+ const unsigned char* view =
+ this->get_view(section_offset, section_size, true, false);
this->attributes_section_data_ =
- new Attributes_section_data(view->data(), section_size);
+ new Attributes_section_data(view, section_size);
}
else if (shdr.get_sh_type() == elfcpp::SHT_ARM_EXIDX)
{
+ text_shndx * shdr_size);
this->make_exidx_input_section(i, shdr, text_shndx, text_shdr);
}
+ // EHABI 4.4.1 requires the SHF_LINK_ORDER flag to be set.
+ if ((shdr.get_sh_flags() & elfcpp::SHF_LINK_ORDER) == 0)
+ gold_warning(_("SHF_LINK_ORDER not set in EXIDX section %s of %s"),
+ this->section_name(i).c_str(), this->name().c_str());
}
}
return;
}
- // Some tools are broken and they do not set the link of EXIDX sections.
+ // Some tools are broken and they do not set the link of EXIDX sections.
// We look at the first relocation to figure out the linked sections.
if (!deferred_exidx_sections.empty())
{
const unsigned char* psyms = this->get_view(symtabshdr.get_sh_offset(),
locsize, true, true);
- // Process the deferred EXIDX sections.
- for(unsigned int i = 0; i < deferred_exidx_sections.size(); ++i)
+ // Process the deferred EXIDX sections.
+ for (unsigned int i = 0; i < deferred_exidx_sections.size(); ++i)
{
unsigned int shndx = deferred_exidx_sections[i];
elfcpp::Shdr<32, big_endian> shdr(pshdrs + shndx * shdr_size);
}
// Process relocations for garbage collection. The ARM target uses .ARM.exidx
-// sections for unwinding. These sections are referenced implicitly by
-// text sections linked in the section headers. If we ignore these implict
+// sections for unwinding. These sections are referenced implicitly by
+// text sections linked in the section headers. If we ignore these implicit
// references, the .ARM.exidx sections and any .ARM.extab sections they use
// will be garbage-collected incorrectly. Hence we override the same function
// in the base class to handle these implicit references.
Read_relocs_data* rd)
{
// First, call base class method to process relocations in this object.
- Sized_relobj<32, big_endian>::do_gc_process_relocs(symtab, layout, rd);
+ Sized_relobj_file<32, big_endian>::do_gc_process_relocs(symtab, layout, rd);
// If --gc-sections is not specified, there is nothing more to do.
// This happens when --icf is used but --gc-sections is not.
if (!parameters->options().gc_sections())
return;
-
+
unsigned int shnum = this->shnum();
const unsigned int shdr_size = elfcpp::Elf_sizes<32>::shdr_size;
const unsigned char* pshdrs = this->get_view(this->elf_file()->shoff(),
// Loop over the local symbols.
- typedef typename Sized_relobj<32, big_endian>::Output_sections
+ typedef typename Sized_relobj_file<32, big_endian>::Output_sections
Output_sections;
const Output_sections& out_sections(this->output_sections());
unsigned int shnum = this->shnum();
// that is discarded due to entry merging.
lv.set_no_output_symtab_entry();
continue;
- }
+ }
}
}
Arm_dynobj<big_endian>::do_read_symbols(Read_symbols_data* sd)
{
// Call parent class to read symbol information.
- Sized_dynobj<32, big_endian>::do_read_symbols(sd);
+ this->base_read_symbols(sd);
// Read processor-specific flags in ELF file header.
const unsigned char* pehdr = this->get_view(elfcpp::file_header_offset,
// We read from the end because gas seems to put it near the end of
// the section headers.
const size_t shdr_size = elfcpp::Elf_sizes<32>::shdr_size;
- const unsigned char *ps =
+ const unsigned char* ps =
sd->section_headers->data() + shdr_size * (this->shnum() - 1);
for (unsigned int i = this->shnum(); i > 0; --i, ps -= shdr_size)
{
section_offset_type section_offset = shdr.get_sh_offset();
section_size_type section_size =
convert_to_section_size_type(shdr.get_sh_size());
- File_view* view = this->get_lasting_view(section_offset,
- section_size, true, false);
+ const unsigned char* view =
+ this->get_view(section_offset, section_size, true, false);
this->attributes_section_data_ =
- new Attributes_section_data(view->data(), section_size);
+ new Attributes_section_data(view, section_size);
break;
}
}
const unsigned char* view,
const typename Reloc_types<elfcpp::SHT_REL, 32, big_endian>::Reloc&) const
{
- typedef struct Arm_relocate_functions<big_endian> RelocFuncs;
-
+ typedef class Arm_relocate_functions<big_endian> RelocFuncs;
+
switch (r_type)
{
case elfcpp::R_ARM_CALL:
typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
const Valtype* wv = reinterpret_cast<const Valtype*>(view);
Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
- return utils::sign_extend<26>(val << 2);
+ return Bits<26>::sign_extend32(val << 2);
}
case elfcpp::R_ARM_THM_CALL:
  // We are doing a static link.  Just mark it as belonging to module 1,
// the executable.
unsigned int got_offset = this->add_constant(1);
- gsym->set_got_offset(got_type, got_offset);
+ gsym->set_got_offset(got_type, got_offset);
got_offset = this->add_constant(0);
this->static_relocs_.push_back(Static_reloc(got_offset,
elfcpp::R_ARM_TLS_DTPOFF32,
void
Arm_output_data_got<big_endian>::add_tls_gd32_with_static_reloc(
unsigned int got_type,
- Sized_relobj<32, big_endian>* object,
+ Sized_relobj_file<32, big_endian>* object,
unsigned int index)
{
if (object->local_has_got_offset(index, got_type))
unsigned int got_offset = this->add_constant(1);
object->set_local_got_offset(index, got_type, got_offset);
got_offset = this->add_constant(0);
- this->static_relocs_.push_back(Static_reloc(got_offset,
- elfcpp::R_ARM_TLS_DTPOFF32,
+ this->static_relocs_.push_back(Static_reloc(got_offset,
+ elfcpp::R_ARM_TLS_DTPOFF32,
object, index));
}
Output_segment* tls_segment = this->layout_->tls_segment();
gold_assert(tls_segment != NULL);
-
+
// The thread pointer $tp points to the TCB, which is followed by the
// TLS. So we need to adjust $tp relative addressing by this amount.
Arm_address aligned_tcb_size =
for (size_t i = 0; i < this->static_relocs_.size(); ++i)
{
Static_reloc& reloc(this->static_relocs_[i]);
-
+
Arm_address value;
if (!reloc.symbol_is_global())
{
- Sized_relobj<32, big_endian>* object = reloc.relobj();
+ Sized_relobj_file<32, big_endian>* object = reloc.relobj();
const Symbol_value<32>* psymval =
reloc.relobj()->local_symbol(reloc.index());
reloc.index(), reloc.relobj()->name().c_str());
continue;
}
-
+
value = psymval->value(object, 0);
}
else
}
// A class to handle the PLT data.
+// This is an abstract base class that handles most of the linker details
+// but does not know the actual contents of PLT entries. The derived
+// classes below fill in those details.
template<bool big_endian>
class Output_data_plt_arm : public Output_section_data
{
public:
+ // Unlike aarch64, which records the symbol value in the "addend" field of a
+ // relocation and can do so at the same time the IRelative reloc is created
+ // for the symbol, arm puts the symbol value into the GOT table, which is not
+ // written until Output_data_plt_arm::do_write(). So we keep the necessary
+ // symbol information in this struct for later use in do_write. We usually
+ // have only a very limited number of ifuncs, so the extra data required here
+ // is also limited.
+
+ struct IRelative_data
+ {
+ IRelative_data(Sized_symbol<32>* sized_symbol)
+ : symbol_is_global_(true)
+ {
+ u_.global = sized_symbol;
+ }
+
+ IRelative_data(Sized_relobj_file<32, big_endian>* relobj,
+ unsigned int index)
+ : symbol_is_global_(false)
+ {
+ u_.local.relobj = relobj;
+ u_.local.index = index;
+ }
+
+ union
+ {
+ Sized_symbol<32>* global;
+
+ struct
+ {
+ Sized_relobj_file<32, big_endian>* relobj;
+ unsigned int index;
+ } local;
+ } u_;
+
+ bool symbol_is_global_;
+ };
+
typedef Output_data_reloc<elfcpp::SHT_REL, true, 32, big_endian>
Reloc_section;
- Output_data_plt_arm(Layout*, Output_data_space*);
+ Output_data_plt_arm(Layout* layout, uint64_t addralign,
+ Arm_output_data_got<big_endian>* got,
+ Output_data_space* got_plt,
+ Output_data_space* got_irelative);
// Add an entry to the PLT.
void
- add_entry(Symbol* gsym);
+ add_entry(Symbol_table* symtab, Layout* layout, Symbol* gsym);
+
+ // Add the relocation for a plt entry.
+ void
+ add_relocation(Symbol_table* symtab, Layout* layout,
+ Symbol* gsym, unsigned int got_offset);
+
+ // Add an entry to the PLT for a local STT_GNU_IFUNC symbol.
+ unsigned int
+ add_local_ifunc_entry(Symbol_table* symtab, Layout*,
+ Sized_relobj_file<32, big_endian>* relobj,
+ unsigned int local_sym_index);
// Return the .rel.plt section data.
const Reloc_section*
rel_plt() const
{ return this->rel_; }
+ // Return the PLT relocation container for IRELATIVE.
+ Reloc_section*
+ rel_irelative(Symbol_table*, Layout*);
+
+ // Return the number of PLT entries.
+ unsigned int
+ entry_count() const
+ { return this->count_ + this->irelative_count_; }
+
+ // Return the offset of the first non-reserved PLT entry.
+ unsigned int
+ first_plt_entry_offset() const
+ { return this->do_first_plt_entry_offset(); }
+
+ // Return the size of a PLT entry.
+ unsigned int
+ get_plt_entry_size() const
+ { return this->do_get_plt_entry_size(); }
+
+ // Return the PLT address for globals.
+ uint32_t
+ address_for_global(const Symbol*) const;
+
+ // Return the PLT address for locals.
+ uint32_t
+ address_for_local(const Relobj*, unsigned int symndx) const;
+
protected:
+ // Fill in the first PLT entry.
+ void
+ fill_first_plt_entry(unsigned char* pov,
+ Arm_address got_address,
+ Arm_address plt_address)
+ { this->do_fill_first_plt_entry(pov, got_address, plt_address); }
+
+ void
+ fill_plt_entry(unsigned char* pov,
+ Arm_address got_address,
+ Arm_address plt_address,
+ unsigned int got_offset,
+ unsigned int plt_offset)
+ { do_fill_plt_entry(pov, got_address, plt_address, got_offset, plt_offset); }
+
+ virtual unsigned int
+ do_first_plt_entry_offset() const = 0;
+
+ virtual unsigned int
+ do_get_plt_entry_size() const = 0;
+
+ virtual void
+ do_fill_first_plt_entry(unsigned char* pov,
+ Arm_address got_address,
+ Arm_address plt_address) = 0;
+
+ virtual void
+ do_fill_plt_entry(unsigned char* pov,
+ Arm_address got_address,
+ Arm_address plt_address,
+ unsigned int got_offset,
+ unsigned int plt_offset) = 0;
+
void
do_adjust_output_section(Output_section* os);
{ mapfile->print_output_data(this, _("** PLT")); }
private:
- // Template for the first PLT entry.
- static const uint32_t first_plt_entry[5];
-
- // Template for subsequent PLT entries.
- static const uint32_t plt_entry[3];
-
// Set the final size.
void
set_final_data_size()
{
- this->set_data_size(sizeof(first_plt_entry)
- + this->count_ * sizeof(plt_entry));
+ this->set_data_size(this->first_plt_entry_offset()
+ + ((this->count_ + this->irelative_count_)
+ * this->get_plt_entry_size()));
}
// Write out the PLT data.
void
do_write(Output_file*);
+ // Record irelative symbol data.
+ void insert_irelative_data(const IRelative_data& idata)
+ { irelative_data_vec_.push_back(idata); }
+
// The reloc section.
Reloc_section* rel_;
+ // The IRELATIVE relocs, if necessary. These must follow the
+ // regular PLT relocations.
+ Reloc_section* irelative_rel_;
+ // The .got section.
+ Arm_output_data_got<big_endian>* got_;
// The .got.plt section.
Output_data_space* got_plt_;
+ // The part of the .got.plt section used for IRELATIVE relocs.
+ Output_data_space* got_irelative_;
// The number of PLT entries.
unsigned int count_;
+ // Number of PLT entries with R_ARM_IRELATIVE relocs. These
+ // follow the regular PLT entries.
+ unsigned int irelative_count_;
+ // Vector for irelative data.
+ typedef std::vector<IRelative_data> IRelative_data_vec;
+ IRelative_data_vec irelative_data_vec_;
};
// Create the PLT section. The ordinary .got section is an argument,
// section just for PLT entries.
template<bool big_endian>
-Output_data_plt_arm<big_endian>::Output_data_plt_arm(Layout* layout,
- Output_data_space* got_plt)
- : Output_section_data(4), got_plt_(got_plt), count_(0)
+Output_data_plt_arm<big_endian>::Output_data_plt_arm(
+ Layout* layout, uint64_t addralign,
+ Arm_output_data_got<big_endian>* got,
+ Output_data_space* got_plt,
+ Output_data_space* got_irelative)
+ : Output_section_data(addralign), irelative_rel_(NULL),
+ got_(got), got_plt_(got_plt), got_irelative_(got_irelative),
+ count_(0), irelative_count_(0)
{
this->rel_ = new Reloc_section(false);
layout->add_output_section_data(".rel.plt", elfcpp::SHT_REL,
- elfcpp::SHF_ALLOC, this->rel_, true, false,
- false, false);
+ elfcpp::SHF_ALLOC, this->rel_,
+ ORDER_DYNAMIC_PLT_RELOCS, false);
}
template<bool big_endian>
template<bool big_endian>
void
-Output_data_plt_arm<big_endian>::add_entry(Symbol* gsym)
+Output_data_plt_arm<big_endian>::add_entry(Symbol_table* symtab,
+ Layout* layout,
+ Symbol* gsym)
{
gold_assert(!gsym->has_plt_offset());
- // Note that when setting the PLT offset we skip the initial
- // reserved PLT entry.
- gsym->set_plt_offset((this->count_) * sizeof(plt_entry)
- + sizeof(first_plt_entry));
+ unsigned int* entry_count;
+ Output_section_data_build* got;
+
+ // We have two different types of plt entries here: normal and ifunc.
+
+ // For a normal plt, the offset begins at first_plt_entry_offset() (20), so
+ // the 1st entry offset is 20, the second 32, the third 44, etc.
+
+ // For an ifunc plt, the offset begins at 0, so the first offset is 0, the
+ // second 12, the third 24, etc.
- ++this->count_;
+ // IFunc plt entries *always* come after *normal* plt entries.
- section_offset_type got_offset = this->got_plt_->current_data_size();
+ // Note that when computing the plt address of a certain symbol, "plt_address
+ // + plt_offset" is no longer correct. Use target->plt_address_for_global()
+ // or target->plt_address_for_local() instead.
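+ //
+ // As an illustrative example (assuming the standard 20-byte header and
+ // 12-byte short entries): with two normal entries and one ifunc entry, the
+ // normal entries get plt_offset 20 and 32, while the ifunc entry gets
+ // plt_offset 0 but is laid out after them, i.e. at byte 20 + 2 * 12 = 44
+ // from the start of the PLT.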
+
+ int begin_offset = 0;
+ if (gsym->type() == elfcpp::STT_GNU_IFUNC
+ && gsym->can_use_relative_reloc(false))
+ {
+ entry_count = &this->irelative_count_;
+ got = this->got_irelative_;
+ // For irelative plt entries, the offset is relative to the end of the
+ // normal plt entries, so it starts from 0.
+ begin_offset = 0;
+ // Record symbol information.
+ this->insert_irelative_data(
+ IRelative_data(symtab->get_sized_symbol<32>(gsym)));
+ }
+ else
+ {
+ entry_count = &this->count_;
+ got = this->got_plt_;
+ // Note that for normal plt entries, when setting the PLT offset we skip
+ // the initial reserved PLT entry.
+ begin_offset = this->first_plt_entry_offset();
+ }
+
+ gsym->set_plt_offset(begin_offset
+ + (*entry_count) * this->get_plt_entry_size());
+
+ ++(*entry_count);
+
+ section_offset_type got_offset = got->current_data_size();
// Every PLT entry needs a GOT entry which points back to the PLT
// entry (this will be changed by the dynamic linker, normally
// lazily when the function is called).
- this->got_plt_->set_current_data_size(got_offset + 4);
+ got->set_current_data_size(got_offset + 4);
// Every PLT entry needs a reloc.
- gsym->set_needs_dynsym_entry();
- this->rel_->add_global(gsym, elfcpp::R_ARM_JUMP_SLOT, this->got_plt_,
- got_offset);
+ this->add_relocation(symtab, layout, gsym, got_offset);
// Note that we don't need to save the symbol. The contents of the
// PLT are independent of which symbols are used. The symbols only
// appear in the relocations.
}
-// ARM PLTs.
-// FIXME: This is not very flexible. Right now this has only been tested
-// on armv5te. If we are to support additional architecture features like
-// Thumb-2 or BE8, we need to make this more flexible like GNU ld.
+// Add an entry to the PLT for a local STT_GNU_IFUNC symbol. Return
+// the PLT offset.
-// The first entry in the PLT.
template<bool big_endian>
-const uint32_t Output_data_plt_arm<big_endian>::first_plt_entry[5] =
+unsigned int
+Output_data_plt_arm<big_endian>::add_local_ifunc_entry(
+ Symbol_table* symtab,
+ Layout* layout,
+ Sized_relobj_file<32, big_endian>* relobj,
+ unsigned int local_sym_index)
{
- 0xe52de004, // str lr, [sp, #-4]!
- 0xe59fe004, // ldr lr, [pc, #4]
- 0xe08fe00e, // add lr, pc, lr
- 0xe5bef008, // ldr pc, [lr, #8]!
- 0x00000000, // &GOT[0] - .
-};
+ this->insert_irelative_data(IRelative_data(relobj, local_sym_index));
-// Subsequent entries in the PLT.
+ // Note that when computing the plt entry address, "plt_address + plt_offset"
+ // is no longer correct. Use target->plt_address_for_local() instead.
+ unsigned int plt_offset = this->irelative_count_ * this->get_plt_entry_size();
+ ++this->irelative_count_;
-template<bool big_endian>
-const uint32_t Output_data_plt_arm<big_endian>::plt_entry[3] =
-{
- 0xe28fc600, // add ip, pc, #0xNN00000
- 0xe28cca00, // add ip, ip, #0xNN000
- 0xe5bcf000, // ldr pc, [ip, #0xNNN]!
-};
+ section_offset_type got_offset = this->got_irelative_->current_data_size();
+
+ // Every PLT entry needs a GOT entry which points back to the PLT
+ // entry.
+ this->got_irelative_->set_current_data_size(got_offset + 4);
+
+
+ // Every PLT entry needs a reloc.
+ Reloc_section* rel = this->rel_irelative(symtab, layout);
+ rel->add_symbolless_local_addend(relobj, local_sym_index,
+ elfcpp::R_ARM_IRELATIVE,
+ this->got_irelative_, got_offset);
+ return plt_offset;
+}
-// Write out the PLT. This uses the hand-coded instructions above,
-// and adjusts them as needed. This is all specified by the arm ELF
-// Processor Supplement.
+
+// Add the relocation for a PLT entry.
template<bool big_endian>
void
-Output_data_plt_arm<big_endian>::do_write(Output_file* of)
+Output_data_plt_arm<big_endian>::add_relocation(
+ Symbol_table* symtab, Layout* layout, Symbol* gsym, unsigned int got_offset)
{
- const off_t offset = this->offset();
- const section_size_type oview_size =
- convert_to_section_size_type(this->data_size());
- unsigned char* const oview = of->get_output_view(offset, oview_size);
+ if (gsym->type() == elfcpp::STT_GNU_IFUNC
+ && gsym->can_use_relative_reloc(false))
+ {
+ Reloc_section* rel = this->rel_irelative(symtab, layout);
+ rel->add_symbolless_global_addend(gsym, elfcpp::R_ARM_IRELATIVE,
+ this->got_irelative_, got_offset);
+ }
+ else
+ {
+ gsym->set_needs_dynsym_entry();
+ this->rel_->add_global(gsym, elfcpp::R_ARM_JUMP_SLOT, this->got_plt_,
+ got_offset);
+ }
+}
- const off_t got_file_offset = this->got_plt_->offset();
- const section_size_type got_size =
- convert_to_section_size_type(this->got_plt_->data_size());
- unsigned char* const got_view = of->get_output_view(got_file_offset,
- got_size);
- unsigned char* pov = oview;
- Arm_address plt_address = this->address();
- Arm_address got_address = this->got_plt_->address();
+// Create the irelative relocation data.
- // Write first PLT entry. All but the last word are constants.
- const size_t num_first_plt_words = (sizeof(first_plt_entry)
- / sizeof(plt_entry[0]));
- for (size_t i = 0; i < num_first_plt_words - 1; i++)
- elfcpp::Swap<32, big_endian>::writeval(pov + i * 4, first_plt_entry[i]);
- // Last word in first PLT entry is &GOT[0] - .
- elfcpp::Swap<32, big_endian>::writeval(pov + 16,
- got_address - (plt_address + 16));
- pov += sizeof(first_plt_entry);
+template<bool big_endian>
+typename Output_data_plt_arm<big_endian>::Reloc_section*
+Output_data_plt_arm<big_endian>::rel_irelative(Symbol_table* symtab,
+ Layout* layout)
+{
+ if (this->irelative_rel_ == NULL)
+ {
+ // Since irelative relocations go into 'rel.dyn', we delegate the
+ // creation of irelative_rel_ to where the rel_dyn section gets created.
+ Target_arm<big_endian>* arm_target =
+ Target_arm<big_endian>::default_target();
+ this->irelative_rel_ = arm_target->rel_irelative_section(layout);
+
+ // Make sure we have a place for the TLSDESC relocations, in
+ // case we see any later on.
+ // this->rel_tlsdesc(layout);
+ if (parameters->doing_static_link())
+ {
+ // A statically linked executable will only have a .rel.plt section to
+ // hold R_ARM_IRELATIVE relocs for STT_GNU_IFUNC symbols. The library
+ // will use these symbols to locate the IRELATIVE relocs at program
+ // startup time.
+ symtab->define_in_output_data("__rel_iplt_start", NULL,
+ Symbol_table::PREDEFINED,
+ this->irelative_rel_, 0, 0,
+ elfcpp::STT_NOTYPE, elfcpp::STB_GLOBAL,
+ elfcpp::STV_HIDDEN, 0, false, true);
+ symtab->define_in_output_data("__rel_iplt_end", NULL,
+ Symbol_table::PREDEFINED,
+ this->irelative_rel_, 0, 0,
+ elfcpp::STT_NOTYPE, elfcpp::STB_GLOBAL,
+ elfcpp::STV_HIDDEN, 0, true, true);
+ }
+ }
+ return this->irelative_rel_;
+}
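+
+// For reference, a C library's static startup code typically applies these
+// relocs by walking [__rel_iplt_start, __rel_iplt_end); roughly (a sketch,
+// not part of gold):
+//
+//   extern const Elf32_Rel __rel_iplt_start[], __rel_iplt_end[];
+//   for (const Elf32_Rel* r = __rel_iplt_start; r != __rel_iplt_end; ++r)
+//     {
+//       Elf32_Addr* where = reinterpret_cast<Elf32_Addr*>(r->r_offset);
+//       Elf32_Addr (*resolver)() = reinterpret_cast<Elf32_Addr (*)()>(*where);
+//       *where = resolver();
+//     }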
- unsigned char* got_pov = got_view;
- memset(got_pov, 0, 12);
- got_pov += 12;
+// Return the PLT address for a global symbol.
+
+template<bool big_endian>
+uint32_t
+Output_data_plt_arm<big_endian>::address_for_global(const Symbol* gsym) const
+{
+ uint64_t begin_offset = 0;
+ if (gsym->type() == elfcpp::STT_GNU_IFUNC
+ && gsym->can_use_relative_reloc(false))
+ {
+ begin_offset = (this->first_plt_entry_offset() +
+ this->count_ * this->get_plt_entry_size());
+ }
+ return this->address() + begin_offset + gsym->plt_offset();
+}
- const int rel_size = elfcpp::Elf_sizes<32>::rel_size;
- unsigned int plt_offset = sizeof(first_plt_entry);
- unsigned int plt_rel_offset = 0;
- unsigned int got_offset = 12;
- const unsigned int count = this->count_;
- for (unsigned int i = 0;
- i < count;
- ++i,
- pov += sizeof(plt_entry),
- got_pov += 4,
- plt_offset += sizeof(plt_entry),
- plt_rel_offset += rel_size,
- got_offset += 4)
- {
- // Set and adjust the PLT entry itself.
- int32_t offset = ((got_address + got_offset)
- - (plt_address + plt_offset + 8));
- gold_assert(offset >= 0 && offset < 0x0fffffff);
- uint32_t plt_insn0 = plt_entry[0] | ((offset >> 20) & 0xff);
+// Return the PLT address for a local symbol. These are always
+// IRELATIVE relocs.
+
+template<bool big_endian>
+uint32_t
+Output_data_plt_arm<big_endian>::address_for_local(
+ const Relobj* object,
+ unsigned int r_sym) const
+{
+ return (this->address()
+ + this->first_plt_entry_offset()
+ + this->count_ * this->get_plt_entry_size()
+ + object->local_plt_offset(r_sym));
+}
+
+
+template<bool big_endian>
+class Output_data_plt_arm_standard : public Output_data_plt_arm<big_endian>
+{
+ public:
+ Output_data_plt_arm_standard(Layout* layout,
+ Arm_output_data_got<big_endian>* got,
+ Output_data_space* got_plt,
+ Output_data_space* got_irelative)
+ : Output_data_plt_arm<big_endian>(layout, 4, got, got_plt, got_irelative)
+ { }
+
+ protected:
+ // Return the offset of the first non-reserved PLT entry.
+ virtual unsigned int
+ do_first_plt_entry_offset() const
+ { return sizeof(first_plt_entry); }
+
+ virtual void
+ do_fill_first_plt_entry(unsigned char* pov,
+ Arm_address got_address,
+ Arm_address plt_address);
+
+ private:
+ // Template for the first PLT entry.
+ static const uint32_t first_plt_entry[5];
+};
+
+// ARM PLTs.
+// FIXME: This is not very flexible. Right now this has only been tested
+// on armv5te. If we are to support additional architecture features like
+// Thumb-2 or BE8, we need to make this more flexible like GNU ld.
+
+// The first entry in the PLT.
+template<bool big_endian>
+const uint32_t Output_data_plt_arm_standard<big_endian>::first_plt_entry[5] =
+{
+ 0xe52de004, // str lr, [sp, #-4]!
+ 0xe59fe004, // ldr lr, [pc, #4]
+ 0xe08fe00e, // add lr, pc, lr
+ 0xe5bef008, // ldr pc, [lr, #8]!
+ 0x00000000, // &GOT[0] - .
+};
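+
+// For reference, the sequence works as follows (the usual lazy-binding
+// convention, described here for illustration): the final word holds
+// &GOT[0] - (PLT + 16); "ldr lr, [pc, #4]" loads it, "add lr, pc, lr"
+// (pc reads as PLT + 16 there) makes lr point at &GOT[0], and
+// "ldr pc, [lr, #8]!" jumps through GOT[2], which the dynamic linker fills
+// with its lazy resolver entry (GOT[1] holds its link-map pointer).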
+
+template<bool big_endian>
+void
+Output_data_plt_arm_standard<big_endian>::do_fill_first_plt_entry(
+ unsigned char* pov,
+ Arm_address got_address,
+ Arm_address plt_address)
+{
+ // Write first PLT entry. All but the last word are constants.
+ const size_t num_first_plt_words = (sizeof(first_plt_entry)
+ / sizeof(first_plt_entry[0]));
+ for (size_t i = 0; i < num_first_plt_words - 1; i++)
+ {
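+ // In BE8 format, data is big-endian but instructions are stored
+ // little-endian, so instruction words are written LE here even for a
+ // big-endian target.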
+ if (parameters->options().be8())
+ {
+ elfcpp::Swap<32, false>::writeval(pov + i * 4,
+ first_plt_entry[i]);
+ }
+ else
+ {
+ elfcpp::Swap<32, big_endian>::writeval(pov + i * 4,
+ first_plt_entry[i]);
+ }
+ }
+ // Last word in first PLT entry is &GOT[0] - .
+ elfcpp::Swap<32, big_endian>::writeval(pov + 16,
+ got_address - (plt_address + 16));
+}
+
+// Subsequent entries in the PLT.
+// This class generates short (12-byte) entries, for displacements up to 2^28.
+
+template<bool big_endian>
+class Output_data_plt_arm_short : public Output_data_plt_arm_standard<big_endian>
+{
+ public:
+ Output_data_plt_arm_short(Layout* layout,
+ Arm_output_data_got<big_endian>* got,
+ Output_data_space* got_plt,
+ Output_data_space* got_irelative)
+ : Output_data_plt_arm_standard<big_endian>(layout, got, got_plt, got_irelative)
+ { }
+
+ protected:
+ // Return the size of a PLT entry.
+ virtual unsigned int
+ do_get_plt_entry_size() const
+ { return sizeof(plt_entry); }
+
+ virtual void
+ do_fill_plt_entry(unsigned char* pov,
+ Arm_address got_address,
+ Arm_address plt_address,
+ unsigned int got_offset,
+ unsigned int plt_offset);
+
+ private:
+ // Template for subsequent PLT entries.
+ static const uint32_t plt_entry[3];
+};
+
+template<bool big_endian>
+const uint32_t Output_data_plt_arm_short<big_endian>::plt_entry[3] =
+{
+ 0xe28fc600, // add ip, pc, #0xNN00000
+ 0xe28cca00, // add ip, ip, #0xNN000
+ 0xe5bcf000, // ldr pc, [ip, #0xNNN]!
+};
+
+template<bool big_endian>
+void
+Output_data_plt_arm_short<big_endian>::do_fill_plt_entry(
+ unsigned char* pov,
+ Arm_address got_address,
+ Arm_address plt_address,
+ unsigned int got_offset,
+ unsigned int plt_offset)
+{
+ int32_t offset = ((got_address + got_offset)
+ - (plt_address + plt_offset + 8));
+ if (offset < 0 || offset > 0x0fffffff)
+ gold_error(_("PLT offset too large, try linking with --long-plt"));
+
+ uint32_t plt_insn0 = plt_entry[0] | ((offset >> 20) & 0xff);
+ uint32_t plt_insn1 = plt_entry[1] | ((offset >> 12) & 0xff);
+ uint32_t plt_insn2 = plt_entry[2] | (offset & 0xfff);
+
+ if (parameters->options().be8())
+ {
+ elfcpp::Swap<32, false>::writeval(pov, plt_insn0);
+ elfcpp::Swap<32, false>::writeval(pov + 4, plt_insn1);
+ elfcpp::Swap<32, false>::writeval(pov + 8, plt_insn2);
+ }
+ else
+ {
elfcpp::Swap<32, big_endian>::writeval(pov, plt_insn0);
- uint32_t plt_insn1 = plt_entry[1] | ((offset >> 12) & 0xff);
elfcpp::Swap<32, big_endian>::writeval(pov + 4, plt_insn1);
- uint32_t plt_insn2 = plt_entry[2] | (offset & 0xfff);
elfcpp::Swap<32, big_endian>::writeval(pov + 8, plt_insn2);
+ }
+}
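+
+// For example (illustration only), a displacement of 0x0123456 from the first
+// instruction's pc to the GOT slot is encoded across the three instructions as
+//   add ip, pc, #0x0100000   ; bits [27:20] -> 0x01
+//   add ip, ip, #0x23000     ; bits [19:12] -> 0x23
+//   ldr pc, [ip, #0x456]!    ; bits [11:0]  -> 0x456
+// so that the final load fetches pc from the symbol's GOT slot.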
+
+// This class generates long (16-byte) entries, for arbitrary displacements.
+
+template<bool big_endian>
+class Output_data_plt_arm_long : public Output_data_plt_arm_standard<big_endian>
+{
+ public:
+ Output_data_plt_arm_long(Layout* layout,
+ Arm_output_data_got<big_endian>* got,
+ Output_data_space* got_plt,
+ Output_data_space* got_irelative)
+ : Output_data_plt_arm_standard<big_endian>(layout, got, got_plt, got_irelative)
+ { }
+
+ protected:
+ // Return the size of a PLT entry.
+ virtual unsigned int
+ do_get_plt_entry_size() const
+ { return sizeof(plt_entry); }
+
+ virtual void
+ do_fill_plt_entry(unsigned char* pov,
+ Arm_address got_address,
+ Arm_address plt_address,
+ unsigned int got_offset,
+ unsigned int plt_offset);
+
+ private:
+ // Template for subsequent PLT entries.
+ static const uint32_t plt_entry[4];
+};
- // Set the entry in the GOT.
- elfcpp::Swap<32, big_endian>::writeval(got_pov, plt_address);
+template<bool big_endian>
+const uint32_t Output_data_plt_arm_long<big_endian>::plt_entry[4] =
+{
+ 0xe28fc200, // add ip, pc, #0xN0000000
+ 0xe28cc600, // add ip, ip, #0xNN00000
+ 0xe28cca00, // add ip, ip, #0xNN000
+ 0xe5bcf000, // ldr pc, [ip, #0xNNN]!
+};
+
+template<bool big_endian>
+void
+Output_data_plt_arm_long<big_endian>::do_fill_plt_entry(
+ unsigned char* pov,
+ Arm_address got_address,
+ Arm_address plt_address,
+ unsigned int got_offset,
+ unsigned int plt_offset)
+{
+ int32_t offset = ((got_address + got_offset)
+ - (plt_address + plt_offset + 8));
+
+ uint32_t plt_insn0 = plt_entry[0] | (offset >> 28);
+ uint32_t plt_insn1 = plt_entry[1] | ((offset >> 20) & 0xff);
+ uint32_t plt_insn2 = plt_entry[2] | ((offset >> 12) & 0xff);
+ uint32_t plt_insn3 = plt_entry[3] | (offset & 0xfff);
+
+ if (parameters->options().be8())
+ {
+ elfcpp::Swap<32, false>::writeval(pov, plt_insn0);
+ elfcpp::Swap<32, false>::writeval(pov + 4, plt_insn1);
+ elfcpp::Swap<32, false>::writeval(pov + 8, plt_insn2);
+ elfcpp::Swap<32, false>::writeval(pov + 12, plt_insn3);
+ }
+ else
+ {
+ elfcpp::Swap<32, big_endian>::writeval(pov, plt_insn0);
+ elfcpp::Swap<32, big_endian>::writeval(pov + 4, plt_insn1);
+ elfcpp::Swap<32, big_endian>::writeval(pov + 8, plt_insn2);
+ elfcpp::Swap<32, big_endian>::writeval(pov + 12, plt_insn3);
+ }
+}
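+
+// Relative to the short form, the extra leading "add ip, pc, #0xN0000000"
+// carries bits [31:28] of the displacement, so this four-instruction form can
+// reach a GOT slot at any 32-bit distance from the PLT entry.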
+
+// Write out the PLT. This uses the hand-coded instructions above,
+// and adjusts them as needed. This is all specified by the arm ELF
+// Processor Supplement.
+
+template<bool big_endian>
+void
+Output_data_plt_arm<big_endian>::do_write(Output_file* of)
+{
+ const off_t offset = this->offset();
+ const section_size_type oview_size =
+ convert_to_section_size_type(this->data_size());
+ unsigned char* const oview = of->get_output_view(offset, oview_size);
+
+ const off_t got_file_offset = this->got_plt_->offset();
+ gold_assert(got_file_offset + this->got_plt_->data_size()
+ == this->got_irelative_->offset());
+ const section_size_type got_size =
+ convert_to_section_size_type(this->got_plt_->data_size()
+ + this->got_irelative_->data_size());
+ unsigned char* const got_view = of->get_output_view(got_file_offset,
+ got_size);
+ unsigned char* pov = oview;
+
+ Arm_address plt_address = this->address();
+ Arm_address got_address = this->got_plt_->address();
+
+ // Write first PLT entry.
+ this->fill_first_plt_entry(pov, got_address, plt_address);
+ pov += this->first_plt_entry_offset();
+
+ unsigned char* got_pov = got_view;
+
+ memset(got_pov, 0, 12);
+ got_pov += 12;
+
+ unsigned int plt_offset = this->first_plt_entry_offset();
+ unsigned int got_offset = 12;
+ const unsigned int count = this->count_ + this->irelative_count_;
+ gold_assert(this->irelative_count_ == this->irelative_data_vec_.size());
+ for (unsigned int i = 0;
+ i < count;
+ ++i,
+ pov += this->get_plt_entry_size(),
+ got_pov += 4,
+ plt_offset += this->get_plt_entry_size(),
+ got_offset += 4)
+ {
+ // Set and adjust the PLT entry itself.
+ this->fill_plt_entry(pov, got_address, plt_address,
+ got_offset, plt_offset);
+
+ Arm_address value;
+ if (i < this->count_)
+ {
+ // For non-irelative got entries, the value is the beginning of the plt.
+ value = plt_address;
+ }
+ else
+ {
+ // For irelative got entries, the value is the (global/local) symbol
+ // address.
+ const IRelative_data& idata =
+ this->irelative_data_vec_[i - this->count_];
+ if (idata.symbol_is_global_)
+ {
+ // Set the entry in the GOT for irelative symbols. The content is
+ // the address of the ifunc, not the address of plt start.
+ const Sized_symbol<32>* sized_symbol = idata.u_.global;
+ gold_assert(sized_symbol->type() == elfcpp::STT_GNU_IFUNC);
+ value = sized_symbol->value();
+ }
+ else
+ {
+ value = idata.u_.local.relobj->local_symbol_value(
+ idata.u_.local.index, 0);
+ }
+ }
+ elfcpp::Swap<32, big_endian>::writeval(got_pov, value);
}
gold_assert(static_cast<section_size_type>(pov - oview) == oview_size);
of->write_output_view(got_file_offset, got_size, got_view);
}
+
// Create a PLT entry for a global symbol.
template<bool big_endian>
if (gsym->has_plt_offset())
return;
+ if (this->plt_ == NULL)
+ this->make_plt_section(symtab, layout);
+
+ this->plt_->add_entry(symtab, layout, gsym);
+}
+
+
+// Create the PLT section.
+template<bool big_endian>
+void
+Target_arm<big_endian>::make_plt_section(
+ Symbol_table* symtab, Layout* layout)
+{
if (this->plt_ == NULL)
{
- // Create the GOT sections first.
+ // Create the GOT section first.
this->got_section(symtab, layout);
- this->plt_ = new Output_data_plt_arm<big_endian>(layout, this->got_plt_);
+ // The GOT for irelatives is created along with .got.plt.
+ gold_assert(this->got_ != NULL
+ && this->got_plt_ != NULL
+ && this->got_irelative_ != NULL);
+ this->plt_ = this->make_data_plt(layout, this->got_, this->got_plt_,
+ this->got_irelative_);
+
layout->add_output_section_data(".plt", elfcpp::SHT_PROGBITS,
(elfcpp::SHF_ALLOC
| elfcpp::SHF_EXECINSTR),
- this->plt_, false, false, false, false);
+ this->plt_, ORDER_PLT, false);
+ symtab->define_in_output_data("$a", NULL,
+ Symbol_table::PREDEFINED,
+ this->plt_,
+ 0, 0, elfcpp::STT_NOTYPE,
+ elfcpp::STB_LOCAL,
+ elfcpp::STV_DEFAULT, 0,
+ false, false);
}
- this->plt_->add_entry(gsym);
+}
+
+
+// Make a PLT entry for a local STT_GNU_IFUNC symbol.
+
+template<bool big_endian>
+void
+Target_arm<big_endian>::make_local_ifunc_plt_entry(
+ Symbol_table* symtab, Layout* layout,
+ Sized_relobj_file<32, big_endian>* relobj,
+ unsigned int local_sym_index)
+{
+ if (relobj->local_has_plt_offset(local_sym_index))
+ return;
+ if (this->plt_ == NULL)
+ this->make_plt_section(symtab, layout);
+ unsigned int plt_offset = this->plt_->add_local_ifunc_entry(symtab, layout,
+ relobj,
+ local_sym_index);
+ relobj->set_local_plt_offset(local_sym_index, plt_offset);
+}
+
+
+// Return the number of entries in the PLT.
+
+template<bool big_endian>
+unsigned int
+Target_arm<big_endian>::plt_entry_count() const
+{
+ if (this->plt_ == NULL)
+ return 0;
+ return this->plt_->entry_count();
+}
+
+// Return the offset of the first non-reserved PLT entry.
+
+template<bool big_endian>
+unsigned int
+Target_arm<big_endian>::first_plt_entry_offset() const
+{
+ return this->plt_->first_plt_entry_offset();
+}
+
+// Return the size of each PLT entry.
+
+template<bool big_endian>
+unsigned int
+Target_arm<big_endian>::plt_entry_size() const
+{
+ return this->plt_->get_plt_entry_size();
}
// Get the section to use for TLS_DESC relocations.
Target_arm<big_endian>::got_mod_index_entry(
Symbol_table* symtab,
Layout* layout,
- Sized_relobj<32, big_endian>* object)
+ Sized_relobj_file<32, big_endian>* object)
{
if (this->got_mod_index_offset_ == -1U)
{
return tls::TLSOPT_NONE;
}
+// Get the Reference_flags for a particular relocation.
+
+template<bool big_endian>
+int
+Target_arm<big_endian>::Scan::get_reference_flags(unsigned int r_type)
+{
+ switch (r_type)
+ {
+ case elfcpp::R_ARM_NONE:
+ case elfcpp::R_ARM_V4BX:
+ case elfcpp::R_ARM_GNU_VTENTRY:
+ case elfcpp::R_ARM_GNU_VTINHERIT:
+ // No symbol reference.
+ return 0;
+
+ case elfcpp::R_ARM_ABS32:
+ case elfcpp::R_ARM_ABS16:
+ case elfcpp::R_ARM_ABS12:
+ case elfcpp::R_ARM_THM_ABS5:
+ case elfcpp::R_ARM_ABS8:
+ case elfcpp::R_ARM_BASE_ABS:
+ case elfcpp::R_ARM_MOVW_ABS_NC:
+ case elfcpp::R_ARM_MOVT_ABS:
+ case elfcpp::R_ARM_THM_MOVW_ABS_NC:
+ case elfcpp::R_ARM_THM_MOVT_ABS:
+ case elfcpp::R_ARM_ABS32_NOI:
+ return Symbol::ABSOLUTE_REF;
+
+ case elfcpp::R_ARM_REL32:
+ case elfcpp::R_ARM_LDR_PC_G0:
+ case elfcpp::R_ARM_SBREL32:
+ case elfcpp::R_ARM_THM_PC8:
+ case elfcpp::R_ARM_BASE_PREL:
+ case elfcpp::R_ARM_MOVW_PREL_NC:
+ case elfcpp::R_ARM_MOVT_PREL:
+ case elfcpp::R_ARM_THM_MOVW_PREL_NC:
+ case elfcpp::R_ARM_THM_MOVT_PREL:
+ case elfcpp::R_ARM_THM_ALU_PREL_11_0:
+ case elfcpp::R_ARM_THM_PC12:
+ case elfcpp::R_ARM_REL32_NOI:
+ case elfcpp::R_ARM_ALU_PC_G0_NC:
+ case elfcpp::R_ARM_ALU_PC_G0:
+ case elfcpp::R_ARM_ALU_PC_G1_NC:
+ case elfcpp::R_ARM_ALU_PC_G1:
+ case elfcpp::R_ARM_ALU_PC_G2:
+ case elfcpp::R_ARM_LDR_PC_G1:
+ case elfcpp::R_ARM_LDR_PC_G2:
+ case elfcpp::R_ARM_LDRS_PC_G0:
+ case elfcpp::R_ARM_LDRS_PC_G1:
+ case elfcpp::R_ARM_LDRS_PC_G2:
+ case elfcpp::R_ARM_LDC_PC_G0:
+ case elfcpp::R_ARM_LDC_PC_G1:
+ case elfcpp::R_ARM_LDC_PC_G2:
+ case elfcpp::R_ARM_ALU_SB_G0_NC:
+ case elfcpp::R_ARM_ALU_SB_G0:
+ case elfcpp::R_ARM_ALU_SB_G1_NC:
+ case elfcpp::R_ARM_ALU_SB_G1:
+ case elfcpp::R_ARM_ALU_SB_G2:
+ case elfcpp::R_ARM_LDR_SB_G0:
+ case elfcpp::R_ARM_LDR_SB_G1:
+ case elfcpp::R_ARM_LDR_SB_G2:
+ case elfcpp::R_ARM_LDRS_SB_G0:
+ case elfcpp::R_ARM_LDRS_SB_G1:
+ case elfcpp::R_ARM_LDRS_SB_G2:
+ case elfcpp::R_ARM_LDC_SB_G0:
+ case elfcpp::R_ARM_LDC_SB_G1:
+ case elfcpp::R_ARM_LDC_SB_G2:
+ case elfcpp::R_ARM_MOVW_BREL_NC:
+ case elfcpp::R_ARM_MOVT_BREL:
+ case elfcpp::R_ARM_MOVW_BREL:
+ case elfcpp::R_ARM_THM_MOVW_BREL_NC:
+ case elfcpp::R_ARM_THM_MOVT_BREL:
+ case elfcpp::R_ARM_THM_MOVW_BREL:
+ case elfcpp::R_ARM_GOTOFF32:
+ case elfcpp::R_ARM_GOTOFF12:
+ case elfcpp::R_ARM_SBREL31:
+ return Symbol::RELATIVE_REF;
+
+ case elfcpp::R_ARM_PLT32:
+ case elfcpp::R_ARM_CALL:
+ case elfcpp::R_ARM_JUMP24:
+ case elfcpp::R_ARM_THM_CALL:
+ case elfcpp::R_ARM_THM_JUMP24:
+ case elfcpp::R_ARM_THM_JUMP19:
+ case elfcpp::R_ARM_THM_JUMP6:
+ case elfcpp::R_ARM_THM_JUMP11:
+ case elfcpp::R_ARM_THM_JUMP8:
+ // R_ARM_PREL31 is not used to relocate call/jump instructions but
+ // in unwind tables. It may point to functions via PLTs.
+ // So we treat it like call/jump relocations above.
+ case elfcpp::R_ARM_PREL31:
+ return Symbol::FUNCTION_CALL | Symbol::RELATIVE_REF;
+
+ case elfcpp::R_ARM_GOT_BREL:
+ case elfcpp::R_ARM_GOT_ABS:
+ case elfcpp::R_ARM_GOT_PREL:
+ // Absolute in GOT.
+ return Symbol::ABSOLUTE_REF;
+
+ case elfcpp::R_ARM_TLS_GD32: // Global-dynamic
+ case elfcpp::R_ARM_TLS_LDM32: // Local-dynamic
+ case elfcpp::R_ARM_TLS_LDO32: // Alternate local-dynamic
+ case elfcpp::R_ARM_TLS_IE32: // Initial-exec
+ case elfcpp::R_ARM_TLS_LE32: // Local-exec
+ return Symbol::TLS_REF;
+
+ case elfcpp::R_ARM_TARGET1:
+ case elfcpp::R_ARM_TARGET2:
+ case elfcpp::R_ARM_COPY:
+ case elfcpp::R_ARM_GLOB_DAT:
+ case elfcpp::R_ARM_JUMP_SLOT:
+ case elfcpp::R_ARM_RELATIVE:
+ case elfcpp::R_ARM_PC24:
+ case elfcpp::R_ARM_LDR_SBREL_11_0_NC:
+ case elfcpp::R_ARM_ALU_SBREL_19_12_NC:
+ case elfcpp::R_ARM_ALU_SBREL_27_20_CK:
+ default:
+ // Not expected. We will give an error later.
+ return 0;
+ }
+}
+
// Report an unsupported relocation against a local symbol.
template<bool big_endian>
void
Target_arm<big_endian>::Scan::unsupported_reloc_local(
- Sized_relobj<32, big_endian>* object,
+ Sized_relobj_file<32, big_endian>* object,
unsigned int r_type)
{
gold_error(_("%s: unsupported reloc %u against local symbol"),
case elfcpp::R_ARM_JUMP_SLOT:
case elfcpp::R_ARM_ABS32:
case elfcpp::R_ARM_ABS32_NOI:
+ case elfcpp::R_ARM_IRELATIVE:
case elfcpp::R_ARM_PC24:
// FIXME: The following 3 types are not supported by Android's dynamic
// linker.
}
}
+
+// Return whether we need to make a PLT entry for a relocation of the
+// given type against a STT_GNU_IFUNC symbol.
+
+template<bool big_endian>
+bool
+Target_arm<big_endian>::Scan::reloc_needs_plt_for_ifunc(
+ Sized_relobj_file<32, big_endian>* object,
+ unsigned int r_type)
+{
+ int flags = Scan::get_reference_flags(r_type);
+ if (flags & Symbol::TLS_REF)
+ {
+ gold_error(_("%s: unsupported TLS reloc %u for IFUNC symbol"),
+ object->name().c_str(), r_type);
+ return false;
+ }
+ return flags != 0;
+}
+
+
// Scan a relocation for a local symbol.
// FIXME: This only handles a subset of relocation types used by Android
// on ARM v5te devices.
Target_arm<big_endian>::Scan::local(Symbol_table* symtab,
Layout* layout,
Target_arm* target,
- Sized_relobj<32, big_endian>* object,
+ Sized_relobj_file<32, big_endian>* object,
unsigned int data_shndx,
Output_section* output_section,
const elfcpp::Rel<32, big_endian>& reloc,
unsigned int r_type,
- const elfcpp::Sym<32, big_endian>& lsym)
+ const elfcpp::Sym<32, big_endian>& lsym,
+ bool is_discarded)
{
- r_type = get_real_reloc_type(r_type);
+ if (is_discarded)
+ return;
+
+ r_type = target->get_real_reloc_type(r_type);
+
+ // A local STT_GNU_IFUNC symbol may require a PLT entry.
+ bool is_ifunc = lsym.get_st_type() == elfcpp::STT_GNU_IFUNC;
+ if (is_ifunc && this->reloc_needs_plt_for_ifunc(object, r_type))
+ {
+ unsigned int r_sym = elfcpp::elf_r_sym<32>(reloc.get_r_info());
+ target->make_local_ifunc_plt_entry(symtab, layout, object, r_sym);
+ }
+
switch (r_type)
{
case elfcpp::R_ARM_NONE:
{
Reloc_section* rel_dyn = target->rel_dyn_section(layout);
unsigned int r_sym = elfcpp::elf_r_sym<32>(reloc.get_r_info());
- // If we are to add more other reloc types than R_ARM_ABS32,
- // we need to add check_non_pic(object, r_type) here.
+ // If we are to add more other reloc types than R_ARM_ABS32,
+ // we need to add check_non_pic(object, r_type) here.
rel_dyn->add_local_relative(object, r_sym, elfcpp::R_ARM_RELATIVE,
output_section, data_shndx,
- reloc.get_r_offset());
+ reloc.get_r_offset(), is_ifunc);
}
break;
// data section, we need to be careful not to apply this
// relocation statically.
if (parameters->options().output_is_position_independent())
- {
+ {
check_non_pic(object, r_type);
- Reloc_section* rel_dyn = target->rel_dyn_section(layout);
+ Reloc_section* rel_dyn = target->rel_dyn_section(layout);
unsigned int r_sym = elfcpp::elf_r_sym<32>(reloc.get_r_info());
- if (lsym.get_st_type() != elfcpp::STT_SECTION)
+ if (lsym.get_st_type() != elfcpp::STT_SECTION)
rel_dyn->add_local(object, r_sym, r_type, output_section,
data_shndx, reloc.get_r_offset());
- else
- {
- gold_assert(lsym.get_st_value() == 0);
+ else
+ {
+ gold_assert(lsym.get_st_value() == 0);
unsigned int shndx = lsym.get_st_shndx();
bool is_ordinary;
shndx = object->adjust_sym_shndx(r_sym, shndx,
rel_dyn->add_local_section(object, shndx,
r_type, output_section,
data_shndx, reloc.get_r_offset());
- }
- }
+ }
+ }
break;
- case elfcpp::R_ARM_PC24:
case elfcpp::R_ARM_REL32:
case elfcpp::R_ARM_LDR_PC_G0:
case elfcpp::R_ARM_SBREL32:
case elfcpp::R_ARM_CALL:
case elfcpp::R_ARM_JUMP24:
case elfcpp::R_ARM_THM_JUMP24:
- case elfcpp::R_ARM_LDR_SBREL_11_0_NC:
- case elfcpp::R_ARM_ALU_SBREL_19_12_NC:
- case elfcpp::R_ARM_ALU_SBREL_27_20_CK:
case elfcpp::R_ARM_SBREL31:
case elfcpp::R_ARM_PREL31:
case elfcpp::R_ARM_MOVW_PREL_NC:
{
bool output_is_shared = parameters->options().shared();
const tls::Tls_optimization optimized_type
- = Target_arm<big_endian>::optimize_tls_reloc(!output_is_shared,
+ = Target_arm<big_endian>::optimize_tls_reloc(!output_is_shared,
r_type);
switch (r_type)
{
case elfcpp::R_ARM_TLS_GD32: // Global-dynamic
if (optimized_type == tls::TLSOPT_NONE)
{
- // Create a pair of GOT entries for the module index and
- // dtv-relative offset.
- Arm_output_data_got<big_endian>* got
- = target->got_section(symtab, layout);
- unsigned int r_sym = elfcpp::elf_r_sym<32>(reloc.get_r_info());
+ // Create a pair of GOT entries for the module index and
+ // dtv-relative offset.
+ Arm_output_data_got<big_endian>* got
+ = target->got_section(symtab, layout);
+ unsigned int r_sym = elfcpp::elf_r_sym<32>(reloc.get_r_info());
unsigned int shndx = lsym.get_st_shndx();
bool is_ordinary;
shndx = object->adjust_sym_shndx(r_sym, shndx, &is_ordinary);
got->add_local_pair_with_rel(object, r_sym, shndx,
GOT_TYPE_TLS_PAIR,
target->rel_dyn_section(layout),
- elfcpp::R_ARM_TLS_DTPMOD32, 0);
+ elfcpp::R_ARM_TLS_DTPMOD32);
else
got->add_tls_gd32_with_static_reloc(GOT_TYPE_TLS_PAIR,
object, r_sym);
case elfcpp::R_ARM_TLS_LDM32: // Local-dynamic
if (optimized_type == tls::TLSOPT_NONE)
{
- // Create a GOT entry for the module index.
- target->got_mod_index_entry(symtab, layout, object);
+ // Create a GOT entry for the module index.
+ target->got_mod_index_entry(symtab, layout, object);
}
else
// FIXME: TLS optimization not supported yet.
layout->set_has_static_tls();
if (output_is_shared)
{
- // We need to create a dynamic relocation.
- gold_assert(lsym.get_st_type() != elfcpp::STT_SECTION);
- unsigned int r_sym = elfcpp::elf_r_sym<32>(reloc.get_r_info());
+ // We need to create a dynamic relocation.
+ gold_assert(lsym.get_st_type() != elfcpp::STT_SECTION);
+ unsigned int r_sym = elfcpp::elf_r_sym<32>(reloc.get_r_info());
Reloc_section* rel_dyn = target->rel_dyn_section(layout);
rel_dyn->add_local(object, r_sym, elfcpp::R_ARM_TLS_TPOFF32,
output_section, data_shndx,
}
break;
+ case elfcpp::R_ARM_PC24:
+ case elfcpp::R_ARM_LDR_SBREL_11_0_NC:
+ case elfcpp::R_ARM_ALU_SBREL_19_12_NC:
+ case elfcpp::R_ARM_ALU_SBREL_27_20_CK:
default:
unsupported_reloc_local(object, r_type);
break;
template<bool big_endian>
void
Target_arm<big_endian>::Scan::unsupported_reloc_global(
- Sized_relobj<32, big_endian>* object,
+ Sized_relobj_file<32, big_endian>* object,
unsigned int r_type,
Symbol* gsym)
{
Symbol_table*,
Layout*,
Target_arm<big_endian>* target,
- Sized_relobj<32, big_endian>*,
+ Sized_relobj_file<32, big_endian>*,
unsigned int,
Output_section*,
const elfcpp::Rel<32, big_endian>&,
Symbol_table*,
Layout*,
Target_arm<big_endian>* target,
- Sized_relobj<32, big_endian>*,
+ Sized_relobj_file<32, big_endian>*,
unsigned int,
Output_section*,
const elfcpp::Rel<32, big_endian>&,
Target_arm<big_endian>::Scan::global(Symbol_table* symtab,
Layout* layout,
Target_arm* target,
- Sized_relobj<32, big_endian>* object,
+ Sized_relobj_file<32, big_endian>* object,
unsigned int data_shndx,
Output_section* output_section,
const elfcpp::Rel<32, big_endian>& reloc,
&& strcmp(gsym->name(), "_GLOBAL_OFFSET_TABLE_") == 0)
target->got_section(symtab, layout);
- r_type = get_real_reloc_type(r_type);
+ // A STT_GNU_IFUNC symbol may require a PLT entry.
+ if (gsym->type() == elfcpp::STT_GNU_IFUNC
+ && this->reloc_needs_plt_for_ifunc(object, r_type))
+ target->make_plt_entry(symtab, layout, gsym);
+
+ r_type = target->get_real_reloc_type(r_type);
switch (r_type)
{
case elfcpp::R_ARM_NONE:
case elfcpp::R_ARM_ABS32_NOI:
// Absolute addressing relocations.
{
- // Make a PLT entry if necessary.
- if (this->symbol_needs_plt_entry(gsym))
- {
- target->make_plt_entry(symtab, layout, gsym);
- // Since this is not a PC-relative relocation, we may be
- // taking the address of a function. In that case we need to
- // set the entry in the dynamic symbol table to the address of
- // the PLT entry.
- if (gsym->is_from_dynobj() && !parameters->options().shared())
- gsym->set_needs_dynsym_value();
- }
- // Make a dynamic relocation if necessary.
- if (gsym->needs_dynamic_reloc(Symbol::ABSOLUTE_REF))
- {
- if (gsym->may_need_copy_reloc())
- {
- target->copy_reloc(symtab, layout, object,
- data_shndx, output_section, gsym, reloc);
- }
- else if ((r_type == elfcpp::R_ARM_ABS32
+ // Make a PLT entry if necessary.
+ if (this->symbol_needs_plt_entry(gsym))
+ {
+ target->make_plt_entry(symtab, layout, gsym);
+ // Since this is not a PC-relative relocation, we may be
+ // taking the address of a function. In that case we need to
+ // set the entry in the dynamic symbol table to the address of
+ // the PLT entry.
+ if (gsym->is_from_dynobj() && !parameters->options().shared())
+ gsym->set_needs_dynsym_value();
+ }
+ // Make a dynamic relocation if necessary.
+ if (gsym->needs_dynamic_reloc(Scan::get_reference_flags(r_type)))
+ {
+ if (!parameters->options().output_is_position_independent()
+ && gsym->may_need_copy_reloc())
+ {
+ target->copy_reloc(symtab, layout, object,
+ data_shndx, output_section, gsym, reloc);
+ }
+ else if ((r_type == elfcpp::R_ARM_ABS32
+ || r_type == elfcpp::R_ARM_ABS32_NOI)
+ && gsym->type() == elfcpp::STT_GNU_IFUNC
+ && gsym->can_use_relative_reloc(false)
+ && !gsym->is_from_dynobj()
+ && !gsym->is_undefined()
+ && !gsym->is_preemptible())
+ {
+ // Use an IRELATIVE reloc for a locally defined STT_GNU_IFUNC
+ // symbol. This makes a function address in a PIE executable
+ // match the address in a shared library that it links against.
+ Reloc_section* rel_irelative =
+ target->rel_irelative_section(layout);
+ unsigned int r_type = elfcpp::R_ARM_IRELATIVE;
+ rel_irelative->add_symbolless_global_addend(
+ gsym, r_type, output_section, object,
+ data_shndx, reloc.get_r_offset());
+ }
+ else if ((r_type == elfcpp::R_ARM_ABS32
|| r_type == elfcpp::R_ARM_ABS32_NOI)
- && gsym->can_use_relative_reloc(false))
- {
- Reloc_section* rel_dyn = target->rel_dyn_section(layout);
- rel_dyn->add_global_relative(gsym, elfcpp::R_ARM_RELATIVE,
- output_section, object,
- data_shndx, reloc.get_r_offset());
- }
- else
- {
+ && gsym->can_use_relative_reloc(false))
+ {
+ Reloc_section* rel_dyn = target->rel_dyn_section(layout);
+ rel_dyn->add_global_relative(gsym, elfcpp::R_ARM_RELATIVE,
+ output_section, object,
+ data_shndx, reloc.get_r_offset());
+ }
+ else
+ {
check_non_pic(object, r_type);
- Reloc_section* rel_dyn = target->rel_dyn_section(layout);
- rel_dyn->add_global(gsym, r_type, output_section, object,
- data_shndx, reloc.get_r_offset());
- }
- }
+ Reloc_section* rel_dyn = target->rel_dyn_section(layout);
+ rel_dyn->add_global(gsym, r_type, output_section, object,
+ data_shndx, reloc.get_r_offset());
+ }
+ }
}
break;
// We need a GOT section.
target->got_section(symtab, layout);
break;
-
+
case elfcpp::R_ARM_REL32:
case elfcpp::R_ARM_LDR_PC_G0:
case elfcpp::R_ARM_SBREL32:
case elfcpp::R_ARM_THM_PC8:
case elfcpp::R_ARM_BASE_PREL:
- case elfcpp::R_ARM_LDR_SBREL_11_0_NC:
- case elfcpp::R_ARM_ALU_SBREL_19_12_NC:
- case elfcpp::R_ARM_ALU_SBREL_27_20_CK:
case elfcpp::R_ARM_MOVW_PREL_NC:
case elfcpp::R_ARM_MOVT_PREL:
case elfcpp::R_ARM_THM_MOVW_PREL_NC:
// Relative addressing relocations.
{
// Make a dynamic relocation if necessary.
- int flags = Symbol::NON_PIC_REF;
- if (gsym->needs_dynamic_reloc(flags))
+ if (gsym->needs_dynamic_reloc(Scan::get_reference_flags(r_type)))
{
- if (target->may_need_copy_reloc(gsym))
+ if (parameters->options().output_is_executable()
+ && target->may_need_copy_reloc(gsym))
{
target->copy_reloc(symtab, layout, object,
data_shndx, output_section, gsym, reloc);
}
break;
- case elfcpp::R_ARM_PC24:
case elfcpp::R_ARM_THM_CALL:
case elfcpp::R_ARM_PLT32:
case elfcpp::R_ARM_CALL:
// All the relocation above are branches except for the PREL31 ones.
// A PREL31 relocation can point to a personality function in a shared
// library. In that case we want to use a PLT because we want to
- // call the personality routine and the dyanmic linkers we care about
+ // call the personality routine and the dynamic linkers we care about
// do not support dynamic PREL31 relocations. An REL31 relocation may
// point to a function whose unwinding behaviour is being described but
// we will not mistakenly generate a PLT for that because we should use
Arm_output_data_got<big_endian>* got =
target->got_section(symtab, layout);
if (gsym->final_value_is_known())
- got->add_global(gsym, GOT_TYPE_STANDARD);
+ {
+ // For a STT_GNU_IFUNC symbol we want the PLT address.
+ if (gsym->type() == elfcpp::STT_GNU_IFUNC)
+ got->add_global_plt(gsym, GOT_TYPE_STANDARD);
+ else
+ got->add_global(gsym, GOT_TYPE_STANDARD);
+ }
else
{
// If this symbol is not fully resolved, we need to add a
Reloc_section* rel_dyn = target->rel_dyn_section(layout);
if (gsym->is_from_dynobj()
|| gsym->is_undefined()
- || gsym->is_preemptible())
+ || gsym->is_preemptible()
+ || (gsym->visibility() == elfcpp::STV_PROTECTED
+ && parameters->options().shared())
+ || (gsym->type() == elfcpp::STT_GNU_IFUNC
+ && parameters->options().output_is_position_independent()))
got->add_global_with_rel(gsym, GOT_TYPE_STANDARD,
rel_dyn, elfcpp::R_ARM_GLOB_DAT);
else
{
- if (got->add_global(gsym, GOT_TYPE_STANDARD))
+ // For a STT_GNU_IFUNC symbol we want to write the PLT
+ // offset into the GOT, so that function pointer
+ // comparisons work correctly.
+ bool is_new;
+ if (gsym->type() != elfcpp::STT_GNU_IFUNC)
+ is_new = got->add_global(gsym, GOT_TYPE_STANDARD);
+ else
+ {
+ is_new = got->add_global_plt(gsym, GOT_TYPE_STANDARD);
+ // Tell the dynamic linker to use the PLT address
+ // when resolving relocations.
+ if (gsym->is_from_dynobj()
+ && !parameters->options().shared())
+ gsym->set_needs_dynsym_value();
+ }
+ if (is_new)
rel_dyn->add_global_relative(
gsym, elfcpp::R_ARM_RELATIVE, got,
gsym->got_offset(GOT_TYPE_STANDARD));
{
const bool is_final = gsym->final_value_is_known();
const tls::Tls_optimization optimized_type
- = Target_arm<big_endian>::optimize_tls_reloc(is_final, r_type);
+ = Target_arm<big_endian>::optimize_tls_reloc(is_final, r_type);
switch (r_type)
{
case elfcpp::R_ARM_TLS_GD32: // Global-dynamic
if (optimized_type == tls::TLSOPT_NONE)
{
- // Create a pair of GOT entries for the module index and
- // dtv-relative offset.
- Arm_output_data_got<big_endian>* got
- = target->got_section(symtab, layout);
+ // Create a pair of GOT entries for the module index and
+ // dtv-relative offset.
+ Arm_output_data_got<big_endian>* got
+ = target->got_section(symtab, layout);
if (!parameters->doing_static_link())
got->add_global_pair_with_rel(gsym, GOT_TYPE_TLS_PAIR,
target->rel_dyn_section(layout),
case elfcpp::R_ARM_TLS_LDM32: // Local-dynamic
if (optimized_type == tls::TLSOPT_NONE)
{
- // Create a GOT entry for the module index.
- target->got_mod_index_entry(symtab, layout, object);
+ // Create a GOT entry for the module index.
+ target->got_mod_index_entry(symtab, layout, object);
}
else
// FIXME: TLS optimization not supported yet.
layout->set_has_static_tls();
if (parameters->options().shared())
{
- // We need to create a dynamic relocation.
- Reloc_section* rel_dyn = target->rel_dyn_section(layout);
- rel_dyn->add_global(gsym, elfcpp::R_ARM_TLS_TPOFF32,
+ // We need to create a dynamic relocation.
+ Reloc_section* rel_dyn = target->rel_dyn_section(layout);
+ rel_dyn->add_global(gsym, elfcpp::R_ARM_TLS_TPOFF32,
output_section, object,
- data_shndx, reloc.get_r_offset());
+ data_shndx, reloc.get_r_offset());
}
break;
}
break;
+ case elfcpp::R_ARM_PC24:
+ case elfcpp::R_ARM_LDR_SBREL_11_0_NC:
+ case elfcpp::R_ARM_ALU_SBREL_19_12_NC:
+ case elfcpp::R_ARM_ALU_SBREL_27_20_CK:
default:
unsupported_reloc_global(object, r_type, gsym);
break;
template<bool big_endian>
void
-Target_arm<big_endian>::gc_process_relocs(Symbol_table* symtab,
- Layout* layout,
- Sized_relobj<32, big_endian>* object,
- unsigned int data_shndx,
- unsigned int,
- const unsigned char* prelocs,
- size_t reloc_count,
- Output_section* output_section,
- bool needs_special_offset_handling,
- size_t local_symbol_count,
- const unsigned char* plocal_symbols)
+Target_arm<big_endian>::gc_process_relocs(
+ Symbol_table* symtab,
+ Layout* layout,
+ Sized_relobj_file<32, big_endian>* object,
+ unsigned int data_shndx,
+ unsigned int,
+ const unsigned char* prelocs,
+ size_t reloc_count,
+ Output_section* output_section,
+ bool needs_special_offset_handling,
+ size_t local_symbol_count,
+ const unsigned char* plocal_symbols)
{
typedef Target_arm<big_endian> Arm;
typedef typename Target_arm<big_endian>::Scan Scan;
- gold::gc_process_relocs<32, big_endian, Arm, elfcpp::SHT_REL, Scan>(
+ gold::gc_process_relocs<32, big_endian, Arm, Scan, Classify_reloc>(
symtab,
layout,
this,
void
Target_arm<big_endian>::scan_relocs(Symbol_table* symtab,
Layout* layout,
- Sized_relobj<32, big_endian>* object,
+ Sized_relobj_file<32, big_endian>* object,
unsigned int data_shndx,
unsigned int sh_type,
const unsigned char* prelocs,
size_t local_symbol_count,
const unsigned char* plocal_symbols)
{
- typedef typename Target_arm<big_endian>::Scan Scan;
if (sh_type == elfcpp::SHT_RELA)
{
gold_error(_("%s: unsupported RELA reloc section"),
return;
}
- gold::scan_relocs<32, big_endian, Target_arm, elfcpp::SHT_REL, Scan>(
+ gold::scan_relocs<32, big_endian, Target_arm, Scan, Classify_reloc>(
symtab,
layout,
this,
Target_arm<big_endian>::do_finalize_sections(
Layout* layout,
const Input_objects* input_objects,
- Symbol_table* symtab)
+ Symbol_table*)
{
bool merged_any_attributes = false;
// Merge processor-specific flags.
arm_relobj->attributes_section_data());
merged_any_attributes = true;
}
- }
+ }
for (Input_objects::Dynobj_iterator p = input_objects->dynobj_begin();
p != input_objects->dynobj_end();
if (this->attributes_section_data_ == NULL)
this->attributes_section_data_ = new Attributes_section_data(NULL, 0);
- // Check BLX use.
const Object_attribute* cpu_arch_attr =
this->get_aeabi_object_attribute(elfcpp::Tag_CPU_arch);
- if (cpu_arch_attr->int_value() > elfcpp::TAG_CPU_ARCH_V4)
- this->set_may_use_blx(true);
-
// Check if we need to use Cortex-A8 workaround.
if (parameters->options().user_set_fix_cortex_a8())
this->fix_cortex_a8_ = parameters->options().fix_cortex_a8();
{
// If neither --fix-cortex-a8 nor --no-fix-cortex-a8 is used, turn on
// Cortex-A8 erratum workaround for ARMv7-A or ARMv7 with unknown
- // profile.
+ // profile.
const Object_attribute* cpu_arch_profile_attr =
this->get_aeabi_object_attribute(elfcpp::Tag_CPU_arch_profile);
this->fix_cortex_a8_ =
(cpu_arch_attr->int_value() == elfcpp::TAG_CPU_ARCH_V7
- && (cpu_arch_profile_attr->int_value() == 'A'
- || cpu_arch_profile_attr->int_value() == 0));
+ && (cpu_arch_profile_attr->int_value() == 'A'
+ || cpu_arch_profile_attr->int_value() == 0));
}
-
+
// Check if we can use V4BX interworking.
// The V4BX interworking stub contains BX instruction,
// which is not specified for some profiles.
if (this->fix_v4bx() == General_options::FIX_V4BX_INTERWORKING
- && !this->may_use_blx())
+ && !this->may_use_v4t_interworking())
gold_error(_("unable to provide V4BX reloc interworking fix up; "
- "the target profile does not support BX instruction"));
+ "the target profile does not support BX instruction"));
// Fill in some more dynamic tags.
const Reloc_section* rel_plt = (this->plt_ == NULL
// Handle the .ARM.exidx section.
Output_section* exidx_section = layout->find_output_section(".ARM.exidx");
- if (exidx_section != NULL
- && exidx_section->type() == elfcpp::SHT_ARM_EXIDX
- && !parameters->options().relocatable())
- {
- // Create __exidx_start and __exdix_end symbols.
- symtab->define_in_output_data("__exidx_start", NULL,
- Symbol_table::PREDEFINED,
- exidx_section, 0, 0, elfcpp::STT_OBJECT,
- elfcpp::STB_GLOBAL, elfcpp::STV_HIDDEN, 0,
- false, true);
- symtab->define_in_output_data("__exidx_end", NULL,
- Symbol_table::PREDEFINED,
- exidx_section, 0, 0, elfcpp::STT_OBJECT,
- elfcpp::STB_GLOBAL, elfcpp::STV_HIDDEN, 0,
- true, true);
- // For the ARM target, we need to add a PT_ARM_EXIDX segment for
- // the .ARM.exidx section.
- if (!layout->script_options()->saw_phdrs_clause())
+ if (!parameters->options().relocatable())
+ {
+ if (exidx_section != NULL
+ && exidx_section->type() == elfcpp::SHT_ARM_EXIDX)
{
- gold_assert(layout->find_output_segment(elfcpp::PT_ARM_EXIDX, 0, 0)
- == NULL);
- Output_segment* exidx_segment =
- layout->make_output_segment(elfcpp::PT_ARM_EXIDX, elfcpp::PF_R);
- exidx_segment->add_output_section(exidx_section, elfcpp::PF_R,
- false);
+ // For the ARM target, we need to add a PT_ARM_EXIDX segment for
+ // the .ARM.exidx section.
+ if (!layout->script_options()->saw_phdrs_clause())
+ {
+ gold_assert(layout->find_output_segment(elfcpp::PT_ARM_EXIDX, 0,
+ 0)
+ == NULL);
+ Output_segment* exidx_segment =
+ layout->make_output_segment(elfcpp::PT_ARM_EXIDX, elfcpp::PF_R);
+ exidx_segment->add_output_section_to_nonload(exidx_section,
+ elfcpp::PF_R);
+ }
}
}
new Output_attributes_section_data(*this->attributes_section_data_);
layout->add_output_section_data(".ARM.attributes",
elfcpp::SHT_ARM_ATTRIBUTES, 0,
- attributes_section, false, false, false,
+ attributes_section, ORDER_INVALID,
false);
}
inline bool
Target_arm<big_endian>::Relocate::should_apply_static_reloc(
const Sized_symbol<32>* gsym,
- int ref_flags,
+ unsigned int r_type,
bool is_32bit,
Output_section* output_section)
{
if ((output_section->flags() & elfcpp::SHF_ALLOC) == 0)
return true;
+ int ref_flags = Scan::get_reference_flags(r_type);
+
// For local symbols, we will have created a non-RELATIVE dynamic
// relocation only if (a) the output is position independent,
// (b) the relocation is absolute (not pc- or segment-relative), and
inline bool
Target_arm<big_endian>::Relocate::relocate(
const Relocate_info<32, big_endian>* relinfo,
+ unsigned int,
Target_arm* target,
- Output_section *output_section,
+ Output_section* output_section,
size_t relnum,
- const elfcpp::Rel<32, big_endian>& rel,
- unsigned int r_type,
+ const unsigned char* preloc,
const Sized_symbol<32>* gsym,
const Symbol_value<32>* psymval,
unsigned char* view,
Arm_address address,
section_size_type view_size)
{
+ if (view == NULL)
+ return true;
+
typedef Arm_relocate_functions<big_endian> Arm_relocate_functions;
- r_type = get_real_reloc_type(r_type);
+ const elfcpp::Rel<32, big_endian> rel(preloc);
+ unsigned int r_type = elfcpp::elf_r_type<32>(rel.get_r_info());
+ r_type = target->get_real_reloc_type(r_type);
const Arm_reloc_property* reloc_property =
arm_reloc_property_table->get_implemented_static_reloc_property(r_type);
if (reloc_property == NULL)
Arm_address thumb_bit = 0;
Symbol_value<32> symval;
bool is_weakly_undefined_without_plt = false;
- if (relnum != Target_arm<big_endian>::fake_relnum_for_stubs)
+ bool have_got_offset = false;
+ unsigned int got_offset = 0;
+
+ // If the relocation uses the GOT entry of a symbol instead of the symbol
+ // itself, we don't care about whether the symbol is defined or what kind
+ // of symbol it is.
+ if (reloc_property->uses_got_entry())
+ {
+ // Get the GOT offset.
+ // The GOT pointer points to the end of the GOT section.
+ // We need to subtract the size of the GOT section to get
+ // the actual offset to use in the relocation.
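+ // For example, with got_size() == 0x100 an entry at got_offset 0x10 yields
+ // 0x10 - 0x100 = -0xf0, the entry's displacement from the GOT pointer.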
+ // TODO: We should move the GOT offset computation for TLS relocations here.
+ switch (r_type)
+ {
+ case elfcpp::R_ARM_GOT_BREL:
+ case elfcpp::R_ARM_GOT_PREL:
+ if (gsym != NULL)
+ {
+ gold_assert(gsym->has_got_offset(GOT_TYPE_STANDARD));
+ got_offset = (gsym->got_offset(GOT_TYPE_STANDARD)
+ - target->got_size());
+ }
+ else
+ {
+ unsigned int r_sym = elfcpp::elf_r_sym<32>(rel.get_r_info());
+ gold_assert(object->local_has_got_offset(r_sym,
+ GOT_TYPE_STANDARD));
+ got_offset = (object->local_got_offset(r_sym, GOT_TYPE_STANDARD)
+ - target->got_size());
+ }
+ have_got_offset = true;
+ break;
+
+ default:
+ break;
+ }
+ }
+ else if (relnum != Target_arm<big_endian>::fake_relnum_for_stubs)
{
if (gsym != NULL)
{
// This is a global symbol. Determine if we use PLT and if the
// final target is THUMB.
- if (gsym->use_plt_offset(reloc_is_non_pic(r_type)))
+ if (gsym->use_plt_offset(Scan::get_reference_flags(r_type)))
{
// This uses a PLT, change the symbol value.
- symval.set_output_value(target->plt_section()->address()
- + gsym->plt_offset());
+ symval.set_output_value(target->plt_address_for_global(gsym));
psymval = &symval;
}
else if (gsym->is_weak_undefined())
}
else
{
- // This is a local symbol. Determine if the final target is THUMB.
- // We saved this information when all the local symbols were read.
+ // This is a local symbol. Determine if the final target is THUMB.
+ // We saved this information when all the local symbols were read.
elfcpp::Elf_types<32>::Elf_WXword r_info = rel.get_r_info();
unsigned int r_sym = elfcpp::elf_r_sym<32>(r_info);
thumb_bit = object->local_symbol_is_thumb_function(r_sym) ? 1 : 0;
+
+ if (psymval->is_ifunc_symbol() && object->local_has_plt_offset(r_sym))
+ {
+ symval.set_output_value(
+ target->plt_address_for_local(object, r_sym));
+ psymval = &symval;
+ }
}
}
else
// Strip LSB if this points to a THUMB target.
if (thumb_bit != 0
- && reloc_property->uses_thumb_bit()
+ && reloc_property->uses_thumb_bit()
&& ((psymval->value(object, 0) & 1) != 0))
- {
- Arm_address stripped_value =
- psymval->value(object, 0) & ~static_cast<Arm_address>(1);
- symval.set_output_value(stripped_value);
- psymval = &symval;
- }
-
- // Get the GOT offset if needed.
- // The GOT pointer points to the end of the GOT section.
- // We need to subtract the size of the GOT section to get
- // the actual offset to use in the relocation.
- bool have_got_offset = false;
- unsigned int got_offset = 0;
- switch (r_type)
- {
- case elfcpp::R_ARM_GOT_BREL:
- case elfcpp::R_ARM_GOT_PREL:
- if (gsym != NULL)
- {
- gold_assert(gsym->has_got_offset(GOT_TYPE_STANDARD));
- got_offset = (gsym->got_offset(GOT_TYPE_STANDARD)
- - target->got_size());
- }
- else
- {
- unsigned int r_sym = elfcpp::elf_r_sym<32>(rel.get_r_info());
- gold_assert(object->local_has_got_offset(r_sym, GOT_TYPE_STANDARD));
- got_offset = (object->local_got_offset(r_sym, GOT_TYPE_STANDARD)
- - target->got_size());
- }
- have_got_offset = true;
- break;
-
- default:
- break;
+ {
+ Arm_address stripped_value =
+ psymval->value(object, 0) & ~static_cast<Arm_address>(1);
+ symval.set_output_value(stripped_value);
+ psymval = &symval;
}
// To look up relocation stubs, we need to pass the symbol table index of
relative_address_base = address & 0xfffffffcU;
break;
default:
- gold_unreachable();
+ gold_unreachable();
}
-
+
typename Arm_relocate_functions::Status reloc_status =
Arm_relocate_functions::STATUS_OKAY;
bool check_overflow = reloc_property->checks_overflow();
break;
case elfcpp::R_ARM_ABS8:
- if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, false,
- output_section))
+ if (should_apply_static_reloc(gsym, r_type, false, output_section))
reloc_status = Arm_relocate_functions::abs8(view, object, psymval);
break;
case elfcpp::R_ARM_ABS12:
- if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, false,
- output_section))
+ if (should_apply_static_reloc(gsym, r_type, false, output_section))
reloc_status = Arm_relocate_functions::abs12(view, object, psymval);
break;
case elfcpp::R_ARM_ABS16:
- if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, false,
- output_section))
+ if (should_apply_static_reloc(gsym, r_type, false, output_section))
reloc_status = Arm_relocate_functions::abs16(view, object, psymval);
break;
case elfcpp::R_ARM_ABS32:
- if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, true,
- output_section))
+ if (should_apply_static_reloc(gsym, r_type, true, output_section))
reloc_status = Arm_relocate_functions::abs32(view, object, psymval,
thumb_bit);
break;
case elfcpp::R_ARM_ABS32_NOI:
- if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, true,
- output_section))
+ if (should_apply_static_reloc(gsym, r_type, true, output_section))
// No thumb bit for this relocation: (S + A)
reloc_status = Arm_relocate_functions::abs32(view, object, psymval,
0);
break;
case elfcpp::R_ARM_MOVW_ABS_NC:
- if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, false,
- output_section))
+ if (should_apply_static_reloc(gsym, r_type, false, output_section))
reloc_status = Arm_relocate_functions::movw(view, object, psymval,
0, thumb_bit,
check_overflow);
break;
case elfcpp::R_ARM_MOVT_ABS:
- if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, false,
- output_section))
+ if (should_apply_static_reloc(gsym, r_type, false, output_section))
reloc_status = Arm_relocate_functions::movt(view, object, psymval, 0);
break;
case elfcpp::R_ARM_THM_MOVW_ABS_NC:
- if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, false,
- output_section))
+ if (should_apply_static_reloc(gsym, r_type, false, output_section))
reloc_status = Arm_relocate_functions::thm_movw(view, object, psymval,
- 0, thumb_bit, false);
+ 0, thumb_bit, false);
break;
case elfcpp::R_ARM_THM_MOVT_ABS:
- if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, false,
- output_section))
+ if (should_apply_static_reloc(gsym, r_type, false, output_section))
reloc_status = Arm_relocate_functions::thm_movt(view, object,
psymval, 0);
break;
Arm_relocate_functions::thm_movt(view, object, psymval,
relative_address_base);
break;
-
+
case elfcpp::R_ARM_REL32:
reloc_status = Arm_relocate_functions::rel32(view, object, psymval,
address, thumb_bit);
break;
case elfcpp::R_ARM_THM_ABS5:
- if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, false,
- output_section))
+ if (should_apply_static_reloc(gsym, r_type, false, output_section))
reloc_status = Arm_relocate_functions::thm_abs5(view, object, psymval);
break;
break;
case elfcpp::R_ARM_BASE_ABS:
- {
- if (!should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, false,
- output_section))
- break;
-
+ if (should_apply_static_reloc(gsym, r_type, false, output_section))
reloc_status = Arm_relocate_functions::base_abs(view, sym_origin);
- }
break;
case elfcpp::R_ARM_GOT_BREL:
&& !gsym->is_from_dynobj()
&& !gsym->is_preemptible()));
reloc_status =
- Arm_relocate_functions::arm_branch_common(
+ Arm_relocate_functions::arm_branch_common(
r_type, relinfo, view, gsym, object, r_sym, psymval, address,
thumb_bit, is_weakly_undefined_without_plt);
break;
view, address, view_size);
break;
+ // Unsupported and/or deprecated relocation types, both those listed
+ // explicitly here and any others caught by the default case.
+ case elfcpp::R_ARM_PC24:
+ case elfcpp::R_ARM_LDR_SBREL_11_0_NC:
+ case elfcpp::R_ARM_ALU_SBREL_19_12_NC:
+ case elfcpp::R_ARM_ALU_SBREL_27_20_CK:
default:
- gold_unreachable();
+ // Just silently leave the method. We should get an appropriate error
+ // message in the scan methods.
+ break;
}
// Report any errors.
typedef Relocate_functions<32, big_endian> RelocFuncs;
Output_segment* tls_segment = relinfo->layout->tls_segment();
- const Sized_relobj<32, big_endian>* object = relinfo->object;
+ const Sized_relobj_file<32, big_endian>* object = relinfo->object;
elfcpp::Elf_types<32>::Elf_Addr value = psymval->value(object, 0);
switch (r_type)
{
case elfcpp::R_ARM_TLS_GD32: // Global-dynamic
- {
- unsigned int got_type = GOT_TYPE_TLS_PAIR;
- unsigned int got_offset;
- if (gsym != NULL)
- {
- gold_assert(gsym->has_got_offset(got_type));
- got_offset = gsym->got_offset(got_type) - target->got_size();
- }
- else
- {
- unsigned int r_sym = elfcpp::elf_r_sym<32>(rel.get_r_info());
- gold_assert(object->local_has_got_offset(r_sym, got_type));
- got_offset = (object->local_got_offset(r_sym, got_type)
+ {
+ unsigned int got_type = GOT_TYPE_TLS_PAIR;
+ unsigned int got_offset;
+ if (gsym != NULL)
+ {
+ gold_assert(gsym->has_got_offset(got_type));
+ got_offset = gsym->got_offset(got_type) - target->got_size();
+ }
+ else
+ {
+ unsigned int r_sym = elfcpp::elf_r_sym<32>(rel.get_r_info());
+ gold_assert(object->local_has_got_offset(r_sym, got_type));
+ got_offset = (object->local_got_offset(r_sym, got_type)
- target->got_size());
- }
- if (optimized_type == tls::TLSOPT_NONE)
- {
+ }
+ if (optimized_type == tls::TLSOPT_NONE)
+ {
Arm_address got_entry =
target->got_plt_section()->address() + got_offset;
-
- // Relocate the field with the PC relative offset of the pair of
- // GOT entries.
- RelocFuncs::pcrel32(view, got_entry, address);
- return ArmRelocFuncs::STATUS_OKAY;
- }
- }
+
+ // Relocate the field with the PC relative offset of the pair of
+ // GOT entries.
+ RelocFuncs::pcrel32_unaligned(view, got_entry, address);
+ return ArmRelocFuncs::STATUS_OKAY;
+ }
+ }
break;
case elfcpp::R_ARM_TLS_LDM32: // Local-dynamic
if (optimized_type == tls::TLSOPT_NONE)
- {
- // Relocate the field with the offset of the GOT entry for
- // the module index.
- unsigned int got_offset;
- got_offset = (target->got_mod_index_entry(NULL, NULL, NULL)
+ {
+ // Relocate the field with the offset of the GOT entry for
+ // the module index.
+ unsigned int got_offset;
+ got_offset = (target->got_mod_index_entry(NULL, NULL, NULL)
- target->got_size());
Arm_address got_entry =
target->got_plt_section()->address() + got_offset;
- // Relocate the field with the PC relative offset of the pair of
- // GOT entries.
- RelocFuncs::pcrel32(view, got_entry, address);
+ // Relocate the field with the PC relative offset of the pair of
+ // GOT entries.
+ RelocFuncs::pcrel32_unaligned(view, got_entry, address);
return ArmRelocFuncs::STATUS_OKAY;
- }
+ }
break;
case elfcpp::R_ARM_TLS_LDO32: // Alternate local-dynamic
- RelocFuncs::rel32(view, value);
+ RelocFuncs::rel32_unaligned(view, value);
return ArmRelocFuncs::STATUS_OKAY;
case elfcpp::R_ARM_TLS_IE32: // Initial-exec
if (optimized_type == tls::TLSOPT_NONE)
- {
- // Relocate the field with the offset of the GOT entry for
- // the tp-relative offset of the symbol.
+ {
+ // Relocate the field with the offset of the GOT entry for
+ // the tp-relative offset of the symbol.
unsigned int got_type = GOT_TYPE_TLS_OFFSET;
- unsigned int got_offset;
- if (gsym != NULL)
- {
- gold_assert(gsym->has_got_offset(got_type));
- got_offset = gsym->got_offset(got_type);
- }
- else
- {
- unsigned int r_sym = elfcpp::elf_r_sym<32>(rel.get_r_info());
- gold_assert(object->local_has_got_offset(r_sym, got_type));
- got_offset = object->local_got_offset(r_sym, got_type);
- }
-
- // All GOT offsets are relative to the end of the GOT.
- got_offset -= target->got_size();
+ unsigned int got_offset;
+ if (gsym != NULL)
+ {
+ gold_assert(gsym->has_got_offset(got_type));
+ got_offset = gsym->got_offset(got_type);
+ }
+ else
+ {
+ unsigned int r_sym = elfcpp::elf_r_sym<32>(rel.get_r_info());
+ gold_assert(object->local_has_got_offset(r_sym, got_type));
+ got_offset = object->local_got_offset(r_sym, got_type);
+ }
+
+ // All GOT offsets are relative to the end of the GOT.
+ got_offset -= target->got_size();
Arm_address got_entry =
target->got_plt_section()->address() + got_offset;
- // Relocate the field with the PC relative offset of the GOT entry.
- RelocFuncs::pcrel32(view, got_entry, address);
+ // Relocate the field with the PC relative offset of the GOT entry.
+ RelocFuncs::pcrel32_unaligned(view, got_entry, address);
return ArmRelocFuncs::STATUS_OKAY;
- }
+ }
break;
case elfcpp::R_ARM_TLS_LE32: // Local-exec
// If we're creating a shared library, a dynamic relocation will
// have been created for this location, so do not apply it now.
if (!parameters->options().shared())
- {
- gold_assert(tls_segment != NULL);
+ {
+ gold_assert(tls_segment != NULL);
// $tp points to the TCB, which is followed by the TLS, so we
// need to add TCB size to the offset.
Arm_address aligned_tcb_size =
align_address(ARM_TCB_SIZE, tls_segment->maximum_alignment());
- RelocFuncs::rel32(view, value + aligned_tcb_size);
+ RelocFuncs::rel32_unaligned(view, value + aligned_tcb_size);
- }
+ }
return ArmRelocFuncs::STATUS_OKAY;
-
+
default:
gold_unreachable();
}
}
}
- gold::relocate_section<32, big_endian, Target_arm, elfcpp::SHT_REL,
- Arm_relocate>(
+ gold::relocate_section<32, big_endian, Target_arm, Arm_relocate,
+ gold::Default_comdat_behavior, Classify_reloc>(
relinfo,
this,
prelocs,
template<bool big_endian>
unsigned int
-Target_arm<big_endian>::Relocatable_size_for_reloc::get_size_for_reloc(
+Target_arm<big_endian>::Classify_reloc::get_size_for_reloc(
unsigned int r_type,
Relobj* object)
{
- r_type = get_real_reloc_type(r_type);
+ Target_arm<big_endian>* arm_target =
+ Target_arm<big_endian>::default_target();
+ r_type = arm_target->get_real_reloc_type(r_type);
const Arm_reloc_property* arp =
arm_reloc_property_table->get_implemented_static_reloc_property(r_type);
if (arp != NULL)
Target_arm<big_endian>::scan_relocatable_relocs(
Symbol_table* symtab,
Layout* layout,
- Sized_relobj<32, big_endian>* object,
+ Sized_relobj_file<32, big_endian>* object,
unsigned int data_shndx,
unsigned int sh_type,
const unsigned char* prelocs,
const unsigned char* plocal_symbols,
Relocatable_relocs* rr)
{
- gold_assert(sh_type == elfcpp::SHT_REL);
+ typedef Arm_scan_relocatable_relocs<big_endian, Classify_reloc>
+ Scan_relocatable_relocs;
- typedef Arm_scan_relocatable_relocs<big_endian, elfcpp::SHT_REL,
- Relocatable_size_for_reloc> Scan_relocatable_relocs;
+ gold_assert(sh_type == elfcpp::SHT_REL);
- gold::scan_relocatable_relocs<32, big_endian, elfcpp::SHT_REL,
- Scan_relocatable_relocs>(
+ gold::scan_relocatable_relocs<32, big_endian, Scan_relocatable_relocs>(
symtab,
layout,
object,
rr);
}
-// Relocate a section during a relocatable link.
+// Scan the relocs for --emit-relocs.
+
+template<bool big_endian>
+void
+Target_arm<big_endian>::emit_relocs_scan(Symbol_table* symtab,
+ Layout* layout,
+ Sized_relobj_file<32, big_endian>* object,
+ unsigned int data_shndx,
+ unsigned int sh_type,
+ const unsigned char* prelocs,
+ size_t reloc_count,
+ Output_section* output_section,
+ bool needs_special_offset_handling,
+ size_t local_symbol_count,
+ const unsigned char* plocal_syms,
+ Relocatable_relocs* rr)
+{
+ typedef gold::Default_classify_reloc<elfcpp::SHT_REL, 32, big_endian>
+ Classify_reloc;
+ typedef gold::Default_emit_relocs_strategy<Classify_reloc>
+ Emit_relocs_strategy;
+
+ gold_assert(sh_type == elfcpp::SHT_REL);
+
+ gold::scan_relocatable_relocs<32, big_endian, Emit_relocs_strategy>(
+ symtab,
+ layout,
+ object,
+ data_shndx,
+ prelocs,
+ reloc_count,
+ output_section,
+ needs_special_offset_handling,
+ local_symbol_count,
+ plocal_syms,
+ rr);
+}
+
+// Emit relocations for a section.
template<bool big_endian>
void
-Target_arm<big_endian>::relocate_for_relocatable(
+Target_arm<big_endian>::relocate_relocs(
const Relocate_info<32, big_endian>* relinfo,
unsigned int sh_type,
const unsigned char* prelocs,
size_t reloc_count,
Output_section* output_section,
- off_t offset_in_output_section,
- const Relocatable_relocs* rr,
+ typename elfcpp::Elf_types<32>::Elf_Off offset_in_output_section,
unsigned char* view,
Arm_address view_address,
section_size_type view_size,
{
gold_assert(sh_type == elfcpp::SHT_REL);
- gold::relocate_for_relocatable<32, big_endian, elfcpp::SHT_REL>(
+ gold::relocate_relocs<32, big_endian, Classify_reloc>(
relinfo,
prelocs,
reloc_count,
output_section,
offset_in_output_section,
- rr,
view,
view_address,
view_size,
const unsigned char* preloc_in,
size_t relnum,
Output_section* output_section,
- off_t offset_in_output_section,
+ typename elfcpp::Elf_types<32>::Elf_Off offset_in_output_section,
unsigned char* view,
elfcpp::Elf_types<32>::Elf_Addr view_address,
section_size_type,
else
{
section_offset_type sot_offset =
- convert_types<section_offset_type, Arm_address>(offset);
+ convert_types<section_offset_type, Arm_address>(offset);
section_offset_type new_sot_offset =
- output_section->output_offset(object, relinfo->data_shndx,
- sot_offset);
+ output_section->output_offset(object, relinfo->data_shndx,
+ sot_offset);
gold_assert(new_sot_offset != -1);
new_offset = new_sot_offset;
}
{
new_offset += view_address;
if (offset_in_output_section != invalid_address)
- new_offset -= offset_in_output_section;
+ new_offset -= offset_in_output_section;
}
reloc_write.put_r_offset(new_offset);
Arm_address thumb_bit =
object->local_symbol_is_thumb_function(r_sym) ? 1 : 0;
if (thumb_bit != 0
- && arp->uses_thumb_bit()
+ && arp->uses_thumb_bit()
&& ((psymval->value(object, 0) & 1) != 0))
{
Arm_address stripped_value =
psymval->value(object, 0) & ~static_cast<Arm_address>(1);
symval.set_output_value(stripped_value);
psymval = &symval;
- }
+ }
unsigned char* paddend = view + offset;
typename Arm_relocate_functions<big_endian>::Status reloc_status =
case elfcpp::R_ARM_JUMP24:
case elfcpp::R_ARM_XPC25:
reloc_status =
- Arm_relocate_functions<big_endian>::arm_branch_common(
+ Arm_relocate_functions<big_endian>::arm_branch_common(
r_type, relinfo, paddend, NULL, object, 0, psymval, 0, thumb_bit,
false);
break;
Target_arm<big_endian>::do_dynsym_value(const Symbol* gsym) const
{
gold_assert(gsym->is_from_dynobj() && gsym->has_plt_offset());
- return this->plt_section()->address() + gsym->plt_offset();
+ return this->plt_address_for_global(gsym);
}
// Map platform-specific relocs to real relocs
//
template<bool big_endian>
unsigned int
-Target_arm<big_endian>::get_real_reloc_type (unsigned int r_type)
+Target_arm<big_endian>::get_real_reloc_type(unsigned int r_type) const
{
switch (r_type)
{
case elfcpp::R_ARM_TARGET1:
- // This is either R_ARM_ABS32 or R_ARM_REL32;
- return elfcpp::R_ARM_ABS32;
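+ // R_ARM_TARGET1 resolves to either R_ARM_ABS32 or R_ARM_REL32; the
+ // chosen type is recorded in target1_reloc_ when the target is set up.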
+ return this->target1_reloc_;
case elfcpp::R_ARM_TARGET2:
- // This can be any reloc type but ususally is R_ARM_GOT_PREL
- return elfcpp::R_ARM_GOT_PREL;
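+ // R_ARM_TARGET2 can be any relocation type (usually R_ARM_GOT_PREL);
+ // the selected type is recorded in target2_reloc_.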
+ return this->target2_reloc_;
default:
return r_type;
void
Target_arm<big_endian>::do_adjust_elf_header(
unsigned char* view,
- int len) const
+ int len)
{
gold_assert(len == elfcpp::Elf_sizes<32>::ehdr_size);
elfcpp::Ehdr<32, big_endian> ehdr(view);
+ elfcpp::Elf_Word flags = this->processor_specific_flags();
unsigned char e_ident[elfcpp::EI_NIDENT];
memcpy(e_ident, ehdr.get_e_ident(), elfcpp::EI_NIDENT);
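+ // Legacy (pre-EABI) objects are marked with ELFOSABI_ARM; EABI objects
+ // use the default OS ABI value of zero.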
- if (elfcpp::arm_eabi_version(this->processor_specific_flags())
+ if (elfcpp::arm_eabi_version(flags)
== elfcpp::EF_ARM_EABI_UNKNOWN)
e_ident[elfcpp::EI_OSABI] = elfcpp::ELFOSABI_ARM;
else
e_ident[elfcpp::EI_OSABI] = 0;
e_ident[elfcpp::EI_ABIVERSION] = 0;
- // FIXME: Do EF_ARM_BE8 adjustment.
+ // Do EF_ARM_BE8 adjustment.
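+ // A BE8 image has big-endian data but little-endian instruction
+ // encoding, so the flag is only meaningful for a big-endian link.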
+ if (parameters->options().be8() && !big_endian)
+ gold_error("BE8 images only valid in big-endian mode.");
+ if (parameters->options().be8())
+ {
+ flags |= elfcpp::EF_ARM_BE8;
+ this->set_processor_specific_flags(flags);
+ }
+ // If we're working in EABI_VER5, set the hard/soft float ABI flags
+ // as appropriate.
+ if (elfcpp::arm_eabi_version(flags) == elfcpp::EF_ARM_EABI_VER5)
+ {
+ elfcpp::Elf_Half type = ehdr.get_e_type();
+ if (type == elfcpp::ET_EXEC || type == elfcpp::ET_DYN)
+ {
+ Object_attribute* attr =
+ this->get_aeabi_object_attribute(elfcpp::Tag_ABI_VFP_args);
+ if (attr->int_value() == elfcpp::AEABI_VFP_args_vfp)
+ flags |= elfcpp::EF_ARM_ABI_FLOAT_HARD;
+ else
+ flags |= elfcpp::EF_ARM_ABI_FLOAT_SOFT;
+ this->set_processor_specific_flags(flags);
+ }
+ }
elfcpp::Ehdr_write<32, big_endian> oehdr(view);
oehdr.put_e_ident(e_ident);
+ oehdr.put_e_flags(this->processor_specific_flags());
}
// do_make_elf_object to override the same function in the base class.
-// We need to use a target-specific sub-class of Sized_relobj<32, big_endian>
-// to store ARM specific information. Hence we need to have our own
-// ELF object creation.
+// We need to use a target-specific sub-class of
+// Sized_relobj_file<32, big_endian> to store ARM specific information.
+// Hence we need to have our own ELF object creation.
template<bool big_endian>
Object*
off_t offset, const elfcpp::Ehdr<32, big_endian>& ehdr)
{
int et = ehdr.get_e_type();
- if (et == elfcpp::ET_REL)
+ // ET_EXEC files are valid input for --just-symbols/-R,
+ // and we treat them as relocatable objects.
+ if (et == elfcpp::ET_REL
+ || (et == elfcpp::ET_EXEC && input_file->just_symbols()))
{
Arm_relobj<big_endian>* obj =
- new Arm_relobj<big_endian>(name, input_file, offset, ehdr);
+ new Arm_relobj<big_endian>(name, input_file, offset, ehdr);
obj->setup();
return obj;
}
else if (et == elfcpp::ET_DYN)
{
Sized_dynobj<32, big_endian>* obj =
- new Arm_dynobj<big_endian>(name, input_file, offset, ehdr);
+ new Arm_dynobj<big_endian>(name, input_file, offset, ehdr);
obj->setup();
return obj;
}
else
{
gold_error(_("%s: unsupported ELF file type %d"),
- name.c_str(), et);
+ name.c_str(), et);
return NULL;
}
}
Target_arm<big_endian>::get_secondary_compatible_arch(
const Attributes_section_data* pasd)
{
- const Object_attribute *known_attributes =
+ const Object_attribute* known_attributes =
pasd->known_attributes(Object_attribute::OBJ_ATTR_PROC);
// Note: the tag and its argument below are uleb128 values, though
Attributes_section_data* pasd,
int arch)
{
- Object_attribute *known_attributes =
+ Object_attribute* known_attributes =
pasd->known_attributes(Object_attribute::OBJ_ATTR_PROC);
if (arch == -1)
T(V7E_M), // V6S_M.
T(V7E_M) // V7E_M.
};
+ static const int v8[] =
+ {
+ T(V8), // PRE_V4.
+ T(V8), // V4.
+ T(V8), // V4T.
+ T(V8), // V5T.
+ T(V8), // V5TE.
+ T(V8), // V5TEJ.
+ T(V8), // V6.
+ T(V8), // V6KZ.
+ T(V8), // V6T2.
+ T(V8), // V6K.
+ T(V8), // V7.
+ T(V8), // V6_M.
+ T(V8), // V6S_M.
+ T(V8), // V7E_M.
+ T(V8) // V8.
+ };
static const int v4t_plus_v6_m[] =
{
-1, // PRE_V4.
T(V6_M), // V6_M.
T(V6S_M), // V6S_M.
T(V7E_M), // V7E_M.
+ T(V8), // V8.
T(V4T_PLUS_V6_M) // V4T plus V6_M.
};
- static const int *comb[] =
+ static const int* comb[] =
{
v6t2,
v6k,
v6_m,
v6s_m,
v7e_m,
+ v8,
// Pseudo-architecture.
v4t_plus_v6_m
};
// Check we've not got a higher architecture than we know about.
- if (oldtag >= elfcpp::MAX_TAG_CPU_ARCH || newtag >= elfcpp::MAX_TAG_CPU_ARCH)
+ if (oldtag > elfcpp::MAX_TAG_CPU_ARCH || newtag > elfcpp::MAX_TAG_CPU_ARCH)
{
gold_error(_("%s: unknown CPU architecture"), name);
return -1;
std::string
Target_arm<big_endian>::aeabi_enum_name(unsigned int value)
{
- static const char *aeabi_enum_names[] =
+ static const char* aeabi_enum_names[] =
{ "", "variable-size", "32-bit", "" };
const size_t aeabi_enum_names_size =
sizeof(aeabi_enum_names) / sizeof(aeabi_enum_names[0]);
std::string
Target_arm<big_endian>::tag_cpu_name_value(unsigned int value)
{
- static const char *name_table[] = {
+ static const char* name_table[] = {
// These aren't real CPU names, but we can't guess
// that from the architecture version alone.
"Pre v4",
"ARM v7",
"ARM v6-M",
"ARM v6S-M",
- "ARM v7E-M"
+ "ARM v7E-M",
+ "ARM v8"
};
const size_t name_table_size = sizeof(name_table) / sizeof(name_table[0]);
char buffer[100];
sprintf(buffer, "<unknown CPU value %u>", value);
return std::string(buffer);
- }
+ }
+}
+
+// Query attributes object to see if integer divide instructions may be
+// present in an object.
+
+template<bool big_endian>
+bool
+Target_arm<big_endian>::attributes_accept_div(int arch, int profile,
+ const Object_attribute* div_attr)
+{
+ switch (div_attr->int_value())
+ {
+ case 0:
+ // Integer divide allowed if instruction contained in
+ // architecture.
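+ // (SDIV/UDIV are part of the base architecture for v7-R and v7-M,
+ // and for v7E-M and later.)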
+ if (arch == elfcpp::TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
+ return true;
+ else if (arch >= elfcpp::TAG_CPU_ARCH_V7E_M)
+ return true;
+ else
+ return false;
+
+ case 1:
+ // Integer divide explicitly prohibited.
+ return false;
+
+ default:
+ // Unrecognised case - treat as allowing divide everywhere.
+ case 2:
+ // Integer divide allowed in ARM state.
+ return true;
+ }
+}
+
+// Query attributes object to see if integer divide instructions are
+// forbidden to be in the object. This is not the inverse of
+// attributes_accept_div.
+
+template<bool big_endian>
+bool
+Target_arm<big_endian>::attributes_forbid_div(const Object_attribute* div_attr)
+{
+ return div_attr->int_value() == 1;
}
// Merge object attributes from input file called NAME with those of the
{
if (out_attr[elfcpp::Tag_MPextension_use].int_value() != 0
&& out_attr[elfcpp::Tag_MPextension_use_legacy].int_value()
- != out_attr[elfcpp::Tag_MPextension_use].int_value())
+ != out_attr[elfcpp::Tag_MPextension_use].int_value())
{
gold_error(_("%s has both the current and legacy "
"Tag_MPextension_use attributes"),
!= out_attr[elfcpp::Tag_ABI_VFP_args].int_value())
{
// Ignore mismatches if the object doesn't use floating point.
- if (out_attr[elfcpp::Tag_ABI_FP_number_model].int_value() == 0)
+ if (out_attr[elfcpp::Tag_ABI_FP_number_model].int_value()
+ == elfcpp::AEABI_FP_number_model_none
+ || (in_attr[elfcpp::Tag_ABI_FP_number_model].int_value()
+ != elfcpp::AEABI_FP_number_model_none
+ && out_attr[elfcpp::Tag_ABI_VFP_args].int_value()
+ == elfcpp::AEABI_VFP_args_compatible))
out_attr[elfcpp::Tag_ABI_VFP_args].set_int_value(
in_attr[elfcpp::Tag_ABI_VFP_args].int_value());
- else if (in_attr[elfcpp::Tag_ABI_FP_number_model].int_value() != 0
+ else if (in_attr[elfcpp::Tag_ABI_FP_number_model].int_value()
+ != elfcpp::AEABI_FP_number_model_none
+ && in_attr[elfcpp::Tag_ABI_VFP_args].int_value()
+ != elfcpp::AEABI_VFP_args_compatible
&& parameters->options().warn_mismatch())
- gold_error(_("%s uses VFP register arguments, output does not"),
+ gold_error(_("%s uses VFP register arguments, output does not"),
name);
}
|| (out_attr[elfcpp::Tag_ABI_align8_preserved].int_value()
== 0)))
{
- // This error message should be enabled once all non-conformant
+ // This error message should be enabled once all non-conforming
// binaries in the toolchain have had the attributes set
// properly.
// gold_error(_("output 8-byte data alignment conflicts with %s"),
out_attr[i].set_int_value(in_attr[i].int_value());
break;
case elfcpp::Tag_ABI_PCS_wchar_t:
- // FIXME: Make it possible to turn off this warning.
if (out_attr[i].int_value()
&& in_attr[i].int_value()
&& out_attr[i].int_value() != in_attr[i].int_value()
- && parameters->options().warn_mismatch())
+ && parameters->options().warn_mismatch()
+ && parameters->options().wchar_size_warning())
{
gold_warning(_("%s uses %u-byte wchar_t yet the output is to "
"use %u-byte wchar_t; use of wchar_t values "
// Use whatever requirements the new object has.
out_attr[i].set_int_value(in_attr[i].int_value());
}
- // FIXME: Make it possible to turn off this warning.
else if (in_attr[i].int_value() != elfcpp::AEABI_enum_forced_wide
&& out_attr[i].int_value() != in_attr[i].int_value()
- && parameters->options().warn_mismatch())
+ && parameters->options().warn_mismatch()
+ && parameters->options().enum_size_warning())
{
unsigned int in_value = in_attr[i].int_value();
unsigned int out_value = out_attr[i].int_value();
}
break;
case elfcpp::Tag_ABI_VFP_args:
- // Aready done.
+ // Already done.
break;
case elfcpp::Tag_ABI_WMMX_args:
if (in_attr[i].int_value() != out_attr[i].int_value()
break;
case elfcpp::Tag_DIV_use:
- // This tag is set to zero if we can use UDIV and SDIV in Thumb
- // mode on a v7-M or v7-R CPU; to one if we can not use UDIV or
- // SDIV at all; and to two if we can use UDIV or SDIV on a v7-A
- // CPU. We will merge as follows: If the input attribute's value
- // is one then the output attribute's value remains unchanged. If
- // the input attribute's value is zero or two then if the output
- // attribute's value is one the output value is set to the input
- // value, otherwise the output value must be the same as the
- // inputs. */
- if (in_attr[i].int_value() != 1 && out_attr[i].int_value() != 1)
- {
- if (in_attr[i].int_value() != out_attr[i].int_value())
- {
- gold_error(_("DIV usage mismatch between %s and output"),
- name);
- }
- }
-
- if (in_attr[i].int_value() != 1)
- out_attr[i].set_int_value(in_attr[i].int_value());
-
+ {
+ // A value of zero on input means that the divide
+ // instruction may be used if available in the base
+ // architecture as specified via Tag_CPU_arch and
+ // Tag_CPU_arch_profile. A value of 1 means that the user
+ // did not want divide instructions. A value of 2
+ // explicitly means that divide instructions were allowed
+ // in ARM and Thumb state.
+ int arch = this->
+ get_aeabi_object_attribute(elfcpp::Tag_CPU_arch)->
+ int_value();
+ int profile = this->
+ get_aeabi_object_attribute(elfcpp::Tag_CPU_arch_profile)->
+ int_value();
+ if (in_attr[i].int_value() == out_attr[i].int_value())
+ {
+ // Do nothing.
+ }
+ else if (attributes_forbid_div(&in_attr[i])
+ && !attributes_accept_div(arch, profile, &out_attr[i]))
+ out_attr[i].set_int_value(1);
+ else if (attributes_forbid_div(&out_attr[i])
+ && attributes_accept_div(arch, profile, &in_attr[i]))
+ out_attr[i].set_int_value(in_attr[i].int_value());
+ else if (in_attr[i].int_value() == 2)
+ out_attr[i].set_int_value(in_attr[i].int_value());
+ }
break;
case elfcpp::Tag_MPextension_use_legacy:
!= in_attr[i].int_value())
{
gold_error(_("%s has has both the current and legacy "
- "Tag_MPextension_use attributes"),
+ "Tag_MPextension_use attributes"),
name);
}
}
err_tag = out_iter->first;
int saved_tag = out_iter->first;
delete out_iter->second;
- out_other_attributes->erase(out_iter);
+ out_other_attributes->erase(out_iter);
out_iter = out_other_attributes->upper_bound(saved_tag);
}
else if (in_iter != in_other_attributes->end()
// for this input section already.
gold_assert(ins.second);
- return arm_input_section;
+ return arm_input_section;
}
// Find the Arm_input_section object corresponding to the SHNDX-th input
elfcpp::Elf_types<32>::Elf_Swxword addend,
Arm_address address)
{
- typedef typename Target_arm<big_endian>::Relocate Relocate;
-
const Arm_relobj<big_endian>* arm_relobj =
Arm_relobj<big_endian>::as_arm_relobj(relinfo->object);
{
// This is a global symbol. Determine if we use PLT and if the
// final target is THUMB.
- if (gsym->use_plt_offset(Relocate::reloc_is_non_pic(r_type)))
+ if (gsym->use_plt_offset(Scan::get_reference_flags(r_type)))
{
// This uses a PLT, change the symbol value.
- symval.set_output_value(this->plt_section()->address()
- + gsym->plt_offset());
+ symval.set_output_value(this->plt_address_for_global(gsym));
psymval = &symval;
target_is_thumb = false;
}
psymval->value(arm_relobj, 0) & ~static_cast<Arm_address>(1);
symval.set_output_value(stripped_value);
psymval = &symval;
- }
+ }
// Get the symbol value.
Symbol_value<32>::Value value = psymval->value(arm_relobj, 0);
if (stub_type != arm_stub_none)
{
// Try looking up an existing stub from a stub table.
- Stub_table<big_endian>* stub_table =
+ Stub_table<big_endian>* stub_table =
arm_relobj->stub_table(relinfo->data_shndx);
gold_assert(stub_table != NULL);
-
+
// Locate stub by destination.
Reloc_stub::Key stub_key(stub_type, gsym, arm_relobj, r_sym, addend);
Arm_relobj<big_endian>::as_arm_relobj(relinfo->object);
unsigned int local_count = arm_object->local_symbol_count();
+ gold::Default_comdat_behavior default_comdat_behavior;
Comdat_behavior comdat_behavior = CB_UNDETERMINED;
for (size_t i = 0; i < reloc_count; ++i, prelocs += reloc_size)
// Only a few relocation types need stubs.
if ((r_type != elfcpp::R_ARM_CALL)
- && (r_type != elfcpp::R_ARM_JUMP24)
- && (r_type != elfcpp::R_ARM_PLT32)
- && (r_type != elfcpp::R_ARM_THM_CALL)
- && (r_type != elfcpp::R_ARM_THM_XPC22)
- && (r_type != elfcpp::R_ARM_THM_JUMP24)
- && (r_type != elfcpp::R_ARM_THM_JUMP19)
- && (r_type != elfcpp::R_ARM_V4BX))
+ && (r_type != elfcpp::R_ARM_JUMP24)
+ && (r_type != elfcpp::R_ARM_PLT32)
+ && (r_type != elfcpp::R_ARM_THM_CALL)
+ && (r_type != elfcpp::R_ARM_THM_XPC22)
+ && (r_type != elfcpp::R_ARM_THM_JUMP24)
+ && (r_type != elfcpp::R_ARM_THM_JUMP19)
+ && (r_type != elfcpp::R_ARM_V4BX))
continue;
section_offset_type offset =
{
// create a new stub and add it to stub table.
Arm_v4bx_stub* stub =
- this->stub_factory().make_arm_v4bx_stub(reg);
+ this->stub_factory().make_arm_v4bx_stub(reg);
gold_assert(stub != NULL);
stub_table->add_arm_v4bx_stub(stub);
}
Symbol_value<32> symval;
const Symbol_value<32> *psymval;
+ bool is_defined_in_discarded_section;
+ unsigned int shndx;
if (r_sym < local_count)
{
sym = NULL;
psymval = arm_object->local_symbol(r_sym);
- // If the local symbol belongs to a section we are discarding,
- // and that section is a debug section, try to find the
- // corresponding kept section and map this symbol to its
- // counterpart in the kept section. The symbol must not
- // correspond to a section we are folding.
+ // If the local symbol belongs to a section we are discarding,
+ // and that section is a debug section, try to find the
+ // corresponding kept section and map this symbol to its
+ // counterpart in the kept section. The symbol must not
+ // correspond to a section we are folding.
bool is_ordinary;
- unsigned int shndx = psymval->input_shndx(&is_ordinary);
- if (is_ordinary
- && shndx != elfcpp::SHN_UNDEF
- && !arm_object->is_section_included(shndx)
- && !(relinfo->symtab->is_section_folded(arm_object, shndx)))
+ shndx = psymval->input_shndx(&is_ordinary);
+ is_defined_in_discarded_section =
+ (is_ordinary
+ && shndx != elfcpp::SHN_UNDEF
+ && !arm_object->is_section_included(shndx)
+ && !relinfo->symtab->is_section_folded(arm_object, shndx));
+
+ // We need to compute the would-be final value of this local
+ // symbol.
+ if (!is_defined_in_discarded_section)
{
- if (comdat_behavior == CB_UNDETERMINED)
- {
- std::string name =
- arm_object->section_name(relinfo->data_shndx);
- comdat_behavior = get_comdat_behavior(name.c_str());
- }
- if (comdat_behavior == CB_PRETEND)
- {
- bool found;
- typename elfcpp::Elf_types<32>::Elf_Addr value =
- arm_object->map_to_kept_section(shndx, &found);
- if (found)
- symval.set_output_value(value + psymval->input_value());
- else
- symval.set_output_value(0);
- }
+ typedef Sized_relobj_file<32, big_endian> ObjType;
+ if (psymval->is_section_symbol())
+ symval.set_is_section_symbol();
+ typename ObjType::Compute_final_local_value_status status =
+ arm_object->compute_final_local_value(r_sym, psymval, &symval,
+ relinfo->symtab);
+ if (status == ObjType::CFLV_OK)
+ {
+ // Currently we cannot handle a branch to a target in
+ // a merged section. If this is the case, issue an error
+ // and also free the merge symbol value.
+ if (!symval.has_output_value())
+ {
+ const std::string& section_name =
+ arm_object->section_name(shndx);
+ arm_object->error(_("cannot handle branch to local %u "
+ "in a merged section %s"),
+ r_sym, section_name.c_str());
+ }
+ psymval = &symval;
+ }
else
- {
- symval.set_output_value(0);
- }
- symval.set_no_output_symtab_entry();
- psymval = &symval;
+ {
+ // We cannot determine the final value.
+ continue;
+ }
}
}
else
{
- const Symbol* gsym = arm_object->global_symbol(r_sym);
+ const Symbol* gsym;
+ gsym = arm_object->global_symbol(r_sym);
gold_assert(gsym != NULL);
if (gsym->is_forwarder())
gsym = relinfo->symtab->resolve_forwards(gsym);
sym = static_cast<const Sized_symbol<32>*>(gsym);
- if (sym->has_symtab_index())
+ if (sym->has_symtab_index() && sym->symtab_index() != -1U)
symval.set_output_symtab_index(sym->symtab_index());
else
symval.set_no_output_symtab_entry();
// Skip this if the symbol has not output section.
if (status == Symbol_table::CFVS_NO_OUTPUT_SECTION)
continue;
-
symval.set_output_value(value);
+
+ if (gsym->type() == elfcpp::STT_TLS)
+ symval.set_is_tls_symbol();
+ else if (gsym->type() == elfcpp::STT_GNU_IFUNC)
+ symval.set_is_ifunc_symbol();
psymval = &symval;
+
+ is_defined_in_discarded_section =
+ (gsym->is_defined_in_discarded_section()
+ && gsym->is_undefined());
+ shndx = 0;
+ }
+
+ Symbol_value<32> symval2;
+ if (is_defined_in_discarded_section)
+ {
+ if (comdat_behavior == CB_UNDETERMINED)
+ {
+ std::string name = arm_object->section_name(relinfo->data_shndx);
+ comdat_behavior = default_comdat_behavior.get(name.c_str());
+ }
+ if (comdat_behavior == CB_PRETEND)
+ {
+ // FIXME: This case does not work for global symbols.
+ // We have no place to store the original section index.
+ // Fortunately this does not matter for comdat sections,
+ // only for sections explicitly discarded by a linker
+ // script.
+ bool found;
+ typename elfcpp::Elf_types<32>::Elf_Addr value =
+ arm_object->map_to_kept_section(shndx, &found);
+ if (found)
+ symval2.set_output_value(value + psymval->input_value());
+ else
+ symval2.set_output_value(0);
+ }
+ else
+ {
+ if (comdat_behavior == CB_WARNING)
+ gold_warning_at_location(relinfo, i, offset,
+ _("relocation refers to discarded "
+ "section"));
+ symval2.set_output_value(0);
+ }
+ symval2.set_no_output_symtab_entry();
+ psymval = &symval2;
}
// If symbol is a section symbol, we don't know the actual type of
// Group input sections for stub generation.
//
-// We goup input sections in an output sections so that the total size,
+// We group input sections in an output section so that the total size,
// including any padding space due to alignment is smaller than GROUP_SIZE
// unless the only input section in a group is bigger than GROUP_SIZE already.
// Then an ARM stub table is created to follow the last input section
// in the group. For each group an ARM stub table is created and is placed
-// after the last group. If STUB_ALWATS_AFTER_BRANCH is false, we further
+// after the last group. If STUB_ALWAYS_AFTER_BRANCH is false, we further
// extend the group after the stub table.
template<bool big_endian>
Target_arm<big_endian>::group_sections(
Layout* layout,
section_size_type group_size,
- bool stubs_always_after_branch)
+ bool stubs_always_after_branch,
+ const Task* task)
{
// Group input sections and insert stub table
Layout::Section_list section_list;
- layout->get_allocated_sections(§ion_list);
+ layout->get_executable_sections(§ion_list);
for (Layout::Section_list::const_iterator p = section_list.begin();
p != section_list.end();
++p)
Arm_output_section<big_endian>* output_section =
Arm_output_section<big_endian>::as_arm_output_section(*p);
output_section->group_sections(group_size, stubs_always_after_branch,
- this);
+ this, task);
}
}
int pass,
const Input_objects* input_objects,
Symbol_table* symtab,
- Layout* layout)
+ Layout* layout,
+ const Task* task)
{
// No need to generate stubs if this is a relocatable link.
gold_assert(!parameters->options().relocatable());
{
// Determine the stub group size. The group size is the absolute
// value of the parameter --stub-group-size. If --stub-group-size
- // is passed a negative value, we restict stubs to be always after
+ // is passed a negative value, we restrict stubs to be always after
// the stubbed branches.
int32_t stub_group_size_param =
parameters->options().stub_group_size();
stub_group_size = std::max(stub_group_size, cortex_a8_group_size);
}
- group_sections(layout, stub_group_size, stubs_always_after_branch);
-
+ group_sections(layout, stub_group_size, stubs_always_after_branch, task);
+
// Also fix .ARM.exidx section coverage.
Arm_output_section<big_endian>* exidx_output_section = NULL;
for (Layout::Section_list::const_iterator p =
if (exidx_output_section != NULL)
{
this->fix_exidx_coverage(layout, input_objects, exidx_output_section,
- symtab);
+ symtab, task);
done_exidx_fixup = true;
}
}
++sp)
(*sp)->remove_all_cortex_a8_stubs();
}
-
+
// Scan relocs for relocation stubs
for (Input_objects::Relobj_iterator op = input_objects->relobj_begin();
op != input_objects->relobj_end();
{
Arm_relobj<big_endian>* arm_relobj =
Arm_relobj<big_endian>::as_arm_relobj(*op);
+ // Lock the object so we can read from it. This is only called
+ // single-threaded from Layout::finalize, so it is OK to lock.
+ Task_lock_obj<Object> tl(task, arm_relobj);
arm_relobj->scan_sections_for_stubs(this, symtab, layout);
}
// need to update output sections, so we record all output sections needing
// update above and scan the sections here to find out what sections need
// to be updated.
- for(Layout::Section_list::const_iterator p = layout->section_list().begin();
+ for (Layout::Section_list::const_iterator p = layout->section_list().begin();
p != layout->section_list().end();
++p)
{
// symbols defined in parts of input sections that are discarded by
// relaxation.
if (arm_relobj->output_local_symbol_count_needs_update())
- arm_relobj->update_output_local_symbol_count();
+ {
+ // We need to lock the object's file to update it.
+ Task_lock_obj<Object> tl(task, arm_relobj);
+ arm_relobj->update_output_local_symbol_count();
+ }
}
}
elfcpp::Rel_write<32, big_endian> reloc_write(reloc_buffer);
reloc_write.put_r_offset(reloc_offset);
reloc_write.put_r_info(elfcpp::elf_r_info<32>(0, r_type));
- elfcpp::Rel<32, big_endian> rel(reloc_buffer);
- relocate.relocate(relinfo, this, output_section,
- this->fake_relnum_for_stubs, rel, r_type,
+ relocate.relocate(relinfo, elfcpp::SHT_REL, this, output_section,
+ this->fake_relnum_for_stubs, reloc_buffer,
NULL, &symval, view + reloc_offset,
address + reloc_offset, reloc_size);
}
{
// Reorder the known object attributes in output. We want to move
// Tag_conformance to position 4 and Tag_nodefaults to position 5
- // and shift eveything between 4 .. Tag_conformance - 1 to make room.
+ // and shift everything between 4 .. Tag_conformance - 1 to make room.
if (num == 4)
return elfcpp::Tag_conformance;
if (num == 5)
// Encoding T4: B<c>.W.
is_b = (insn & 0xf800d000U) == 0xf0009000U;
// Encoding T1: BL<c>.W.
- is_bl = (insn & 0xf800d000U) == 0xf000d000U;
- // Encoding T2: BLX<c>.W.
- is_blx = (insn & 0xf800d000U) == 0xf000c000U;
+ is_bl = (insn & 0xf800d000U) == 0xf000d000U;
+ // Encoding T2: BLX<c>.W.
+ is_blx = (insn & 0xf800d000U) == 0xf000c000U;
// Encoding T3: B<c>.W (not permitted in IT block).
is_bcc = ((insn & 0xf800d000U) == 0xf0008000U
&& (insn & 0x07f00000U) != 0x03800000U);
}
bool is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
-
+
// If this instruction is a 32-bit THUMB branch that crosses a 4K
// page boundary and it follows 32-bit non-branch instruction,
// we need to work around.
// Check if we have an offending branch instruction.
uint16_t upper_insn = (insn >> 16) & 0xffffU;
uint16_t lower_insn = insn & 0xffffU;
- typedef struct Arm_relocate_functions<big_endian> RelocFuncs;
+ typedef class Arm_relocate_functions<big_endian> RelocFuncs;
if (cortex_a8_reloc != NULL
&& cortex_a8_reloc->reloc_stub() != NULL)
offset = RelocFuncs::thumb32_branch_offset(upper_insn,
lower_insn);
if (is_blx)
- offset &= ~3;
+ offset &= ~3;
stub_type = (is_blx
? arm_stub_a8_veneer_blx
// an ARM instruction. If we were not making a stub,
// the BL would have been converted to a BLX. Use the
// BLX stub instead in that case.
- if (this->may_use_blx() && force_target_arm
+ if (this->may_use_v5t_interworking() && force_target_arm
&& stub_type == arm_stub_a8_veneer_bl)
{
stub_type = arm_stub_a8_veneer_blx;
if (is_blx)
pc_for_insn &= ~3;
- // If we found a relocation, use the proper destination,
+ // If we found a relocation, use the proper destination,
// not the offset in the (unrelocated) instruction.
// Note this is always done if we switched the stub type above.
- if (cortex_a8_reloc != NULL)
- offset = (off_t) (cortex_a8_reloc->destination() - pc_for_insn);
+ if (cortex_a8_reloc != NULL)
+ offset = (off_t) (cortex_a8_reloc->destination() - pc_for_insn);
- Arm_address target = (pc_for_insn + offset) | (is_blx ? 0 : 1);
+ Arm_address target = (pc_for_insn + offset) | (is_blx ? 0 : 1);
// Add a new stub if the destination address is in the same page.
- if (((address + i) & ~0xfffU) == (target & ~0xfffU))
- {
+ if (((address + i) & ~0xfffU) == (target & ~0xfffU))
+ {
Cortex_a8_stub* stub =
this->stub_factory_.make_cortex_a8_stub(stub_type,
arm_relobj, shndx,
arm_relobj->stub_table(shndx);
gold_assert(stub_table != NULL);
stub_table->add_cortex_a8_stub(address + i, stub);
- }
- }
- }
+ }
+ }
+ }
i += insn_32bit ? 4 : 2;
last_was_32bit = insn_32bit;
Valtype lower_insn = elfcpp::Swap<16, big_endian>::readval(wv + 1);
off_t branch_offset = stub_address - (insn_address + 4);
- typedef struct Arm_relocate_functions<big_endian> RelocFuncs;
+ typedef class Arm_relocate_functions<big_endian> RelocFuncs;
switch (stub->stub_template()->type())
{
case arm_stub_a8_veneer_b_cond:
- // For a conditional branch, we re-write it to be a uncondition
+ // For a conditional branch, we re-write it to be an unconditional
// branch to the stub. We use the THUMB-2 encoding here.
upper_insn = 0xf000U;
lower_insn = 0xb800U;
- // Fall through
+ // Fall through.
case arm_stub_a8_veneer_b:
case arm_stub_a8_veneer_bl:
case arm_stub_a8_veneer_blx:
branch_offset = (branch_offset + 2) & ~3;
// Put BRANCH_OFFSET back into the insn.
- gold_assert(!utils::has_overflow<25>(branch_offset));
+ gold_assert(!Bits<25>::has_overflow32(branch_offset));
upper_insn = RelocFuncs::thumb32_branch_upper(upper_insn, branch_offset);
lower_insn = RelocFuncs::thumb32_branch_lower(lower_insn, branch_offset);
break;
elfcpp::Swap<16, big_endian>::writeval(wv + 1, lower_insn);
}
+// Target selector for ARM. Note this is never instantiated directly.
+// It's only used in Target_selector_arm_nacl, below.
+
template<bool big_endian>
class Target_selector_arm : public Target_selector
{
public:
Target_selector_arm()
: Target_selector(elfcpp::EM_ARM, 32, big_endian,
- (big_endian ? "elf32-bigarm" : "elf32-littlearm"))
+ (big_endian ? "elf32-bigarm" : "elf32-littlearm"),
+ (big_endian ? "armelfb" : "armelf"))
{ }
Target*
Layout* layout,
const Input_objects* input_objects,
Arm_output_section<big_endian>* exidx_section,
- Symbol_table* symtab)
+ Symbol_table* symtab,
+ const Task* task)
{
// We need to look at all the input sections in output in ascending
// order of output address. We do that by building a sorted list
if (!exidx_input_section->has_errors())
{
unsigned int text_shndx = exidx_input_section->link();
- Output_section *os = arm_relobj->output_section(text_shndx);
+ Output_section* os = arm_relobj->output_section(text_shndx);
if (os != NULL && (os->flags() & elfcpp::SHF_ALLOC) != 0)
sorted_output_sections.insert(os);
}
typedef typename Arm_output_section<big_endian>::Text_section_list
Text_section_list;
Text_section_list sorted_text_sections;
- for(typename Sorted_output_section_list::iterator p =
+ for (typename Sorted_output_section_list::iterator p =
sorted_output_sections.begin();
p != sorted_output_sections.end();
++p)
Arm_output_section<big_endian>* arm_output_section =
Arm_output_section<big_endian>::as_arm_output_section(*p);
arm_output_section->append_text_sections_to_list(&sorted_text_sections);
- }
+ }
exidx_section->fix_exidx_coverage(layout, sorted_text_sections, symtab,
- merge_exidx_entries());
+ merge_exidx_entries(), task);
+}
+
+template<bool big_endian>
+void
+Target_arm<big_endian>::do_define_standard_symbols(
+ Symbol_table* symtab,
+ Layout* layout)
+{
+ // Handle the .ARM.exidx section.
+ Output_section* exidx_section = layout->find_output_section(".ARM.exidx");
+
+ if (exidx_section != NULL)
+ {
+ // Create __exidx_start and __exidx_end symbols.
+ symtab->define_in_output_data("__exidx_start",
+ NULL, // version
+ Symbol_table::PREDEFINED,
+ exidx_section,
+ 0, // value
+ 0, // symsize
+ elfcpp::STT_NOTYPE,
+ elfcpp::STB_GLOBAL,
+ elfcpp::STV_HIDDEN,
+ 0, // nonvis
+ false, // offset_is_from_end
+ true); // only_if_ref
+
+ symtab->define_in_output_data("__exidx_end",
+ NULL, // version
+ Symbol_table::PREDEFINED,
+ exidx_section,
+ 0, // value
+ 0, // symsize
+ elfcpp::STT_NOTYPE,
+ elfcpp::STB_GLOBAL,
+ elfcpp::STV_HIDDEN,
+ 0, // nonvis
+ true, // offset_is_from_end
+ true); // only_if_ref
+ }
+ else
+ {
+ // Define __exidx_start and __exidx_end even when .ARM.exidx
+ // section is missing to match ld's behaviour.
+ symtab->define_as_constant("__exidx_start", NULL,
+ Symbol_table::PREDEFINED,
+ 0, 0, elfcpp::STT_OBJECT,
+ elfcpp::STB_GLOBAL, elfcpp::STV_HIDDEN, 0,
+ true, false);
+ symtab->define_as_constant("__exidx_end", NULL,
+ Symbol_table::PREDEFINED,
+ 0, 0, elfcpp::STT_OBJECT,
+ elfcpp::STB_GLOBAL, elfcpp::STV_HIDDEN, 0,
+ true, false);
+ }
+}
+
+// NaCl variant. It uses different PLT contents.
+
+template<bool big_endian>
+class Output_data_plt_arm_nacl;
+
+template<bool big_endian>
+class Target_arm_nacl : public Target_arm<big_endian>
+{
+ public:
+ Target_arm_nacl()
+ : Target_arm<big_endian>(&arm_nacl_info)
+ { }
+
+ protected:
+ virtual Output_data_plt_arm<big_endian>*
+ do_make_data_plt(
+ Layout* layout,
+ Arm_output_data_got<big_endian>* got,
+ Output_data_space* got_plt,
+ Output_data_space* got_irelative)
+ { return new Output_data_plt_arm_nacl<big_endian>(
+ layout, got, got_plt, got_irelative); }
+
+ private:
+ static const Target::Target_info arm_nacl_info;
+};
+
+template<bool big_endian>
+const Target::Target_info Target_arm_nacl<big_endian>::arm_nacl_info =
+{
+ 32, // size
+ big_endian, // is_big_endian
+ elfcpp::EM_ARM, // machine_code
+ false, // has_make_symbol
+ false, // has_resolve
+ false, // has_code_fill
+ true, // is_default_stack_executable
+ false, // can_icf_inline_merge_sections
+ '\0', // wrap_char
+ "/lib/ld-nacl-arm.so.1", // dynamic_linker
+ 0x20000, // default_text_segment_address
+ 0x10000, // abi_pagesize (overridable by -z max-page-size)
+ 0x10000, // common_pagesize (overridable by -z common-page-size)
+ true, // isolate_execinstr
+ 0x10000000, // rosegment_gap
+ elfcpp::SHN_UNDEF, // small_common_shndx
+ elfcpp::SHN_UNDEF, // large_common_shndx
+ 0, // small_common_section_flags
+ 0, // large_common_section_flags
+ ".ARM.attributes", // attributes_section
+ "aeabi", // attributes_vendor
+ "_start", // entry_symbol_name
+ 32, // hash_entry_size
+};
+
+template<bool big_endian>
+class Output_data_plt_arm_nacl : public Output_data_plt_arm<big_endian>
+{
+ public:
+ Output_data_plt_arm_nacl(
+ Layout* layout,
+ Arm_output_data_got<big_endian>* got,
+ Output_data_space* got_plt,
+ Output_data_space* got_irelative)
+ : Output_data_plt_arm<big_endian>(layout, 16, got, got_plt, got_irelative)
+ { }
+
+ protected:
+ // Return the offset of the first non-reserved PLT entry.
+ virtual unsigned int
+ do_first_plt_entry_offset() const
+ { return sizeof(first_plt_entry); }
+
+ // Return the size of a PLT entry.
+ virtual unsigned int
+ do_get_plt_entry_size() const
+ { return sizeof(plt_entry); }
+
+ virtual void
+ do_fill_first_plt_entry(unsigned char* pov,
+ Arm_address got_address,
+ Arm_address plt_address);
+
+ virtual void
+ do_fill_plt_entry(unsigned char* pov,
+ Arm_address got_address,
+ Arm_address plt_address,
+ unsigned int got_offset,
+ unsigned int plt_offset);
+
+ private:
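+ // Encode the low 16 bits of VALUE into the split imm4:imm12 immediate
+ // field of an ARM MOVW instruction (imm4 in bits 19:16, imm12 in 11:0).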
+ inline uint32_t arm_movw_immediate(uint32_t value)
+ {
+ return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
+ }
+
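+ // Likewise for MOVT: encode the high 16 bits of VALUE into imm4:imm12.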
+ inline uint32_t arm_movt_immediate(uint32_t value)
+ {
+ return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
+ }
+
+ // Template for the first PLT entry.
+ static const uint32_t first_plt_entry[16];
+
+ // Template for subsequent PLT entries.
+ static const uint32_t plt_entry[4];
+};
+
+// The first entry in the PLT.
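+// The code is laid out in 16-byte NaCl bundles. The 'bic' instructions
+// mask the branch target so that indirect branches stay bundle-aligned
+// and inside the sandbox address range, as the NaCl ABI requires.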
+template<bool big_endian>
+const uint32_t Output_data_plt_arm_nacl<big_endian>::first_plt_entry[16] =
+{
+ // First bundle:
+ 0xe300c000, // movw ip, #:lower16:&GOT[2]-.+8
+ 0xe340c000, // movt ip, #:upper16:&GOT[2]-.+8
+ 0xe08cc00f, // add ip, ip, pc
+ 0xe52dc008, // str ip, [sp, #-8]!
+ // Second bundle:
+ 0xe3ccc103, // bic ip, ip, #0xc0000000
+ 0xe59cc000, // ldr ip, [ip]
+ 0xe3ccc13f, // bic ip, ip, #0xc000000f
+ 0xe12fff1c, // bx ip
+ // Third bundle:
+ 0xe320f000, // nop
+ 0xe320f000, // nop
+ 0xe320f000, // nop
+ // .Lplt_tail:
+ 0xe50dc004, // str ip, [sp, #-4]
+ // Fourth bundle:
+ 0xe3ccc103, // bic ip, ip, #0xc0000000
+ 0xe59cc000, // ldr ip, [ip]
+ 0xe3ccc13f, // bic ip, ip, #0xc000000f
+ 0xe12fff1c, // bx ip
+};
+
+template<bool big_endian>
+void
+Output_data_plt_arm_nacl<big_endian>::do_fill_first_plt_entry(
+ unsigned char* pov,
+ Arm_address got_address,
+ Arm_address plt_address)
+{
+ // Write first PLT entry. All but first two words are constants.
+ const size_t num_first_plt_words = (sizeof(first_plt_entry)
+ / sizeof(first_plt_entry[0]));
+
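+ // The movw/movt pair materializes &GOT[2] relative to the pc seen by
+ // the 'add ip, ip, pc' at offset 8: in ARM state pc reads as the
+ // instruction address + 8, hence plt_address + 16, and GOT[2] is 8
+ // bytes into the GOT.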
+ int32_t got_displacement = got_address + 8 - (plt_address + 16);
+
+ elfcpp::Swap<32, big_endian>::writeval
+ (pov + 0, first_plt_entry[0] | arm_movw_immediate (got_displacement));
+ elfcpp::Swap<32, big_endian>::writeval
+ (pov + 4, first_plt_entry[1] | arm_movt_immediate (got_displacement));
+
+ for (size_t i = 2; i < num_first_plt_words; ++i)
+ elfcpp::Swap<32, big_endian>::writeval(pov + i * 4, first_plt_entry[i]);
+}
+
+// Subsequent entries in the PLT.
+
+template<bool big_endian>
+const uint32_t Output_data_plt_arm_nacl<big_endian>::plt_entry[4] =
+{
+ 0xe300c000, // movw ip, #:lower16:&GOT[n]-.+8
+ 0xe340c000, // movt ip, #:upper16:&GOT[n]-.+8
+ 0xe08cc00f, // add ip, ip, pc
+ 0xea000000, // b .Lplt_tail
+};
+
+template<bool big_endian>
+void
+Output_data_plt_arm_nacl<big_endian>::do_fill_plt_entry(
+ unsigned char* pov,
+ Arm_address got_address,
+ Arm_address plt_address,
+ unsigned int got_offset,
+ unsigned int plt_offset)
+{
+ // Calculate the displacement between the PLT slot and the
+ // common tail that's part of the special initial PLT slot.
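+ // .Lplt_tail is word 11 of first_plt_entry above, and the pc of the
+ // branch reads as its own address + 8, i.e. plt_offset plus
+ // sizeof(plt_entry) + sizeof(uint32_t).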
+ int32_t tail_displacement = (plt_address + (11 * sizeof(uint32_t))
+ - (plt_address + plt_offset
+ + sizeof(plt_entry) + sizeof(uint32_t)));
+ gold_assert((tail_displacement & 3) == 0);
+ tail_displacement >>= 2;
+
+ gold_assert ((tail_displacement & 0xff000000) == 0
+ || (-tail_displacement & 0xff000000) == 0);
+
+ // Calculate the displacement between the PLT slot and the entry
+ // in the GOT. The offset accounts for the value produced by
+ // adding to pc in the penultimate instruction of the PLT stub.
+ const int32_t got_displacement = (got_address + got_offset
+ - (plt_address + sizeof(plt_entry)));
+
+ elfcpp::Swap<32, big_endian>::writeval
+ (pov + 0, plt_entry[0] | arm_movw_immediate (got_displacement));
+ elfcpp::Swap<32, big_endian>::writeval
+ (pov + 4, plt_entry[1] | arm_movt_immediate (got_displacement));
+ elfcpp::Swap<32, big_endian>::writeval
+ (pov + 8, plt_entry[2]);
+ elfcpp::Swap<32, big_endian>::writeval
+ (pov + 12, plt_entry[3] | (tail_displacement & 0x00ffffff));
}
-Target_selector_arm<false> target_selector_arm;
-Target_selector_arm<true> target_selector_armbe;
+// Target selectors.
+
+template<bool big_endian>
+class Target_selector_arm_nacl
+ : public Target_selector_nacl<Target_selector_arm<big_endian>,
+ Target_arm_nacl<big_endian> >
+{
+ public:
+ Target_selector_arm_nacl()
+ : Target_selector_nacl<Target_selector_arm<big_endian>,
+ Target_arm_nacl<big_endian> >(
+ "arm",
+ big_endian ? "elf32-bigarm-nacl" : "elf32-littlearm-nacl",
+ big_endian ? "armelfb_nacl" : "armelf_nacl")
+ { }
+};
+
+Target_selector_arm_nacl<false> target_selector_arm;
+Target_selector_arm_nacl<true> target_selector_armbe;
} // End anonymous namespace.