1 /* AArch64-specific support for NN-bit ELF.
2 Copyright 2009-2013 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
21 /* Notes on implementation:
23 Thread Local Storage (TLS)
27 The implementation currently supports both traditional TLS and TLS
28 descriptors, but only general dynamic (GD).
30 For traditional TLS the assembler will present us with code
31 fragments of the form:
34 R_AARCH64_TLSGD_ADR_PAGE21(foo)
35 add x0, :tlsgd_lo12:foo
36 R_AARCH64_TLSGD_ADD_LO12_NC(foo)
40 For TLS descriptors the assembler will present us with code
41 fragments of the form:
43 adrp x0, :tlsdesc:foo R_AARCH64_TLSDESC_ADR_PAGE21(foo)
44 ldr x1, [x0, #:tlsdesc_lo12:foo] R_AARCH64_TLSDESC_LD64_LO12(foo)
45 add x0, x0, #:tlsdesc_lo12:foo R_AARCH64_TLSDESC_ADD_LO12(foo)
47 blr x1 R_AARCH64_TLSDESC_CALL(foo)
49 The relocations R_AARCH64_TLSGD_{ADR_PAGE21,ADD_LO12_NC} against foo
50 indicate that foo is thread local and should be accessed via the
51 traditional TLS mechanism.
53 The relocations R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC}
54 against foo indicate that 'foo' is thread local and should be accessed
55 via a TLS descriptor mechanism.
57 The precise instruction sequence is only relevant from the
58 perspective of linker relaxation which is currently not implemented.
60 The static linker must detect that 'foo' is a TLS object and
61 allocate a double GOT entry. The GOT entry must be created for both
62 global and local TLS symbols. Note that this is different from
63 non-TLS local objects, which do not need a GOT entry.
65 In the traditional TLS mechanism, the double GOT entry is used to
66 provide the tls_index structure, containing module and offset
67 entries. The static linker places the relocation R_AARCH64_TLS_DTPMOD64
68 on the module entry. The loader will subsequently fixup this
69 relocation with the module identity.
71 For global traditional TLS symbols the static linker places an
72 R_AARCH64_TLS_DTPREL64 relocation on the offset entry. The loader
73 will subsequently fixup the offset. For local TLS symbols the static
74 linker fixes up the offset.
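   As an illustrative sketch only (the slot labels below are
   descriptive, they are not names used elsewhere in this file), the
   traditional double GOT entry for 'foo' therefore ends up as:

     GOT[n]    module id of foo        <- R_AARCH64_TLS_DTPMOD64
     GOT[n+1]  offset of foo in its    <- R_AARCH64_TLS_DTPREL64
               module's TLS block         (globals only; for locals the
                                           static linker fills the slot)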
76 In the TLS descriptor mechanism the double GOT entry is used to
77 provide the descriptor. The static linker places the relocation
78 R_AARCH64_TLSDESC on the first GOT slot. The loader will
79 subsequently fix this up.
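   Again as an illustration (the layout below follows the generic TLS
   descriptor convention and is not spelled out elsewhere in this
   file), the TLS descriptor double GOT entry is conceptually:

     GOT[n]    resolver function pointer  <- R_AARCH64_TLSDESC, fixed
     GOT[n+1]  argument for the resolver     up by the loader

   The code fragment above loads the resolver from the first slot into
   x1, passes the descriptor address in x0 and calls it with blr x1.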
83 The handling of TLS symbols is implemented across a number of
84 different backend functions. The following is a top level view of
85 what processing is performed where.
87 The TLS implementation maintains state information for each TLS
88 symbol. The state information for local and global symbols is kept
89 in different places. Global symbols use generic BFD structures while
90 local symbols use backend specific structures that are allocated and
91 maintained entirely by the backend.
95 elfNN_aarch64_check_relocs()
97 This function is invoked for each relocation.
99 The TLS relocations R_AARCH64_TLSGD_{ADR_PAGE21,ADD_LO12_NC} and
100 R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC} are
101 spotted. The local symbol data structures are created once, when
102 the first local symbol is seen.
104 The reference count for a symbol is incremented. The GOT type for
105 each symbol is marked as general dynamic.
107 elfNN_aarch64_allocate_dynrelocs ()
109 For each global with positive reference count we allocate a double
110 GOT slot. For a traditional TLS symbol we allocate space for two
111 relocation entries in the GOT; for a TLS descriptor symbol we
112 allocate space for one relocation on the slot. Record the GOT offset
115 elfNN_aarch64_size_dynamic_sections ()
117 Iterate over all input BFDs, look in the local symbol data structures
118 constructed earlier for local TLS symbols, and allocate them double
119 GOT slots along with space for a single GOT relocation. Update the
120 local symbol structure to record the GOT offset allocated.
122 elfNN_aarch64_relocate_section ()
124 Calls elfNN_aarch64_final_link_relocate ()
126 Emit the relevant TLS relocations against the GOT for each TLS
127 symbol. For local TLS symbols emit the GOT offset directly. The GOT
128 relocations are emitted once, the first time a TLS symbol is
129 encountered. The implementation uses the LSB of the GOT offset to
130 flag that the relevant GOT relocations for a symbol have been
131 emitted. All of the TLS code that uses the GOT offset needs to take
132 care to mask out this flag bit before using the offset.
134 elfNN_aarch64_final_link_relocate ()
136 Fix up the R_AARCH64_TLSGD_{ADR_PAGE21, ADD_LO12_NC} relocations. */
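/* A minimal sketch of the GOT offset LSB convention described above,
   for illustration only; got_offset_for_symbol is a hypothetical
   accessor, not a function defined in this file:

     bfd_vma off = got_offset_for_symbol (...);
     if ((off & 1) == 0)
       {
         ... emit the GOT relocation(s) for the symbol ...
         off |= 1;                   flag them as emitted
       }
     off &= ~(bfd_vma) 1;            mask the flag before any use  */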
140 #include "libiberty.h"
142 #include "bfd_stdint.h"
145 #include "elf/aarch64.h"
150 #define AARCH64_R(NAME) R_AARCH64_ ## NAME
151 #define AARCH64_R_STR(NAME) "R_AARCH64_" #NAME
152 #define LOG_FILE_ALIGN 3
156 #define AARCH64_R(NAME) R_AARCH64_P32_ ## NAME
157 #define AARCH64_R_STR(NAME) "R_AARCH64_P32_" #NAME
158 #define LOG_FILE_ALIGN 2
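/* For example, with the definitions above AARCH64_R (ABS32) expands to
   R_AARCH64_ABS32 for the 64-bit backend and to R_AARCH64_P32_ABS32 for
   the ILP32 backend, so the remainder of the file can refer to
   relocations without repeating the ARCH_SIZE test.  */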
161 static bfd_reloc_status_type
162 bfd_elf_aarch64_put_addend (bfd *abfd,
164 reloc_howto_type *howto, bfd_signed_vma addend);
166 #define IS_AARCH64_TLS_RELOC(R_TYPE) \
167 ((R_TYPE) == R_AARCH64_TLSGD_ADR_PAGE21 \
168 || (R_TYPE) == R_AARCH64_TLSGD_ADD_LO12_NC \
169 || (R_TYPE) == R_AARCH64_TLSIE_MOVW_GOTTPREL_G1 \
170 || (R_TYPE) == R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC \
171 || (R_TYPE) == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
172 || (R_TYPE) == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC \
173 || (R_TYPE) == R_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
174 || (R_TYPE) == R_AARCH64_TLSLE_ADD_TPREL_LO12 \
175 || (R_TYPE) == R_AARCH64_TLSLE_ADD_TPREL_HI12 \
176 || (R_TYPE) == R_AARCH64_TLSLE_ADD_TPREL_LO12_NC \
177 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G2 \
178 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G1 \
179 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G1_NC \
180 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G0 \
181 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G0_NC \
182 || (R_TYPE) == R_AARCH64_TLS_DTPMOD64 \
183 || (R_TYPE) == R_AARCH64_TLS_DTPREL64 \
184 || (R_TYPE) == R_AARCH64_TLS_TPREL64 \
185 || IS_AARCH64_TLSDESC_RELOC ((R_TYPE)))
187 #define IS_AARCH64_TLSDESC_RELOC(R_TYPE) \
188 ((R_TYPE) == R_AARCH64_TLSDESC_LD_PREL19 \
189 || (R_TYPE) == R_AARCH64_TLSDESC_ADR_PREL21 \
190 || (R_TYPE) == R_AARCH64_TLSDESC_ADR_PAGE21 \
191 || (R_TYPE) == R_AARCH64_TLSDESC_ADD_LO12_NC \
192 || (R_TYPE) == R_AARCH64_TLSDESC_LD64_LO12_NC \
193 || (R_TYPE) == R_AARCH64_TLSDESC_OFF_G1 \
194 || (R_TYPE) == R_AARCH64_TLSDESC_OFF_G0_NC \
195 || (R_TYPE) == R_AARCH64_TLSDESC_LDR \
196 || (R_TYPE) == R_AARCH64_TLSDESC_ADD \
197 || (R_TYPE) == R_AARCH64_TLSDESC_CALL \
198 || (R_TYPE) == R_AARCH64_TLSDESC)
200 #define ELIMINATE_COPY_RELOCS 0
202 /* Return the size of a relocation entry. HTAB is the bfd's
203 elf_aarch64_link_hash_table. */
204 #define RELOC_SIZE(HTAB) (sizeof (ElfNN_External_Rela))
206 /* GOT Entry size - 8 bytes in ELF64 and 4 bytes in ELF32. */
207 #define GOT_ENTRY_SIZE (ARCH_SIZE / 8)
208 #define PLT_ENTRY_SIZE (32)
209 #define PLT_SMALL_ENTRY_SIZE (16)
210 #define PLT_TLSDESC_ENTRY_SIZE (32)
212 /* Take the PAGE component of an address or offset. */
213 #define PG(x) ((x) & ~ 0xfff)
214 #define PG_OFFSET(x) ((x) & 0xfff)
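/* For example, PG (0x12345678) is 0x12345000 and PG_OFFSET (0x12345678)
   is 0x678; the ADRP relocations below are described in exactly these
   terms, e.g. ((PG(S+A) - PG(P)) >> 12) & 0x1fffff.  */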
216 /* Encoding of the nop instruction */
217 #define INSN_NOP 0xd503201f
219 #define aarch64_compute_jump_table_size(htab) \
220 (((htab)->root.srelplt == NULL) ? 0 \
221 : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
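/* Illustrative arithmetic only: with three entries in .rela.plt and a
   GOT_ENTRY_SIZE of 8 (ELF64), the macro above yields a jump table size
   of 24 bytes.  */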
223 /* The first entry in a procedure linkage table looks like this
224 if the distance between the PLTGOT and the PLT is < 4GB; in that
225 case use these PLT entries. Note that the dynamic linker gets &PLTGOT[2]
226 in x16 and needs to work out PLTGOT[1] by using an address of
227 [x16,#-GOT_ENTRY_SIZE]. */
228 static const bfd_byte elfNN_aarch64_small_plt0_entry[PLT_ENTRY_SIZE] =
230 0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! */
231 0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */
232 0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */
233 0x10, 0x42, 0x00, 0x91, /* add x16, x16,#PLT_GOT+0x10 */
234 0x20, 0x02, 0x1f, 0xd6, /* br x17 */
235 0x1f, 0x20, 0x03, 0xd5, /* nop */
236 0x1f, 0x20, 0x03, 0xd5, /* nop */
237 0x1f, 0x20, 0x03, 0xd5, /* nop */
240 /* A per-function entry in a procedure linkage table looks like this
241 if the distance between the PLTGOT and the PLT is < 4GB; in that
242 case use these PLT entries. */
243 static const bfd_byte elfNN_aarch64_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
245 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
246 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
247 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
248 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
251 static const bfd_byte
252 elfNN_aarch64_tlsdesc_small_plt_entry[PLT_TLSDESC_ENTRY_SIZE] =
254 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */
255 0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */
256 0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */
257 0x42, 0x08, 0x40, 0xF9, /* ldr x2, [x2, #0] */
258 0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */
259 0x40, 0x00, 0x1F, 0xD6, /* br x2 */
260 0x1f, 0x20, 0x03, 0xd5, /* nop */
261 0x1f, 0x20, 0x03, 0xd5, /* nop */
264 #define elf_info_to_howto elfNN_aarch64_info_to_howto
265 #define elf_info_to_howto_rel elfNN_aarch64_info_to_howto
267 #define AARCH64_ELF_ABI_VERSION 0
269 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
270 #define ALL_ONES (~ (bfd_vma) 0)
272 static reloc_howto_type elf64_aarch64_howto_none =
273 HOWTO (R_AARCH64_NONE, /* type */
275 0, /* size (0 = byte, 1 = short, 2 = long) */
277 FALSE, /* pc_relative */
279 complain_overflow_dont,/* complain_on_overflow */
280 bfd_elf_generic_reloc, /* special_function */
281 "R_AARCH64_NONE", /* name */
282 FALSE, /* partial_inplace */
285 FALSE); /* pcrel_offset */
287 static reloc_howto_type elf64_aarch64_howto_dynrelocs[] =
289 HOWTO (R_AARCH64_COPY, /* type */
291 2, /* size (0 = byte, 1 = short, 2 = long) */
293 FALSE, /* pc_relative */
295 complain_overflow_bitfield, /* complain_on_overflow */
296 bfd_elf_generic_reloc, /* special_function */
297 "R_AARCH64_COPY", /* name */
298 TRUE, /* partial_inplace */
299 0xffffffff, /* src_mask */
300 0xffffffff, /* dst_mask */
301 FALSE), /* pcrel_offset */
303 HOWTO (R_AARCH64_GLOB_DAT, /* type */
305 2, /* size (0 = byte, 1 = short, 2 = long) */
307 FALSE, /* pc_relative */
309 complain_overflow_bitfield, /* complain_on_overflow */
310 bfd_elf_generic_reloc, /* special_function */
311 "R_AARCH64_GLOB_DAT", /* name */
312 TRUE, /* partial_inplace */
313 0xffffffff, /* src_mask */
314 0xffffffff, /* dst_mask */
315 FALSE), /* pcrel_offset */
317 HOWTO (R_AARCH64_JUMP_SLOT, /* type */
319 2, /* size (0 = byte, 1 = short, 2 = long) */
321 FALSE, /* pc_relative */
323 complain_overflow_bitfield, /* complain_on_overflow */
324 bfd_elf_generic_reloc, /* special_function */
325 "R_AARCH64_JUMP_SLOT", /* name */
326 TRUE, /* partial_inplace */
327 0xffffffff, /* src_mask */
328 0xffffffff, /* dst_mask */
329 FALSE), /* pcrel_offset */
331 HOWTO (R_AARCH64_RELATIVE, /* type */
333 2, /* size (0 = byte, 1 = short, 2 = long) */
335 FALSE, /* pc_relative */
337 complain_overflow_bitfield, /* complain_on_overflow */
338 bfd_elf_generic_reloc, /* special_function */
339 "R_AARCH64_RELATIVE", /* name */
340 TRUE, /* partial_inplace */
341 ALL_ONES, /* src_mask */
342 ALL_ONES, /* dst_mask */
343 FALSE), /* pcrel_offset */
345 HOWTO (R_AARCH64_TLS_DTPMOD64, /* type */
347 2, /* size (0 = byte, 1 = short, 2 = long) */
349 FALSE, /* pc_relative */
351 complain_overflow_dont, /* complain_on_overflow */
352 bfd_elf_generic_reloc, /* special_function */
353 "R_AARCH64_TLS_DTPMOD64", /* name */
354 FALSE, /* partial_inplace */
356 ALL_ONES, /* dst_mask */
357 FALSE), /* pcrel_offset */
359 HOWTO (R_AARCH64_TLS_DTPREL64, /* type */
361 2, /* size (0 = byte, 1 = short, 2 = long) */
363 FALSE, /* pc_relative */
365 complain_overflow_dont, /* complain_on_overflow */
366 bfd_elf_generic_reloc, /* special_function */
367 "R_AARCH64_TLS_DTPREL64", /* name */
368 FALSE, /* partial_inplace */
370 ALL_ONES, /* dst_mask */
371 FALSE), /* pcrel_offset */
373 HOWTO (R_AARCH64_TLS_TPREL64, /* type */
375 2, /* size (0 = byte, 1 = short, 2 = long) */
377 FALSE, /* pc_relative */
379 complain_overflow_dont, /* complain_on_overflow */
380 bfd_elf_generic_reloc, /* special_function */
381 "R_AARCH64_TLS_TPREL64", /* name */
382 FALSE, /* partial_inplace */
384 ALL_ONES, /* dst_mask */
385 FALSE), /* pcrel_offset */
387 HOWTO (R_AARCH64_TLSDESC, /* type */
389 2, /* size (0 = byte, 1 = short, 2 = long) */
391 FALSE, /* pc_relative */
393 complain_overflow_dont, /* complain_on_overflow */
394 bfd_elf_generic_reloc, /* special_function */
395 "R_AARCH64_TLSDESC", /* name */
396 FALSE, /* partial_inplace */
398 ALL_ONES, /* dst_mask */
399 FALSE), /* pcrel_offset */
403 /* Note: code such as elf64_aarch64_reloc_type_lookup expects to use e.g.
404 R_AARCH64_PREL64 as an index into this, and find the R_AARCH64_PREL64 HOWTO
407 static reloc_howto_type elf64_aarch64_howto_table[] =
409 /* Basic data relocations. */
411 HOWTO (R_AARCH64_NULL, /* type */
413 0, /* size (0 = byte, 1 = short, 2 = long) */
415 FALSE, /* pc_relative */
417 complain_overflow_dont, /* complain_on_overflow */
418 bfd_elf_generic_reloc, /* special_function */
419 "R_AARCH64_NULL", /* name */
420 FALSE, /* partial_inplace */
423 FALSE), /* pcrel_offset */
426 HOWTO (R_AARCH64_ABS64, /* type */
428 4, /* size (4 = long long) */
430 FALSE, /* pc_relative */
432 complain_overflow_unsigned, /* complain_on_overflow */
433 bfd_elf_generic_reloc, /* special_function */
434 "R_AARCH64_ABS64", /* name */
435 FALSE, /* partial_inplace */
436 ALL_ONES, /* src_mask */
437 ALL_ONES, /* dst_mask */
438 FALSE), /* pcrel_offset */
441 HOWTO (R_AARCH64_ABS32, /* type */
443 2, /* size (0 = byte, 1 = short, 2 = long) */
445 FALSE, /* pc_relative */
447 complain_overflow_unsigned, /* complain_on_overflow */
448 bfd_elf_generic_reloc, /* special_function */
449 "R_AARCH64_ABS32", /* name */
450 FALSE, /* partial_inplace */
451 0xffffffff, /* src_mask */
452 0xffffffff, /* dst_mask */
453 FALSE), /* pcrel_offset */
456 HOWTO (R_AARCH64_ABS16, /* type */
458 1, /* size (0 = byte, 1 = short, 2 = long) */
460 FALSE, /* pc_relative */
462 complain_overflow_unsigned, /* complain_on_overflow */
463 bfd_elf_generic_reloc, /* special_function */
464 "R_AARCH64_ABS16", /* name */
465 FALSE, /* partial_inplace */
466 0xffff, /* src_mask */
467 0xffff, /* dst_mask */
468 FALSE), /* pcrel_offset */
470 /* .xword: (S+A-P) */
471 HOWTO (R_AARCH64_PREL64, /* type */
473 4, /* size (4 = long long) */
475 TRUE, /* pc_relative */
477 complain_overflow_signed, /* complain_on_overflow */
478 bfd_elf_generic_reloc, /* special_function */
479 "R_AARCH64_PREL64", /* name */
480 FALSE, /* partial_inplace */
481 ALL_ONES, /* src_mask */
482 ALL_ONES, /* dst_mask */
483 TRUE), /* pcrel_offset */
486 HOWTO (R_AARCH64_PREL32, /* type */
488 2, /* size (0 = byte, 1 = short, 2 = long) */
490 TRUE, /* pc_relative */
492 complain_overflow_signed, /* complain_on_overflow */
493 bfd_elf_generic_reloc, /* special_function */
494 "R_AARCH64_PREL32", /* name */
495 FALSE, /* partial_inplace */
496 0xffffffff, /* src_mask */
497 0xffffffff, /* dst_mask */
498 TRUE), /* pcrel_offset */
501 HOWTO (R_AARCH64_PREL16, /* type */
503 1, /* size (0 = byte, 1 = short, 2 = long) */
505 TRUE, /* pc_relative */
507 complain_overflow_signed, /* complain_on_overflow */
508 bfd_elf_generic_reloc, /* special_function */
509 "R_AARCH64_PREL16", /* name */
510 FALSE, /* partial_inplace */
511 0xffff, /* src_mask */
512 0xffff, /* dst_mask */
513 TRUE), /* pcrel_offset */
515 /* Group relocations to create a 16, 32, 48 or 64 bit
516 unsigned data or abs address inline. */
518 /* MOVZ: ((S+A) >> 0) & 0xffff */
519 HOWTO (R_AARCH64_MOVW_UABS_G0, /* type */
521 2, /* size (0 = byte, 1 = short, 2 = long) */
523 FALSE, /* pc_relative */
525 complain_overflow_unsigned, /* complain_on_overflow */
526 bfd_elf_generic_reloc, /* special_function */
527 "R_AARCH64_MOVW_UABS_G0", /* name */
528 FALSE, /* partial_inplace */
529 0xffff, /* src_mask */
530 0xffff, /* dst_mask */
531 FALSE), /* pcrel_offset */
533 /* MOVK: ((S+A) >> 0) & 0xffff [no overflow check] */
534 HOWTO (R_AARCH64_MOVW_UABS_G0_NC, /* type */
536 2, /* size (0 = byte, 1 = short, 2 = long) */
538 FALSE, /* pc_relative */
540 complain_overflow_dont, /* complain_on_overflow */
541 bfd_elf_generic_reloc, /* special_function */
542 "R_AARCH64_MOVW_UABS_G0_NC", /* name */
543 FALSE, /* partial_inplace */
544 0xffff, /* src_mask */
545 0xffff, /* dst_mask */
546 FALSE), /* pcrel_offset */
548 /* MOVZ: ((S+A) >> 16) & 0xffff */
549 HOWTO (R_AARCH64_MOVW_UABS_G1, /* type */
551 2, /* size (0 = byte, 1 = short, 2 = long) */
553 FALSE, /* pc_relative */
555 complain_overflow_unsigned, /* complain_on_overflow */
556 bfd_elf_generic_reloc, /* special_function */
557 "R_AARCH64_MOVW_UABS_G1", /* name */
558 FALSE, /* partial_inplace */
559 0xffff, /* src_mask */
560 0xffff, /* dst_mask */
561 FALSE), /* pcrel_offset */
563 /* MOVK: ((S+A) >> 16) & 0xffff [no overflow check] */
564 HOWTO (R_AARCH64_MOVW_UABS_G1_NC, /* type */
566 2, /* size (0 = byte, 1 = short, 2 = long) */
568 FALSE, /* pc_relative */
570 complain_overflow_dont, /* complain_on_overflow */
571 bfd_elf_generic_reloc, /* special_function */
572 "R_AARCH64_MOVW_UABS_G1_NC", /* name */
573 FALSE, /* partial_inplace */
574 0xffff, /* src_mask */
575 0xffff, /* dst_mask */
576 FALSE), /* pcrel_offset */
578 /* MOVZ: ((S+A) >> 32) & 0xffff */
579 HOWTO (R_AARCH64_MOVW_UABS_G2, /* type */
581 2, /* size (0 = byte, 1 = short, 2 = long) */
583 FALSE, /* pc_relative */
585 complain_overflow_unsigned, /* complain_on_overflow */
586 bfd_elf_generic_reloc, /* special_function */
587 "R_AARCH64_MOVW_UABS_G2", /* name */
588 FALSE, /* partial_inplace */
589 0xffff, /* src_mask */
590 0xffff, /* dst_mask */
591 FALSE), /* pcrel_offset */
593 /* MOVK: ((S+A) >> 32) & 0xffff [no overflow check] */
594 HOWTO (R_AARCH64_MOVW_UABS_G2_NC, /* type */
596 2, /* size (0 = byte, 1 = short, 2 = long) */
598 FALSE, /* pc_relative */
600 complain_overflow_dont, /* complain_on_overflow */
601 bfd_elf_generic_reloc, /* special_function */
602 "R_AARCH64_MOVW_UABS_G2_NC", /* name */
603 FALSE, /* partial_inplace */
604 0xffff, /* src_mask */
605 0xffff, /* dst_mask */
606 FALSE), /* pcrel_offset */
608 /* MOVZ: ((S+A) >> 48) & 0xffff */
609 HOWTO (R_AARCH64_MOVW_UABS_G3, /* type */
611 2, /* size (0 = byte, 1 = short, 2 = long) */
613 FALSE, /* pc_relative */
615 complain_overflow_unsigned, /* complain_on_overflow */
616 bfd_elf_generic_reloc, /* special_function */
617 "R_AARCH64_MOVW_UABS_G3", /* name */
618 FALSE, /* partial_inplace */
619 0xffff, /* src_mask */
620 0xffff, /* dst_mask */
621 FALSE), /* pcrel_offset */
623 /* Group relocations to create high part of a 16, 32, 48 or 64 bit
624 signed data or abs address inline. Will change instruction
625 to MOVN or MOVZ depending on sign of calculated value. */
627 /* MOV[ZN]: ((S+A) >> 0) & 0xffff */
628 HOWTO (R_AARCH64_MOVW_SABS_G0, /* type */
630 2, /* size (0 = byte, 1 = short, 2 = long) */
632 FALSE, /* pc_relative */
634 complain_overflow_signed, /* complain_on_overflow */
635 bfd_elf_generic_reloc, /* special_function */
636 "R_AARCH64_MOVW_SABS_G0", /* name */
637 FALSE, /* partial_inplace */
638 0xffff, /* src_mask */
639 0xffff, /* dst_mask */
640 FALSE), /* pcrel_offset */
642 /* MOV[ZN]: ((S+A) >> 16) & 0xffff */
643 HOWTO (R_AARCH64_MOVW_SABS_G1, /* type */
645 2, /* size (0 = byte, 1 = short, 2 = long) */
647 FALSE, /* pc_relative */
649 complain_overflow_signed, /* complain_on_overflow */
650 bfd_elf_generic_reloc, /* special_function */
651 "R_AARCH64_MOVW_SABS_G1", /* name */
652 FALSE, /* partial_inplace */
653 0xffff, /* src_mask */
654 0xffff, /* dst_mask */
655 FALSE), /* pcrel_offset */
657 /* MOV[ZN]: ((S+A) >> 32) & 0xffff */
658 HOWTO (R_AARCH64_MOVW_SABS_G2, /* type */
660 2, /* size (0 = byte, 1 = short, 2 = long) */
662 FALSE, /* pc_relative */
664 complain_overflow_signed, /* complain_on_overflow */
665 bfd_elf_generic_reloc, /* special_function */
666 "R_AARCH64_MOVW_SABS_G2", /* name */
667 FALSE, /* partial_inplace */
668 0xffff, /* src_mask */
669 0xffff, /* dst_mask */
670 FALSE), /* pcrel_offset */
672 /* Relocations to generate 19, 21 and 33 bit PC-relative load/store
673 addresses: PG(x) is (x & ~0xfff). */
675 /* LD-lit: ((S+A-P) >> 2) & 0x7ffff */
676 HOWTO (R_AARCH64_LD_PREL_LO19, /* type */
678 2, /* size (0 = byte, 1 = short, 2 = long) */
680 TRUE, /* pc_relative */
682 complain_overflow_signed, /* complain_on_overflow */
683 bfd_elf_generic_reloc, /* special_function */
684 "R_AARCH64_LD_PREL_LO19", /* name */
685 FALSE, /* partial_inplace */
686 0x7ffff, /* src_mask */
687 0x7ffff, /* dst_mask */
688 TRUE), /* pcrel_offset */
690 /* ADR: (S+A-P) & 0x1fffff */
691 HOWTO (R_AARCH64_ADR_PREL_LO21, /* type */
693 2, /* size (0 = byte, 1 = short, 2 = long) */
695 TRUE, /* pc_relative */
697 complain_overflow_signed, /* complain_on_overflow */
698 bfd_elf_generic_reloc, /* special_function */
699 "R_AARCH64_ADR_PREL_LO21", /* name */
700 FALSE, /* partial_inplace */
701 0x1fffff, /* src_mask */
702 0x1fffff, /* dst_mask */
703 TRUE), /* pcrel_offset */
705 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
706 HOWTO (R_AARCH64_ADR_PREL_PG_HI21, /* type */
708 2, /* size (0 = byte, 1 = short, 2 = long) */
710 TRUE, /* pc_relative */
712 complain_overflow_signed, /* complain_on_overflow */
713 bfd_elf_generic_reloc, /* special_function */
714 "R_AARCH64_ADR_PREL_PG_HI21", /* name */
715 FALSE, /* partial_inplace */
716 0x1fffff, /* src_mask */
717 0x1fffff, /* dst_mask */
718 TRUE), /* pcrel_offset */
720 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff [no overflow check] */
721 HOWTO (R_AARCH64_ADR_PREL_PG_HI21_NC, /* type */
723 2, /* size (0 = byte, 1 = short, 2 = long) */
725 TRUE, /* pc_relative */
727 complain_overflow_dont, /* complain_on_overflow */
728 bfd_elf_generic_reloc, /* special_function */
729 "R_AARCH64_ADR_PREL_PG_HI21_NC", /* name */
730 FALSE, /* partial_inplace */
731 0x1fffff, /* src_mask */
732 0x1fffff, /* dst_mask */
733 TRUE), /* pcrel_offset */
735 /* ADD: (S+A) & 0xfff [no overflow check] */
736 HOWTO (R_AARCH64_ADD_ABS_LO12_NC, /* type */
738 2, /* size (0 = byte, 1 = short, 2 = long) */
740 FALSE, /* pc_relative */
742 complain_overflow_dont, /* complain_on_overflow */
743 bfd_elf_generic_reloc, /* special_function */
744 "R_AARCH64_ADD_ABS_LO12_NC", /* name */
745 FALSE, /* partial_inplace */
746 0x3ffc00, /* src_mask */
747 0x3ffc00, /* dst_mask */
748 FALSE), /* pcrel_offset */
750 /* LD/ST8: (S+A) & 0xfff */
751 HOWTO (R_AARCH64_LDST8_ABS_LO12_NC, /* type */
753 2, /* size (0 = byte, 1 = short, 2 = long) */
755 FALSE, /* pc_relative */
757 complain_overflow_dont, /* complain_on_overflow */
758 bfd_elf_generic_reloc, /* special_function */
759 "R_AARCH64_LDST8_ABS_LO12_NC", /* name */
760 FALSE, /* partial_inplace */
761 0xfff, /* src_mask */
762 0xfff, /* dst_mask */
763 FALSE), /* pcrel_offset */
765 /* Relocations for control-flow instructions. */
767 /* TBZ/NZ: ((S+A-P) >> 2) & 0x3fff */
768 HOWTO (R_AARCH64_TSTBR14, /* type */
770 2, /* size (0 = byte, 1 = short, 2 = long) */
772 TRUE, /* pc_relative */
774 complain_overflow_signed, /* complain_on_overflow */
775 bfd_elf_generic_reloc, /* special_function */
776 "R_AARCH64_TSTBR14", /* name */
777 FALSE, /* partial_inplace */
778 0x3fff, /* src_mask */
779 0x3fff, /* dst_mask */
780 TRUE), /* pcrel_offset */
782 /* B.cond: ((S+A-P) >> 2) & 0x7ffff */
783 HOWTO (R_AARCH64_CONDBR19, /* type */
785 2, /* size (0 = byte, 1 = short, 2 = long) */
787 TRUE, /* pc_relative */
789 complain_overflow_signed, /* complain_on_overflow */
790 bfd_elf_generic_reloc, /* special_function */
791 "R_AARCH64_CONDBR19", /* name */
792 FALSE, /* partial_inplace */
793 0x7ffff, /* src_mask */
794 0x7ffff, /* dst_mask */
795 TRUE), /* pcrel_offset */
799 /* B: ((S+A-P) >> 2) & 0x3ffffff */
800 HOWTO (R_AARCH64_JUMP26, /* type */
802 2, /* size (0 = byte, 1 = short, 2 = long) */
804 TRUE, /* pc_relative */
806 complain_overflow_signed, /* complain_on_overflow */
807 bfd_elf_generic_reloc, /* special_function */
808 "R_AARCH64_JUMP26", /* name */
809 FALSE, /* partial_inplace */
810 0x3ffffff, /* src_mask */
811 0x3ffffff, /* dst_mask */
812 TRUE), /* pcrel_offset */
814 /* BL: ((S+A-P) >> 2) & 0x3ffffff */
815 HOWTO (R_AARCH64_CALL26, /* type */
817 2, /* size (0 = byte, 1 = short, 2 = long) */
819 TRUE, /* pc_relative */
821 complain_overflow_signed, /* complain_on_overflow */
822 bfd_elf_generic_reloc, /* special_function */
823 "R_AARCH64_CALL26", /* name */
824 FALSE, /* partial_inplace */
825 0x3ffffff, /* src_mask */
826 0x3ffffff, /* dst_mask */
827 TRUE), /* pcrel_offset */
829 /* LD/ST16: (S+A) & 0xffe */
830 HOWTO (R_AARCH64_LDST16_ABS_LO12_NC, /* type */
832 2, /* size (0 = byte, 1 = short, 2 = long) */
834 FALSE, /* pc_relative */
836 complain_overflow_dont, /* complain_on_overflow */
837 bfd_elf_generic_reloc, /* special_function */
838 "R_AARCH64_LDST16_ABS_LO12_NC", /* name */
839 FALSE, /* partial_inplace */
840 0xffe, /* src_mask */
841 0xffe, /* dst_mask */
842 FALSE), /* pcrel_offset */
844 /* LD/ST32: (S+A) & 0xffc */
845 HOWTO (R_AARCH64_LDST32_ABS_LO12_NC, /* type */
847 2, /* size (0 = byte, 1 = short, 2 = long) */
849 FALSE, /* pc_relative */
851 complain_overflow_dont, /* complain_on_overflow */
852 bfd_elf_generic_reloc, /* special_function */
853 "R_AARCH64_LDST32_ABS_LO12_NC", /* name */
854 FALSE, /* partial_inplace */
855 0xffc, /* src_mask */
856 0xffc, /* dst_mask */
857 FALSE), /* pcrel_offset */
859 /* LD/ST64: (S+A) & 0xff8 */
860 HOWTO (R_AARCH64_LDST64_ABS_LO12_NC, /* type */
862 2, /* size (0 = byte, 1 = short, 2 = long) */
864 FALSE, /* pc_relative */
866 complain_overflow_dont, /* complain_on_overflow */
867 bfd_elf_generic_reloc, /* special_function */
868 "R_AARCH64_LDST64_ABS_LO12_NC", /* name */
869 FALSE, /* partial_inplace */
870 0xff8, /* src_mask */
871 0xff8, /* dst_mask */
872 FALSE), /* pcrel_offset */
887 /* LD/ST128: (S+A) & 0xff0 */
888 HOWTO (R_AARCH64_LDST128_ABS_LO12_NC, /* type */
890 2, /* size (0 = byte, 1 = short, 2 = long) */
892 FALSE, /* pc_relative */
894 complain_overflow_dont, /* complain_on_overflow */
895 bfd_elf_generic_reloc, /* special_function */
896 "R_AARCH64_LDST128_ABS_LO12_NC", /* name */
897 FALSE, /* partial_inplace */
898 0xff0, /* src_mask */
899 0xff0, /* dst_mask */
900 FALSE), /* pcrel_offset */
912 /* Set a load-literal immediate field to bits
913 0x1FFFFC of G(S)-P */
914 HOWTO (R_AARCH64_GOT_LD_PREL19, /* type */
916 2, /* size (0 = byte,1 = short,2 = long) */
918 TRUE, /* pc_relative */
920 complain_overflow_signed, /* complain_on_overflow */
921 bfd_elf_generic_reloc, /* special_function */
922 "R_AARCH64_GOT_LD_PREL19", /* name */
923 FALSE, /* partial_inplace */
924 0xffffe0, /* src_mask */
925 0xffffe0, /* dst_mask */
926 TRUE), /* pcrel_offset */
930 /* Get to the page for the GOT entry for the symbol
931 (G(S) - P) using an ADRP instruction. */
932 HOWTO (R_AARCH64_ADR_GOT_PAGE, /* type */
934 2, /* size (0 = byte, 1 = short, 2 = long) */
936 TRUE, /* pc_relative */
938 complain_overflow_dont, /* complain_on_overflow */
939 bfd_elf_generic_reloc, /* special_function */
940 "R_AARCH64_ADR_GOT_PAGE", /* name */
941 FALSE, /* partial_inplace */
942 0x1fffff, /* src_mask */
943 0x1fffff, /* dst_mask */
944 TRUE), /* pcrel_offset */
946 /* LDNN: GOT offset G(S) & 0xff8 */
947 HOWTO (R_AARCH64_LDNN_GOT_LO12_NC, /* type */
949 2, /* size (0 = byte, 1 = short, 2 = long) */
951 FALSE, /* pc_relative */
953 complain_overflow_dont, /* complain_on_overflow */
954 bfd_elf_generic_reloc, /* special_function */
955 "R_AARCH64_LDNN_GOT_LO12_NC", /* name */
956 FALSE, /* partial_inplace */
957 0xff8, /* src_mask */
958 0xff8, /* dst_mask */
959 FALSE) /* pcrel_offset */
962 static reloc_howto_type elf64_aarch64_tls_howto_table[] =
966 /* Get to the page for the GOT entry for the symbol
967 (G(S) - P) using an ADRP instruction. */
968 HOWTO (R_AARCH64_TLSGD_ADR_PAGE21, /* type */
970 2, /* size (0 = byte, 1 = short, 2 = long) */
972 TRUE, /* pc_relative */
974 complain_overflow_dont, /* complain_on_overflow */
975 bfd_elf_generic_reloc, /* special_function */
976 "R_AARCH64_TLSGD_ADR_PAGE21", /* name */
977 FALSE, /* partial_inplace */
978 0x1fffff, /* src_mask */
979 0x1fffff, /* dst_mask */
980 TRUE), /* pcrel_offset */
982 /* ADD: GOT offset G(S) & 0xff8 [no overflow check] */
983 HOWTO (R_AARCH64_TLSGD_ADD_LO12_NC, /* type */
985 2, /* size (0 = byte, 1 = short, 2 = long) */
987 FALSE, /* pc_relative */
989 complain_overflow_dont, /* complain_on_overflow */
990 bfd_elf_generic_reloc, /* special_function */
991 "R_AARCH64_TLSGD_ADD_LO12_NC", /* name */
992 FALSE, /* partial_inplace */
993 0xfff, /* src_mask */
994 0xfff, /* dst_mask */
995 FALSE), /* pcrel_offset */
1022 HOWTO (R_AARCH64_TLSIE_MOVW_GOTTPREL_G1, /* type */
1023 16, /* rightshift */
1024 2, /* size (0 = byte, 1 = short, 2 = long) */
1026 FALSE, /* pc_relative */
1028 complain_overflow_dont, /* complain_on_overflow */
1029 bfd_elf_generic_reloc, /* special_function */
1030 "R_AARCH64_TLSIE_MOVW_GOTTPREL_G1", /* name */
1031 FALSE, /* partial_inplace */
1032 0xffff, /* src_mask */
1033 0xffff, /* dst_mask */
1034 FALSE), /* pcrel_offset */
1036 HOWTO (R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC, /* type */
1038 2, /* size (0 = byte, 1 = short, 2 = long) */
1040 FALSE, /* pc_relative */
1042 complain_overflow_dont, /* complain_on_overflow */
1043 bfd_elf_generic_reloc, /* special_function */
1044 "R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC", /* name */
1045 FALSE, /* partial_inplace */
1046 0xffff, /* src_mask */
1047 0xffff, /* dst_mask */
1048 FALSE), /* pcrel_offset */
1050 HOWTO (R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21, /* type */
1051 12, /* rightshift */
1052 2, /* size (0 = byte, 1 = short, 2 = long) */
1054 FALSE, /* pc_relative */
1056 complain_overflow_dont, /* complain_on_overflow */
1057 bfd_elf_generic_reloc, /* special_function */
1058 "R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21", /* name */
1059 FALSE, /* partial_inplace */
1060 0x1fffff, /* src_mask */
1061 0x1fffff, /* dst_mask */
1062 FALSE), /* pcrel_offset */
1064 HOWTO (R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC, /* type */
1066 2, /* size (0 = byte, 1 = short, 2 = long) */
1068 FALSE, /* pc_relative */
1070 complain_overflow_dont, /* complain_on_overflow */
1071 bfd_elf_generic_reloc, /* special_function */
1072 "R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC", /* name */
1073 FALSE, /* partial_inplace */
1074 0xff8, /* src_mask */
1075 0xff8, /* dst_mask */
1076 FALSE), /* pcrel_offset */
1078 HOWTO (R_AARCH64_TLSIE_LD_GOTTPREL_PREL19, /* type */
1080 2, /* size (0 = byte, 1 = short, 2 = long) */
1082 FALSE, /* pc_relative */
1084 complain_overflow_dont, /* complain_on_overflow */
1085 bfd_elf_generic_reloc, /* special_function */
1086 "R_AARCH64_TLSIE_LD_GOTTPREL_PREL19", /* name */
1087 FALSE, /* partial_inplace */
1088 0x1ffffc, /* src_mask */
1089 0x1ffffc, /* dst_mask */
1090 FALSE), /* pcrel_offset */
1092 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G2, /* type */
1093 32, /* rightshift */
1094 2, /* size (0 = byte, 1 = short, 2 = long) */
1096 FALSE, /* pc_relative */
1098 complain_overflow_dont, /* complain_on_overflow */
1099 bfd_elf_generic_reloc, /* special_function */
1100 "R_AARCH64_TLSLE_MOVW_TPREL_G2", /* name */
1101 FALSE, /* partial_inplace */
1102 0xffff, /* src_mask */
1103 0xffff, /* dst_mask */
1104 FALSE), /* pcrel_offset */
1106 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G1, /* type */
1107 16, /* rightshift */
1108 2, /* size (0 = byte, 1 = short, 2 = long) */
1110 FALSE, /* pc_relative */
1112 complain_overflow_dont, /* complain_on_overflow */
1113 bfd_elf_generic_reloc, /* special_function */
1114 "R_AARCH64_TLSLE_MOVW_TPREL_G1", /* name */
1115 FALSE, /* partial_inplace */
1116 0xffff, /* src_mask */
1117 0xffff, /* dst_mask */
1118 FALSE), /* pcrel_offset */
1120 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G1_NC, /* type */
1121 16, /* rightshift */
1122 2, /* size (0 = byte, 1 = short, 2 = long) */
1124 FALSE, /* pc_relative */
1126 complain_overflow_dont, /* complain_on_overflow */
1127 bfd_elf_generic_reloc, /* special_function */
1128 "R_AARCH64_TLSLE_MOVW_TPREL_G1_NC", /* name */
1129 FALSE, /* partial_inplace */
1130 0xffff, /* src_mask */
1131 0xffff, /* dst_mask */
1132 FALSE), /* pcrel_offset */
1134 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G0, /* type */
1136 2, /* size (0 = byte, 1 = short, 2 = long) */
1138 FALSE, /* pc_relative */
1140 complain_overflow_dont, /* complain_on_overflow */
1141 bfd_elf_generic_reloc, /* special_function */
1142 "R_AARCH64_TLSLE_MOVW_TPREL_G0", /* name */
1143 FALSE, /* partial_inplace */
1144 0xffff, /* src_mask */
1145 0xffff, /* dst_mask */
1146 FALSE), /* pcrel_offset */
1148 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G0_NC, /* type */
1150 2, /* size (0 = byte, 1 = short, 2 = long) */
1152 FALSE, /* pc_relative */
1154 complain_overflow_dont, /* complain_on_overflow */
1155 bfd_elf_generic_reloc, /* special_function */
1156 "R_AARCH64_TLSLE_MOVW_TPREL_G0_NC", /* name */
1157 FALSE, /* partial_inplace */
1158 0xffff, /* src_mask */
1159 0xffff, /* dst_mask */
1160 FALSE), /* pcrel_offset */
1162 HOWTO (R_AARCH64_TLSLE_ADD_TPREL_HI12, /* type */
1163 12, /* rightshift */
1164 2, /* size (0 = byte, 1 = short, 2 = long) */
1166 FALSE, /* pc_relative */
1168 complain_overflow_dont, /* complain_on_overflow */
1169 bfd_elf_generic_reloc, /* special_function */
1170 "R_AARCH64_TLSLE_ADD_TPREL_HI12", /* name */
1171 FALSE, /* partial_inplace */
1172 0xfff, /* src_mask */
1173 0xfff, /* dst_mask */
1174 FALSE), /* pcrel_offset */
1176 HOWTO (R_AARCH64_TLSLE_ADD_TPREL_LO12, /* type */
1178 2, /* size (0 = byte, 1 = short, 2 = long) */
1180 FALSE, /* pc_relative */
1182 complain_overflow_dont, /* complain_on_overflow */
1183 bfd_elf_generic_reloc, /* special_function */
1184 "R_AARCH64_TLSLE_ADD_TPREL_LO12", /* name */
1185 FALSE, /* partial_inplace */
1186 0xfff, /* src_mask */
1187 0xfff, /* dst_mask */
1188 FALSE), /* pcrel_offset */
1190 HOWTO (R_AARCH64_TLSLE_ADD_TPREL_LO12_NC, /* type */
1192 2, /* size (0 = byte, 1 = short, 2 = long) */
1194 FALSE, /* pc_relative */
1196 complain_overflow_dont, /* complain_on_overflow */
1197 bfd_elf_generic_reloc, /* special_function */
1198 "R_AARCH64_TLSLE_ADD_TPREL_LO12_NC", /* name */
1199 FALSE, /* partial_inplace */
1200 0xfff, /* src_mask */
1201 0xfff, /* dst_mask */
1202 FALSE), /* pcrel_offset */
1205 static reloc_howto_type elf64_aarch64_tlsdesc_howto_table[] =
1207 HOWTO (R_AARCH64_TLSDESC_LD_PREL19, /* type */
1209 2, /* size (0 = byte, 1 = short, 2 = long) */
1211 TRUE, /* pc_relative */
1213 complain_overflow_dont, /* complain_on_overflow */
1214 bfd_elf_generic_reloc, /* special_function */
1215 "R_AARCH64_TLSDESC_LD_PREL19", /* name */
1216 FALSE, /* partial_inplace */
1217 0x1ffffc, /* src_mask */
1218 0x1ffffc, /* dst_mask */
1219 TRUE), /* pcrel_offset */
1221 HOWTO (R_AARCH64_TLSDESC_ADR_PREL21, /* type */
1223 2, /* size (0 = byte, 1 = short, 2 = long) */
1225 TRUE, /* pc_relative */
1227 complain_overflow_dont, /* complain_on_overflow */
1228 bfd_elf_generic_reloc, /* special_function */
1229 "R_AARCH64_TLSDESC_ADR_PREL21", /* name */
1230 FALSE, /* partial_inplace */
1231 0x1fffff, /* src_mask */
1232 0x1fffff, /* dst_mask */
1233 TRUE), /* pcrel_offset */
1235 /* Get to the page for the GOT entry for the symbol
1236 (G(S) - P) using an ADRP instruction. */
1237 HOWTO (R_AARCH64_TLSDESC_ADR_PAGE21, /* type */
1238 12, /* rightshift */
1239 2, /* size (0 = byte, 1 = short, 2 = long) */
1241 TRUE, /* pc_relative */
1243 complain_overflow_dont, /* complain_on_overflow */
1244 bfd_elf_generic_reloc, /* special_function */
1245 "R_AARCH64_TLSDESC_ADR_PAGE21", /* name */
1246 FALSE, /* partial_inplace */
1247 0x1fffff, /* src_mask */
1248 0x1fffff, /* dst_mask */
1249 TRUE), /* pcrel_offset */
1251 /* LD64: GOT offset G(S) & 0xfff. */
1252 HOWTO (R_AARCH64_TLSDESC_LD64_LO12_NC, /* type */
1254 2, /* size (0 = byte, 1 = short, 2 = long) */
1256 FALSE, /* pc_relative */
1258 complain_overflow_dont, /* complain_on_overflow */
1259 bfd_elf_generic_reloc, /* special_function */
1260 "R_AARCH64_TLSDESC_LD64_LO12_NC", /* name */
1261 FALSE, /* partial_inplace */
1262 0xfff, /* src_mask */
1263 0xfff, /* dst_mask */
1264 FALSE), /* pcrel_offset */
1266 /* ADD: GOT offset G(S) & 0xfff. */
1267 HOWTO (R_AARCH64_TLSDESC_ADD_LO12_NC, /* type */
1269 2, /* size (0 = byte, 1 = short, 2 = long) */
1271 FALSE, /* pc_relative */
1273 complain_overflow_dont, /* complain_on_overflow */
1274 bfd_elf_generic_reloc, /* special_function */
1275 "R_AARCH64_TLSDESC_ADD_LO12_NC", /* name */
1276 FALSE, /* partial_inplace */
1277 0xfff, /* src_mask */
1278 0xfff, /* dst_mask */
1279 FALSE), /* pcrel_offset */
1281 HOWTO (R_AARCH64_TLSDESC_OFF_G1, /* type */
1282 16, /* rightshift */
1283 2, /* size (0 = byte, 1 = short, 2 = long) */
1285 FALSE, /* pc_relative */
1287 complain_overflow_dont, /* complain_on_overflow */
1288 bfd_elf_generic_reloc, /* special_function */
1289 "R_AARCH64_TLSDESC_OFF_G1", /* name */
1290 FALSE, /* partial_inplace */
1291 0xffff, /* src_mask */
1292 0xffff, /* dst_mask */
1293 FALSE), /* pcrel_offset */
1295 HOWTO (R_AARCH64_TLSDESC_OFF_G0_NC, /* type */
1297 2, /* size (0 = byte, 1 = short, 2 = long) */
1299 FALSE, /* pc_relative */
1301 complain_overflow_dont, /* complain_on_overflow */
1302 bfd_elf_generic_reloc, /* special_function */
1303 "R_AARCH64_TLSDESC_OFF_G0_NC", /* name */
1304 FALSE, /* partial_inplace */
1305 0xffff, /* src_mask */
1306 0xffff, /* dst_mask */
1307 FALSE), /* pcrel_offset */
1309 HOWTO (R_AARCH64_TLSDESC_LDR, /* type */
1311 2, /* size (0 = byte, 1 = short, 2 = long) */
1313 FALSE, /* pc_relative */
1315 complain_overflow_dont, /* complain_on_overflow */
1316 bfd_elf_generic_reloc, /* special_function */
1317 "R_AARCH64_TLSDESC_LDR", /* name */
1318 FALSE, /* partial_inplace */
1321 FALSE), /* pcrel_offset */
1323 HOWTO (R_AARCH64_TLSDESC_ADD, /* type */
1325 2, /* size (0 = byte, 1 = short, 2 = long) */
1327 FALSE, /* pc_relative */
1329 complain_overflow_dont, /* complain_on_overflow */
1330 bfd_elf_generic_reloc, /* special_function */
1331 "R_AARCH64_TLSDESC_ADD", /* name */
1332 FALSE, /* partial_inplace */
1335 FALSE), /* pcrel_offset */
1337 HOWTO (R_AARCH64_TLSDESC_CALL, /* type */
1339 2, /* size (0 = byte, 1 = short, 2 = long) */
1341 FALSE, /* pc_relative */
1343 complain_overflow_dont, /* complain_on_overflow */
1344 bfd_elf_generic_reloc, /* special_function */
1345 "R_AARCH64_TLSDESC_CALL", /* name */
1346 FALSE, /* partial_inplace */
1349 FALSE), /* pcrel_offset */
1352 static reloc_howto_type *
1353 elfNN_aarch64_howto_from_type (unsigned int r_type)
1358 bfd_set_error (bfd_error_bad_value);
1363 if (r_type >= R_AARCH64_static_min && r_type < R_AARCH64_static_max)
1364 return &elf64_aarch64_howto_table[r_type - R_AARCH64_static_min];
1366 if (r_type >= R_AARCH64_tls_min && r_type < R_AARCH64_tls_max)
1367 return &elf64_aarch64_tls_howto_table[r_type - R_AARCH64_tls_min];
1369 if (r_type >= R_AARCH64_tlsdesc_min && r_type < R_AARCH64_tlsdesc_max)
1370 return &elf64_aarch64_tlsdesc_howto_table[r_type - R_AARCH64_tlsdesc_min];
1372 if (r_type >= R_AARCH64_dyn_min && r_type < R_AARCH64_dyn_max)
1373 return &elf64_aarch64_howto_dynrelocs[r_type - R_AARCH64_dyn_min];
1377 case R_AARCH64_NONE:
1378 return &elf64_aarch64_howto_none;
1381 bfd_set_error (bfd_error_bad_value);
1386 elfNN_aarch64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *bfd_reloc,
1387 Elf_Internal_Rela *elf_reloc)
1389 unsigned int r_type;
1391 r_type = ELFNN_R_TYPE (elf_reloc->r_info);
1392 bfd_reloc->howto = elfNN_aarch64_howto_from_type (r_type);
1395 struct elf64_aarch64_reloc_map
1397 bfd_reloc_code_real_type bfd_reloc_val;
1398 unsigned int elf_reloc_val;
1401 /* All entries in this list must also be present in
1402 elf64_aarch64_howto_table. */
1403 static const struct elf64_aarch64_reloc_map elf64_aarch64_reloc_map[] =
1405 {BFD_RELOC_NONE, R_AARCH64_NONE},
1407 /* Basic data relocations. */
1408 {BFD_RELOC_CTOR, R_AARCH64_ABS64},
1409 {BFD_RELOC_64, R_AARCH64_ABS64},
1410 {BFD_RELOC_32, R_AARCH64_ABS32},
1411 {BFD_RELOC_16, R_AARCH64_ABS16},
1412 {BFD_RELOC_64_PCREL, R_AARCH64_PREL64},
1413 {BFD_RELOC_32_PCREL, R_AARCH64_PREL32},
1414 {BFD_RELOC_16_PCREL, R_AARCH64_PREL16},
1416 /* Group relocations to low order bits of a 16, 32, 48 or 64 bit
1418 {BFD_RELOC_AARCH64_MOVW_G0_NC, R_AARCH64_MOVW_UABS_G0_NC},
1419 {BFD_RELOC_AARCH64_MOVW_G1_NC, R_AARCH64_MOVW_UABS_G1_NC},
1420 {BFD_RELOC_AARCH64_MOVW_G2_NC, R_AARCH64_MOVW_UABS_G2_NC},
1422 /* Group relocations to create high bits of a 16, 32, 48 or 64 bit
1423 signed value inline. */
1424 {BFD_RELOC_AARCH64_MOVW_G0_S, R_AARCH64_MOVW_SABS_G0},
1425 {BFD_RELOC_AARCH64_MOVW_G1_S, R_AARCH64_MOVW_SABS_G1},
1426 {BFD_RELOC_AARCH64_MOVW_G2_S, R_AARCH64_MOVW_SABS_G2},
1428 /* Group relocations to create high bits of a 16, 32, 48 or 64 bit
1429 unsigned value inline. */
1430 {BFD_RELOC_AARCH64_MOVW_G0, R_AARCH64_MOVW_UABS_G0},
1431 {BFD_RELOC_AARCH64_MOVW_G1, R_AARCH64_MOVW_UABS_G1},
1432 {BFD_RELOC_AARCH64_MOVW_G2, R_AARCH64_MOVW_UABS_G2},
1433 {BFD_RELOC_AARCH64_MOVW_G3, R_AARCH64_MOVW_UABS_G3},
1435 /* Relocations to generate 19, 21 and 33 bit PC-relative load/store. */
1436 {BFD_RELOC_AARCH64_LD_LO19_PCREL, R_AARCH64_LD_PREL_LO19},
1437 {BFD_RELOC_AARCH64_ADR_LO21_PCREL, R_AARCH64_ADR_PREL_LO21},
1438 {BFD_RELOC_AARCH64_ADR_HI21_PCREL, R_AARCH64_ADR_PREL_PG_HI21},
1439 {BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL, R_AARCH64_ADR_PREL_PG_HI21_NC},
1440 {BFD_RELOC_AARCH64_ADD_LO12, R_AARCH64_ADD_ABS_LO12_NC},
1441 {BFD_RELOC_AARCH64_LDST8_LO12, R_AARCH64_LDST8_ABS_LO12_NC},
1442 {BFD_RELOC_AARCH64_LDST16_LO12, R_AARCH64_LDST16_ABS_LO12_NC},
1443 {BFD_RELOC_AARCH64_LDST32_LO12, R_AARCH64_LDST32_ABS_LO12_NC},
1444 {BFD_RELOC_AARCH64_LDST64_LO12, R_AARCH64_LDST64_ABS_LO12_NC},
1445 {BFD_RELOC_AARCH64_LDST128_LO12, R_AARCH64_LDST128_ABS_LO12_NC},
1447 /* Relocations for control-flow instructions. */
1448 {BFD_RELOC_AARCH64_TSTBR14, R_AARCH64_TSTBR14},
1449 {BFD_RELOC_AARCH64_BRANCH19, R_AARCH64_CONDBR19},
1450 {BFD_RELOC_AARCH64_JUMP26, R_AARCH64_JUMP26},
1451 {BFD_RELOC_AARCH64_CALL26, R_AARCH64_CALL26},
1453 /* Relocations for PIC. */
1454 {BFD_RELOC_AARCH64_GOT_LD_PREL19, R_AARCH64_GOT_LD_PREL19},
1455 {BFD_RELOC_AARCH64_ADR_GOT_PAGE, R_AARCH64_ADR_GOT_PAGE},
1456 {BFD_RELOC_AARCH64_LD64_GOT_LO12_NC, R_AARCH64_LD64_GOT_LO12_NC},
1458 /* Relocations for TLS. */
1459 {BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21, R_AARCH64_TLSGD_ADR_PAGE21},
1460 {BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC, R_AARCH64_TLSGD_ADD_LO12_NC},
1461 {BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
1462 R_AARCH64_TLSIE_MOVW_GOTTPREL_G1},
1463 {BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
1464 R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC},
1465 {BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
1466 R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21},
1467 {BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC,
1468 R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC},
1469 {BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19,
1470 R_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
1471 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2, R_AARCH64_TLSLE_MOVW_TPREL_G2},
1472 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1, R_AARCH64_TLSLE_MOVW_TPREL_G1},
1473 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
1474 R_AARCH64_TLSLE_MOVW_TPREL_G1_NC},
1475 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0, R_AARCH64_TLSLE_MOVW_TPREL_G0},
1476 {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
1477 R_AARCH64_TLSLE_MOVW_TPREL_G0_NC},
1478 {BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12, R_AARCH64_TLSLE_ADD_TPREL_LO12},
1479 {BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12, R_AARCH64_TLSLE_ADD_TPREL_HI12},
1480 {BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
1481 R_AARCH64_TLSLE_ADD_TPREL_LO12_NC},
1482 {BFD_RELOC_AARCH64_TLSDESC_LD_PREL19, R_AARCH64_TLSDESC_LD_PREL19},
1483 {BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, R_AARCH64_TLSDESC_ADR_PREL21},
1484 {BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21, R_AARCH64_TLSDESC_ADR_PAGE21},
1485 {BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC, R_AARCH64_TLSDESC_ADD_LO12_NC},
1486 {BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC, R_AARCH64_TLSDESC_LD64_LO12_NC},
1487 {BFD_RELOC_AARCH64_TLSDESC_OFF_G1, R_AARCH64_TLSDESC_OFF_G1},
1488 {BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC, R_AARCH64_TLSDESC_OFF_G0_NC},
1489 {BFD_RELOC_AARCH64_TLSDESC_LDR, R_AARCH64_TLSDESC_LDR},
1490 {BFD_RELOC_AARCH64_TLSDESC_ADD, R_AARCH64_TLSDESC_ADD},
1491 {BFD_RELOC_AARCH64_TLSDESC_CALL, R_AARCH64_TLSDESC_CALL},
1492 {BFD_RELOC_AARCH64_TLS_DTPMOD64, R_AARCH64_TLS_DTPMOD64},
1493 {BFD_RELOC_AARCH64_TLS_DTPREL64, R_AARCH64_TLS_DTPREL64},
1494 {BFD_RELOC_AARCH64_TLS_TPREL64, R_AARCH64_TLS_TPREL64},
1495 {BFD_RELOC_AARCH64_TLSDESC, R_AARCH64_TLSDESC},
1498 static reloc_howto_type *
1499 elfNN_aarch64_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1500 bfd_reloc_code_real_type code)
1504 for (i = 0; i < ARRAY_SIZE (elf64_aarch64_reloc_map); i++)
1505 if (elf64_aarch64_reloc_map[i].bfd_reloc_val == code)
1506 return elf64_aarch64_howto_from_type
1507 (elf64_aarch64_reloc_map[i].elf_reloc_val);
1509 bfd_set_error (bfd_error_bad_value);
1513 static reloc_howto_type *
1514 elfNN_aarch64_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1519 for (i = 0; i < ARRAY_SIZE (elf64_aarch64_howto_table); i++)
1520 if (elf64_aarch64_howto_table[i].name != NULL
1521 && strcasecmp (elf64_aarch64_howto_table[i].name, r_name) == 0)
1522 return &elf64_aarch64_howto_table[i];
1527 /* Support for core dump NOTE sections. */
1530 elf64_aarch64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
1535 switch (note->descsz)
1540 case 408: /* sizeof(struct elf_prstatus) on Linux/arm64. */
1542 elf_tdata (abfd)->core->signal
1543 = bfd_get_16 (abfd, note->descdata + 12);
1546 elf_tdata (abfd)->core->lwpid
1547 = bfd_get_32 (abfd, note->descdata + 32);
1556 /* Make a ".reg/999" section. */
1557 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
1558 size, note->descpos + offset);
1561 #define TARGET_LITTLE_SYM bfd_elfNN_littleaarch64_vec
1562 #define TARGET_LITTLE_NAME "elfNN-littleaarch64"
1563 #define TARGET_BIG_SYM bfd_elfNN_bigaarch64_vec
1564 #define TARGET_BIG_NAME "elfNN-bigaarch64"
1566 #define elf_backend_grok_prstatus elf64_aarch64_grok_prstatus
1568 typedef unsigned long int insn32;
1570 /* The linker script knows the section names for placement.
1571 The entry_names are used to do simple name mangling on the stubs.
1572 Given a function name, and its type, the stub can be found. The
1573 name can be changed. The only requirement is that the %s be present. */
1574 #define STUB_ENTRY_NAME "__%s_veneer"
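/* For example, a stub created for calls to 'foo' is given the local
   symbol name "__foo_veneer" by the template above.  */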
1576 /* The name of the dynamic interpreter. This is put in the .interp
1578 #define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1"
1580 #define AARCH64_MAX_FWD_BRANCH_OFFSET \
1581 (((1 << 25) - 1) << 2)
1582 #define AARCH64_MAX_BWD_BRANCH_OFFSET \
1585 #define AARCH64_MAX_ADRP_IMM ((1 << 20) - 1)
1586 #define AARCH64_MIN_ADRP_IMM (-(1 << 20))
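/* Worked figures for the limits above, for illustration: the maximum
   forward branch offset is ((1 << 25) - 1) << 2 = 0x7fffffc bytes (just
   under 128MiB), and the ADRP immediate range of [-(1 << 20), (1 << 20) - 1]
   pages gives a reach of roughly +/-4GiB from PG (place).  */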
1589 aarch64_valid_for_adrp_p (bfd_vma value, bfd_vma place)
1591 bfd_signed_vma offset = (bfd_signed_vma) (PG (value) - PG (place)) >> 12;
1592 return offset <= AARCH64_MAX_ADRP_IMM && offset >= AARCH64_MIN_ADRP_IMM;
1596 aarch64_valid_branch_p (bfd_vma value, bfd_vma place)
1598 bfd_signed_vma offset = (bfd_signed_vma) (value - place);
1599 return (offset <= AARCH64_MAX_FWD_BRANCH_OFFSET
1600 && offset >= AARCH64_MAX_BWD_BRANCH_OFFSET);
1603 static const uint32_t aarch64_adrp_branch_stub [] =
1605 0x90000010, /* adrp ip0, X */
1606 /* R_AARCH64_ADR_HI21_PCREL(X) */
1607 0x91000210, /* add ip0, ip0, :lo12:X */
1608 /* R_AARCH64_ADD_ABS_LO12_NC(X) */
1609 0xd61f0200, /* br ip0 */
1612 static const uint32_t aarch64_long_branch_stub[] =
1615 0x58000090, /* ldr ip0, 1f */
1617 0x18000090, /* ldr wip0, 1f */
1619 0x10000011, /* adr ip1, #0 */
1620 0x8b110210, /* add ip0, ip0, ip1 */
1621 0xd61f0200, /* br ip0 */
1622 0x00000000, /* 1: .xword or .word
1623 R_AARCH64_PRELNN(X) + 12
1628 /* Section name for stubs is the associated section name plus this
1630 #define STUB_SUFFIX ".stub"
1632 enum elf_aarch64_stub_type
1635 aarch64_stub_adrp_branch,
1636 aarch64_stub_long_branch,
1639 struct elf_aarch64_stub_hash_entry
1641 /* Base hash table entry structure. */
1642 struct bfd_hash_entry root;
1644 /* The stub section. */
1647 /* Offset within stub_sec of the beginning of this stub. */
1648 bfd_vma stub_offset;
1650 /* Given the symbol's value and its section we can determine its final
1651 value when building the stubs (so the stub knows where to jump). */
1652 bfd_vma target_value;
1653 asection *target_section;
1655 enum elf_aarch64_stub_type stub_type;
1657 /* The symbol table entry, if any, that this was derived from. */
1658 struct elf_aarch64_link_hash_entry *h;
1660 /* Destination symbol type */
1661 unsigned char st_type;
1663 /* Where this stub is being called from, or, in the case of combined
1664 stub sections, the first input section in the group. */
1667 /* The name for the local symbol at the start of this stub. The
1668 stub name in the hash table has to be unique; this does not, so
1669 it can be friendlier. */
1673 /* Used to build a map of a section. This is required for mixed-endian
1676 typedef struct elf_elf_section_map
1681 elf_aarch64_section_map;
1684 typedef struct _aarch64_elf_section_data
1686 struct bfd_elf_section_data elf;
1687 unsigned int mapcount;
1688 unsigned int mapsize;
1689 elf_aarch64_section_map *map;
1691 _aarch64_elf_section_data;
1693 #define elf_aarch64_section_data(sec) \
1694 ((_aarch64_elf_section_data *) elf_section_data (sec))
1696 /* The size of the thread control block. */
1699 struct elf_aarch64_local_symbol
1701 unsigned int got_type;
1702 bfd_signed_vma got_refcount;
1705 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The
1706 offset is from the end of the jump table and reserved entries
1709 The magic value (bfd_vma) -1 indicates that an offset has not been
1711 bfd_vma tlsdesc_got_jump_table_offset;
1714 struct elf_aarch64_obj_tdata
1716 struct elf_obj_tdata root;
1718 /* local symbol descriptors */
1719 struct elf_aarch64_local_symbol *locals;
1721 /* Zero to warn when linking objects with incompatible enum sizes. */
1722 int no_enum_size_warning;
1724 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
1725 int no_wchar_size_warning;
1728 #define elf_aarch64_tdata(bfd) \
1729 ((struct elf_aarch64_obj_tdata *) (bfd)->tdata.any)
1731 #define elf_aarch64_locals(bfd) (elf_aarch64_tdata (bfd)->locals)
1733 #define is_aarch64_elf(bfd) \
1734 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
1735 && elf_tdata (bfd) != NULL \
1736 && elf_object_id (bfd) == AARCH64_ELF_DATA)
1739 elfNN_aarch64_mkobject (bfd *abfd)
1741 return bfd_elf_allocate_object (abfd, sizeof (struct elf_aarch64_obj_tdata),
1745 #define elf_aarch64_hash_entry(ent) \
1746 ((struct elf_aarch64_link_hash_entry *)(ent))
1748 #define GOT_UNKNOWN 0
1749 #define GOT_NORMAL 1
1750 #define GOT_TLS_GD 2
1751 #define GOT_TLS_IE 4
1752 #define GOT_TLSDESC_GD 8
1754 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLSDESC_GD))
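/* got_type is a bit mask; for example a symbol whose GOT entry is
   needed both through a TLS descriptor and an IE-style access could
   carry GOT_TLSDESC_GD | GOT_TLS_IE, and GOT_TLS_GD_ANY_P would still
   be true for it because the GOT_TLSDESC_GD bit is set.  */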
1756 /* AArch64 ELF linker hash entry. */
1757 struct elf_aarch64_link_hash_entry
1759 struct elf_link_hash_entry root;
1761 /* Track dynamic relocs copied for this symbol. */
1762 struct elf_dyn_relocs *dyn_relocs;
1764 /* Since PLT entries have variable size, we need to record the
1765 index into .got.plt instead of recomputing it from the PLT
1767 bfd_signed_vma plt_got_offset;
1769 /* Bit mask representing the type of GOT entry(s) if any required by
1771 unsigned int got_type;
1773 /* A pointer to the most recently used stub hash entry against this
1775 struct elf_aarch64_stub_hash_entry *stub_cache;
1777 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The offset
1778 is from the end of the jump table and reserved entries within the PLTGOT.
1780 The magic value (bfd_vma) -1 indicates that an offset has not
1782 bfd_vma tlsdesc_got_jump_table_offset;
1786 elfNN_aarch64_symbol_got_type (struct elf_link_hash_entry *h,
1788 unsigned long r_symndx)
1791 return elf_aarch64_hash_entry (h)->got_type;
1793 if (! elf_aarch64_locals (abfd))
1796 return elf_aarch64_locals (abfd)[r_symndx].got_type;
1799 /* Get the AArch64 elf linker hash table from a link_info structure. */
1800 #define elf_aarch64_hash_table(info) \
1801 ((struct elf_aarch64_link_hash_table *) ((info)->hash))
1803 #define aarch64_stub_hash_lookup(table, string, create, copy) \
1804 ((struct elf_aarch64_stub_hash_entry *) \
1805 bfd_hash_lookup ((table), (string), (create), (copy)))
1807 /* AArch64 ELF linker hash table. */
1808 struct elf_aarch64_link_hash_table
1810 /* The main hash table. */
1811 struct elf_link_hash_table root;
1813 /* Nonzero to force PIC branch veneers. */
1816 /* The number of bytes in the initial entry in the PLT. */
1817 bfd_size_type plt_header_size;
1819 /* The number of bytes in the subsequent PLT entries. */
1820 bfd_size_type plt_entry_size;
1822 /* Short-cuts to get to dynamic linker sections. */
1826 /* Small local sym cache. */
1827 struct sym_cache sym_cache;
1829 /* For convenience in allocate_dynrelocs. */
1832 /* The amount of space used by the reserved portion of the sgotplt
1833 section, plus whatever space is used by the jump slots. */
1834 bfd_vma sgotplt_jump_table_size;
1836 /* The stub hash table. */
1837 struct bfd_hash_table stub_hash_table;
1839 /* Linker stub bfd. */
1842 /* Linker call-backs. */
1843 asection *(*add_stub_section) (const char *, asection *);
1844 void (*layout_sections_again) (void);
1846 /* Array to keep track of which stub sections have been created, and
1847 information on stub grouping. */
1850 /* This is the section to which stubs in the group will be
1853 /* The stub section. */
1857 /* Assorted information used by elfNN_aarch64_size_stubs. */
1858 unsigned int bfd_count;
1860 asection **input_list;
1862 /* The offset into splt of the PLT entry for the TLS descriptor
1863 resolver. Special values are 0, if not necessary (or not found
1864 to be necessary yet), and -1 if needed but not determined
1866 bfd_vma tlsdesc_plt;
1868 /* The GOT offset for the lazy trampoline. Communicated to the
1869 loader via DT_TLSDESC_GOT. The magic value (bfd_vma) -1
1870 indicates an offset is not allocated. */
1871 bfd_vma dt_tlsdesc_got;
1875 /* Return non-zero if the indicated VALUE has overflowed the maximum
1876 range expressible by an unsigned number with the indicated number of
1879 static bfd_reloc_status_type
1880 aarch64_unsigned_overflow (bfd_vma value, unsigned int bits)
1883 if (bits >= sizeof (bfd_vma) * 8)
1884 return bfd_reloc_ok;
1885 lim = (bfd_vma) 1 << bits;
1887 return bfd_reloc_overflow;
1888 return bfd_reloc_ok;
1892 /* Return non-zero if the indicated VALUE has overflowed the maximum
1893 range expressible by a signed number with the indicated number of
1896 static bfd_reloc_status_type
1897 aarch64_signed_overflow (bfd_vma value, unsigned int bits)
1899 bfd_signed_vma svalue = (bfd_signed_vma) value;
1902 if (bits >= sizeof (bfd_vma) * 8)
1903 return bfd_reloc_ok;
1904 lim = (bfd_signed_vma) 1 << (bits - 1);
1905 if (svalue < -lim || svalue >= lim)
1906 return bfd_reloc_overflow;
1907 return bfd_reloc_ok;
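/* Worked examples of the two checks above (illustrative only):
   aarch64_unsigned_overflow (0xffff, 16) is bfd_reloc_ok while
   aarch64_unsigned_overflow (0x10000, 16) reports an overflow; the
   signed variant accepts values from -0x8000 to 0x7fff inclusive for 16
   bits, which presumes VALUE has already been sign-extended to the full
   width of bfd_vma.  */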
1910 /* Create an entry in an AArch64 ELF linker hash table. */
1912 static struct bfd_hash_entry *
1913 elfNN_aarch64_link_hash_newfunc (struct bfd_hash_entry *entry,
1914 struct bfd_hash_table *table,
1917 struct elf_aarch64_link_hash_entry *ret =
1918 (struct elf_aarch64_link_hash_entry *) entry;
1920 /* Allocate the structure if it has not already been allocated by a
1923 ret = bfd_hash_allocate (table,
1924 sizeof (struct elf_aarch64_link_hash_entry));
1926 return (struct bfd_hash_entry *) ret;
1928 /* Call the allocation method of the superclass. */
1929 ret = ((struct elf_aarch64_link_hash_entry *)
1930 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
1934 ret->dyn_relocs = NULL;
1935 ret->got_type = GOT_UNKNOWN;
1936 ret->plt_got_offset = (bfd_vma) - 1;
1937 ret->stub_cache = NULL;
1938 ret->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
1941 return (struct bfd_hash_entry *) ret;
1944 /* Initialize an entry in the stub hash table. */
1946 static struct bfd_hash_entry *
1947 stub_hash_newfunc (struct bfd_hash_entry *entry,
1948 struct bfd_hash_table *table, const char *string)
1950 /* Allocate the structure if it has not already been allocated by a subclass. */
1954 entry = bfd_hash_allocate (table,
1956 elf_aarch64_stub_hash_entry));
1961 /* Call the allocation method of the superclass. */
1962 entry = bfd_hash_newfunc (entry, table, string);
1965 struct elf_aarch64_stub_hash_entry *eh;
1967 /* Initialize the local fields. */
1968 eh = (struct elf_aarch64_stub_hash_entry *) entry;
1969 eh->stub_sec = NULL;
1970 eh->stub_offset = 0;
1971 eh->target_value = 0;
1972 eh->target_section = NULL;
1973 eh->stub_type = aarch64_stub_none;
1982 /* Copy the extra info we tack onto an elf_link_hash_entry. */
1985 elfNN_aarch64_copy_indirect_symbol (struct bfd_link_info *info,
1986 struct elf_link_hash_entry *dir,
1987 struct elf_link_hash_entry *ind)
1989 struct elf_aarch64_link_hash_entry *edir, *eind;
1991 edir = (struct elf_aarch64_link_hash_entry *) dir;
1992 eind = (struct elf_aarch64_link_hash_entry *) ind;
1994 if (eind->dyn_relocs != NULL)
1996 if (edir->dyn_relocs != NULL)
1998 struct elf_dyn_relocs **pp;
1999 struct elf_dyn_relocs *p;
2001 /* Add reloc counts against the indirect sym to the direct sym
2002 list. Merge any entries against the same section. */
2003 for (pp = &eind->dyn_relocs; (p = *pp) != NULL;)
2005 struct elf_dyn_relocs *q;
2007 for (q = edir->dyn_relocs; q != NULL; q = q->next)
2008 if (q->sec == p->sec)
2010 q->pc_count += p->pc_count;
2011 q->count += p->count;
2018 *pp = edir->dyn_relocs;
2021 edir->dyn_relocs = eind->dyn_relocs;
2022 eind->dyn_relocs = NULL;
2025 if (ind->root.type == bfd_link_hash_indirect)
2027 /* Copy over PLT info. */
2028 if (dir->got.refcount <= 0)
2030 edir->got_type = eind->got_type;
2031 eind->got_type = GOT_UNKNOWN;
2035 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2038 /* Create an AArch64 elf linker hash table. */
2040 static struct bfd_link_hash_table *
2041 elfNN_aarch64_link_hash_table_create (bfd *abfd)
2043 struct elf_aarch64_link_hash_table *ret;
2044 bfd_size_type amt = sizeof (struct elf_aarch64_link_hash_table);
2046 ret = bfd_zmalloc (amt);
2050 if (!_bfd_elf_link_hash_table_init
2051 (&ret->root, abfd, elfNN_aarch64_link_hash_newfunc,
2052 sizeof (struct elf_aarch64_link_hash_entry), AARCH64_ELF_DATA))
2058 ret->plt_header_size = PLT_ENTRY_SIZE;
2059 ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE;
2061 ret->dt_tlsdesc_got = (bfd_vma) - 1;
2063 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2064 sizeof (struct elf_aarch64_stub_hash_entry)))
2070 return &ret->root.root;
2073 /* Free the derived linker hash table. */
2076 elfNN_aarch64_hash_table_free (struct bfd_link_hash_table *hash)
2078 struct elf_aarch64_link_hash_table *ret
2079 = (struct elf_aarch64_link_hash_table *) hash;
2081 bfd_hash_table_free (&ret->stub_hash_table);
2082 _bfd_elf_link_hash_table_free (hash);
2086 aarch64_resolve_relocation (unsigned int r_type, bfd_vma place, bfd_vma value,
2087 bfd_vma addend, bfd_boolean weak_undef_p)
2091 case R_AARCH64_TLSDESC_CALL:
2092 case R_AARCH64_NONE:
2093 case R_AARCH64_NULL:
2096 case R_AARCH64_ADR_PREL_LO21:
2097 case R_AARCH64_CONDBR19:
2098 case R_AARCH64_LD_PREL_LO19:
2099 case R_AARCH64_PREL16:
2100 case R_AARCH64_PREL32:
2101 case R_AARCH64_PREL64:
2102 case R_AARCH64_TSTBR14:
2105 value = value + addend - place;
2108 case R_AARCH64_CALL26:
2109 case R_AARCH64_JUMP26:
2110 value = value + addend - place;
2113 case R_AARCH64_ABS16:
2114 case R_AARCH64_ABS32:
2115 case R_AARCH64_MOVW_SABS_G0:
2116 case R_AARCH64_MOVW_SABS_G1:
2117 case R_AARCH64_MOVW_SABS_G2:
2118 case R_AARCH64_MOVW_UABS_G0:
2119 case R_AARCH64_MOVW_UABS_G0_NC:
2120 case R_AARCH64_MOVW_UABS_G1:
2121 case R_AARCH64_MOVW_UABS_G1_NC:
2122 case R_AARCH64_MOVW_UABS_G2:
2123 case R_AARCH64_MOVW_UABS_G2_NC:
2124 case R_AARCH64_MOVW_UABS_G3:
2125 value = value + addend;
2128 case R_AARCH64_ADR_PREL_PG_HI21:
2129 case R_AARCH64_ADR_PREL_PG_HI21_NC:
2132 value = PG (value + addend) - PG (place);
2135 case R_AARCH64_GOT_LD_PREL19:
2136 value = value + addend - place;
2139 case R_AARCH64_ADR_GOT_PAGE:
2140 case R_AARCH64_TLSDESC_ADR_PAGE21:
2141 case R_AARCH64_TLSGD_ADR_PAGE21:
2142 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
2143 value = PG (value + addend) - PG (place);
2146 case R_AARCH64_ADD_ABS_LO12_NC:
2147 case R_AARCH64_LD64_GOT_LO12_NC:
2148 case R_AARCH64_LDST8_ABS_LO12_NC:
2149 case R_AARCH64_LDST16_ABS_LO12_NC:
2150 case R_AARCH64_LDST32_ABS_LO12_NC:
2151 case R_AARCH64_LDST64_ABS_LO12_NC:
2152 case R_AARCH64_LDST128_ABS_LO12_NC:
2153 case R_AARCH64_TLSDESC_ADD_LO12_NC:
2154 case R_AARCH64_TLSDESC_ADD:
2155 case R_AARCH64_TLSDESC_LD64_LO12_NC:
2156 case R_AARCH64_TLSDESC_LDR:
2157 case R_AARCH64_TLSGD_ADD_LO12_NC:
2158 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
2159 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
2160 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
2161 value = PG_OFFSET (value + addend);
2164 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
2165 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
2166 value = (value + addend) & (bfd_vma) 0xffff0000;
2168 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
2169 value = (value + addend) & (bfd_vma) 0xfff000;
2172 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
2173 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
2174 value = (value + addend) & (bfd_vma) 0xffff;
2177 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
2178 value = (value + addend) & ~(bfd_vma) 0xffffffff;
2179 value -= place & ~(bfd_vma) 0xffffffff;
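
/* Illustrative sketch, not part of the original source: the page
   arithmetic behind the ADR_PREL_PG_HI21 / *_LO12_NC pairs handled
   above, assuming PG(x) clears the low 12 bits and PG_OFFSET(x) keeps
   them.  The ADRP materialises the page delta, the ADD or load/store
   immediate supplies the low 12 bits, and together they rebuild the
   target address.  */

static uint64_t
sketch_adrp_add_pair (uint64_t place, uint64_t target)
{
  uint64_t page_delta = (target & ~(uint64_t) 0xfff)
			- (place & ~(uint64_t) 0xfff);
  uint64_t lo12 = target & (uint64_t) 0xfff;

  /* Address computed at run time: ADRP result plus the low-12 immediate;
     this always equals TARGET.  */
  return (place & ~(uint64_t) 0xfff) + page_delta + lo12;
}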
2186 aarch64_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
2187 bfd_vma offset, bfd_vma value)
2189 reloc_howto_type *howto;
2192 howto = elfNN_aarch64_howto_from_type (r_type);
2193 place = (input_section->output_section->vma + input_section->output_offset
2195 value = aarch64_resolve_relocation (r_type, place, value, 0, FALSE);
2196 return bfd_elf_aarch64_put_addend (input_bfd,
2197 input_section->contents + offset,
2201 static enum elf_aarch64_stub_type
2202 aarch64_select_branch_stub (bfd_vma value, bfd_vma place)
2204 if (aarch64_valid_for_adrp_p (value, place))
2205 return aarch64_stub_adrp_branch;
2206 return aarch64_stub_long_branch;
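
/* Illustrative sketch, not part of the original source: the decision made
   above in plain terms.  An adrp+add+br stub only works when the
   destination page lies within the +/-4GB reach of ADRP from the stub
   itself; otherwise the long-branch stub, which loads a full 64-bit
   address, has to be used.  Both addresses are reduced to their 4KB page
   before comparing.  */

static int
sketch_adrp_reachable (uint64_t destination, uint64_t place)
{
  int64_t page_delta = (int64_t) ((destination & ~(uint64_t) 0xfff)
				  - (place & ~(uint64_t) 0xfff));
  return page_delta >= -((int64_t) 1 << 32)
	 && page_delta < ((int64_t) 1 << 32);
}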
2209 /* Determine the type of stub needed, if any, for a call. */
2211 static enum elf_aarch64_stub_type
2212 aarch64_type_of_stub (struct bfd_link_info *info,
2213 asection *input_sec,
2214 const Elf_Internal_Rela *rel,
2215 unsigned char st_type,
2216 struct elf_aarch64_link_hash_entry *hash,
2217 bfd_vma destination)
2220 bfd_signed_vma branch_offset;
2221 unsigned int r_type;
2222 struct elf_aarch64_link_hash_table *globals;
2223 enum elf_aarch64_stub_type stub_type = aarch64_stub_none;
2224 bfd_boolean via_plt_p;
2226 if (st_type != STT_FUNC)
2229 globals = elf_aarch64_hash_table (info);
2230 via_plt_p = (globals->root.splt != NULL && hash != NULL
2231 && hash->root.plt.offset != (bfd_vma) - 1);
2236 /* Determine where the call point is. */
2237 location = (input_sec->output_offset
2238 + input_sec->output_section->vma + rel->r_offset);
2240 branch_offset = (bfd_signed_vma) (destination - location);
2242 r_type = ELFNN_R_TYPE (rel->r_info);
2244 /* We don't want to redirect any old unconditional jump in this way,
2245 only one which is being used for a sibcall, where it is
2246 acceptable for the IP0 and IP1 registers to be clobbered. */
2247 if ((r_type == R_AARCH64_CALL26 || r_type == R_AARCH64_JUMP26)
2248 && (branch_offset > AARCH64_MAX_FWD_BRANCH_OFFSET
2249 || branch_offset < AARCH64_MAX_BWD_BRANCH_OFFSET))
2251 stub_type = aarch64_stub_long_branch;
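
/* Illustrative sketch, not part of the original source: the range check
   that triggers stub creation above.  B and BL encode a signed 26-bit
   word offset, so they reach roughly +/-128MB from the branch site;
   calls beyond that need a veneer.  */

static int
sketch_bl_in_range (int64_t branch_offset)
{
  int64_t max_fwd = ((int64_t) 1 << 27) - 4;	/* +128MB - 4 bytes  */
  int64_t max_bwd = -((int64_t) 1 << 27);	/* -128MB            */
  return branch_offset >= max_bwd && branch_offset <= max_fwd;
}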
2257 /* Build a name for an entry in the stub hash table. */
2260 elfNN_aarch64_stub_name (const asection *input_section,
2261 const asection *sym_sec,
2262 const struct elf_aarch64_link_hash_entry *hash,
2263 const Elf_Internal_Rela *rel)
2270 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
2271 stub_name = bfd_malloc (len);
2272 if (stub_name != NULL)
2273 snprintf (stub_name, len, "%08x_%s+%" BFD_VMA_FMT "x",
2274 (unsigned int) input_section->id,
2275 hash->root.root.root.string,
2280 len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
2281 stub_name = bfd_malloc (len);
2282 if (stub_name != NULL)
2283 snprintf (stub_name, len, "%08x_%x:%x+%" BFD_VMA_FMT "x",
2284 (unsigned int) input_section->id,
2285 (unsigned int) sym_sec->id,
2286 (unsigned int) ELFNN_R_SYM (rel->r_info),
2293 /* Look up an entry in the stub hash. Stub entries are cached because
2294 creating the stub name takes a bit of time. */
2296 static struct elf_aarch64_stub_hash_entry *
2297 elfNN_aarch64_get_stub_entry (const asection *input_section,
2298 const asection *sym_sec,
2299 struct elf_link_hash_entry *hash,
2300 const Elf_Internal_Rela *rel,
2301 struct elf_aarch64_link_hash_table *htab)
2303 struct elf_aarch64_stub_hash_entry *stub_entry;
2304 struct elf_aarch64_link_hash_entry *h =
2305 (struct elf_aarch64_link_hash_entry *) hash;
2306 const asection *id_sec;
2308 if ((input_section->flags & SEC_CODE) == 0)
2311 /* If this input section is part of a group of sections sharing one
2312 stub section, then use the id of the first section in the group.
2313 Stub names need to include a section id, as there may well be
2314 more than one stub used to reach say, printf, and we need to
2315 distinguish between them. */
2316 id_sec = htab->stub_group[input_section->id].link_sec;
2318 if (h != NULL && h->stub_cache != NULL
2319 && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec)
2321 stub_entry = h->stub_cache;
2327 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, h, rel);
2328 if (stub_name == NULL)
2331 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table,
2332 stub_name, FALSE, FALSE);
2334 h->stub_cache = stub_entry;
2342 /* Add a new stub entry to the stub hash. Not all fields of the new
2343 stub entry are initialised. */
2345 static struct elf_aarch64_stub_hash_entry *
2346 elfNN_aarch64_add_stub (const char *stub_name,
2348 struct elf_aarch64_link_hash_table *htab)
2352 struct elf_aarch64_stub_hash_entry *stub_entry;
2354 link_sec = htab->stub_group[section->id].link_sec;
2355 stub_sec = htab->stub_group[section->id].stub_sec;
2356 if (stub_sec == NULL)
2358 stub_sec = htab->stub_group[link_sec->id].stub_sec;
2359 if (stub_sec == NULL)
2365 namelen = strlen (link_sec->name);
2366 len = namelen + sizeof (STUB_SUFFIX);
2367 s_name = bfd_alloc (htab->stub_bfd, len);
2371 memcpy (s_name, link_sec->name, namelen);
2372 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
2373 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
2374 if (stub_sec == NULL)
2376 htab->stub_group[link_sec->id].stub_sec = stub_sec;
2378 htab->stub_group[section->id].stub_sec = stub_sec;
2381 /* Enter this entry into the linker stub hash table. */
2382 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
2384 if (stub_entry == NULL)
2386 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
2387 section->owner, stub_name);
2391 stub_entry->stub_sec = stub_sec;
2392 stub_entry->stub_offset = 0;
2393 stub_entry->id_sec = link_sec;
2399 aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
2400 void *in_arg ATTRIBUTE_UNUSED)
2402 struct elf_aarch64_stub_hash_entry *stub_entry;
2407 unsigned int template_size;
2408 const uint32_t *template;
2411 /* Massage our args to the form they really have. */
2412 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
2414 stub_sec = stub_entry->stub_sec;
2416 /* Make a note of the offset within the stubs for this entry. */
2417 stub_entry->stub_offset = stub_sec->size;
2418 loc = stub_sec->contents + stub_entry->stub_offset;
2420 stub_bfd = stub_sec->owner;
2422 /* This is the address of the stub destination. */
2423 sym_value = (stub_entry->target_value
2424 + stub_entry->target_section->output_offset
2425 + stub_entry->target_section->output_section->vma);
2427 if (stub_entry->stub_type == aarch64_stub_long_branch)
2429 bfd_vma place = (stub_entry->stub_offset + stub_sec->output_section->vma
2430 + stub_sec->output_offset);
2432 /* See if we can relax the stub. */
2433 if (aarch64_valid_for_adrp_p (sym_value, place))
2434 stub_entry->stub_type = aarch64_select_branch_stub (sym_value, place);
2437 switch (stub_entry->stub_type)
2439 case aarch64_stub_adrp_branch:
2440 template = aarch64_adrp_branch_stub;
2441 template_size = sizeof (aarch64_adrp_branch_stub);
2443 case aarch64_stub_long_branch:
2444 template = aarch64_long_branch_stub;
2445 template_size = sizeof (aarch64_long_branch_stub);
2452 for (i = 0; i < (template_size / sizeof template[0]); i++)
2454 bfd_putl32 (template[i], loc);
2458 template_size = (template_size + 7) & ~7;
2459 stub_sec->size += template_size;
2461 switch (stub_entry->stub_type)
2463 case aarch64_stub_adrp_branch:
2464 if (aarch64_relocate (R_AARCH64_ADR_PREL_PG_HI21, stub_bfd, stub_sec,
2465 stub_entry->stub_offset, sym_value))
2466 /* The stub would not have been relaxed if the offset was out of range. */
2470 _bfd_final_link_relocate
2471 (elfNN_aarch64_howto_from_type (R_AARCH64_ADD_ABS_LO12_NC),
2475 stub_entry->stub_offset + 4,
2480 case aarch64_stub_long_branch:
2481 /* We want the value relative to the address 12 bytes back from the value itself. */
2483 _bfd_final_link_relocate (elfNN_aarch64_howto_from_type
2484 (R_AARCH64_PREL64), stub_bfd, stub_sec,
2486 stub_entry->stub_offset + 16,
2496 /* As above, but don't actually build the stub. Just bump offset so
2497 we know stub section sizes. */
2500 aarch64_size_one_stub (struct bfd_hash_entry *gen_entry,
2501 void *in_arg ATTRIBUTE_UNUSED)
2503 struct elf_aarch64_stub_hash_entry *stub_entry;
2506 /* Massage our args to the form they really have. */
2507 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
2509 switch (stub_entry->stub_type)
2511 case aarch64_stub_adrp_branch:
2512 size = sizeof (aarch64_adrp_branch_stub);
2514 case aarch64_stub_long_branch:
2515 size = sizeof (aarch64_long_branch_stub);
2523 size = (size + 7) & ~7;
2524 stub_entry->stub_sec->size += size;
2528 /* External entry points for sizing and building linker stubs. */
2530 /* Set up various things so that we can make a list of input sections
2531 for each output section included in the link. Returns -1 on error,
2532 0 when no stubs will be needed, and 1 on success. */
2535 elfNN_aarch64_setup_section_lists (bfd *output_bfd,
2536 struct bfd_link_info *info)
2539 unsigned int bfd_count;
2540 int top_id, top_index;
2542 asection **input_list, **list;
2544 struct elf_aarch64_link_hash_table *htab =
2545 elf_aarch64_hash_table (info);
2547 if (!is_elf_hash_table (htab))
2550 /* Count the number of input BFDs and find the top input section id. */
2551 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
2552 input_bfd != NULL; input_bfd = input_bfd->link_next)
2555 for (section = input_bfd->sections;
2556 section != NULL; section = section->next)
2558 if (top_id < section->id)
2559 top_id = section->id;
2562 htab->bfd_count = bfd_count;
2564 amt = sizeof (struct map_stub) * (top_id + 1);
2565 htab->stub_group = bfd_zmalloc (amt);
2566 if (htab->stub_group == NULL)
2569 /* We can't use output_bfd->section_count here to find the top output
2570 section index as some sections may have been removed, and
2571 _bfd_strip_section_from_output doesn't renumber the indices. */
2572 for (section = output_bfd->sections, top_index = 0;
2573 section != NULL; section = section->next)
2575 if (top_index < section->index)
2576 top_index = section->index;
2579 htab->top_index = top_index;
2580 amt = sizeof (asection *) * (top_index + 1);
2581 input_list = bfd_malloc (amt);
2582 htab->input_list = input_list;
2583 if (input_list == NULL)
2586 /* For sections we aren't interested in, mark their entries with a
2587 value we can check later. */
2588 list = input_list + top_index;
2590 *list = bfd_abs_section_ptr;
2591 while (list-- != input_list);
2593 for (section = output_bfd->sections;
2594 section != NULL; section = section->next)
2596 if ((section->flags & SEC_CODE) != 0)
2597 input_list[section->index] = NULL;
2603 /* Used by elfNN_aarch64_next_input_section and group_sections. */
2604 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
2606 /* The linker repeatedly calls this function for each input section,
2607 in the order that input sections are linked into output sections.
2608 Build lists of input sections to determine groupings between which
2609 we may insert linker stubs. */
2612 elfNN_aarch64_next_input_section (struct bfd_link_info *info, asection *isec)
2614 struct elf_aarch64_link_hash_table *htab =
2615 elf_aarch64_hash_table (info);
2617 if (isec->output_section->index <= htab->top_index)
2619 asection **list = htab->input_list + isec->output_section->index;
2621 if (*list != bfd_abs_section_ptr)
2623 /* Steal the link_sec pointer for our list. */
2624 /* This happens to make the list in reverse order,
2625 which is what we want. */
2626 PREV_SEC (isec) = *list;
2632 /* See whether we can group stub sections together. Grouping stub
2633 sections may result in fewer stubs. More importantly, we need to
2634 put all .init* and .fini* stubs at the beginning of the .init or
2635 .fini output sections respectively, because glibc splits the
2636 _init and _fini functions into multiple parts. Putting a stub in
2637 the middle of a function is not a good idea. */
2640 group_sections (struct elf_aarch64_link_hash_table *htab,
2641 bfd_size_type stub_group_size,
2642 bfd_boolean stubs_always_before_branch)
2644 asection **list = htab->input_list + htab->top_index;
2648 asection *tail = *list;
2650 if (tail == bfd_abs_section_ptr)
2653 while (tail != NULL)
2657 bfd_size_type total;
2661 while ((prev = PREV_SEC (curr)) != NULL
2662 && ((total += curr->output_offset - prev->output_offset)
2666 /* OK, the size from the start of CURR to the end is less
2667 than stub_group_size and thus can be handled by one stub
2668 section. (Or the tail section is itself larger than
2669 stub_group_size, in which case we may be toast.)
2670 We should really be keeping track of the total size of
2671 stubs added here, as stubs contribute to the final output section size. */
2675 prev = PREV_SEC (tail);
2676 /* Set up this stub group. */
2677 htab->stub_group[tail->id].link_sec = curr;
2679 while (tail != curr && (tail = prev) != NULL);
2681 /* But wait, there's more! Input sections up to stub_group_size
2682 bytes before the stub section can be handled by it too. */
2683 if (!stubs_always_before_branch)
2687 && ((total += tail->output_offset - prev->output_offset)
2691 prev = PREV_SEC (tail);
2692 htab->stub_group[tail->id].link_sec = curr;
2698 while (list-- != htab->input_list);
2700 free (htab->input_list);
2705 /* Determine and set the size of the stub section for a final link.
2707 The basic idea here is to examine all the relocations looking for
2708 PC-relative calls to a target that is unreachable with a "bl" instruction. */
2712 elfNN_aarch64_size_stubs (bfd *output_bfd,
2714 struct bfd_link_info *info,
2715 bfd_signed_vma group_size,
2716 asection * (*add_stub_section) (const char *,
2718 void (*layout_sections_again) (void))
2720 bfd_size_type stub_group_size;
2721 bfd_boolean stubs_always_before_branch;
2722 bfd_boolean stub_changed = 0;
2723 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
2725 /* Propagate mach to stub bfd, because it may not have been
2726 finalized when we created stub_bfd. */
2727 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
2728 bfd_get_mach (output_bfd));
2730 /* Stash our params away. */
2731 htab->stub_bfd = stub_bfd;
2732 htab->add_stub_section = add_stub_section;
2733 htab->layout_sections_again = layout_sections_again;
2734 stubs_always_before_branch = group_size < 0;
2736 stub_group_size = -group_size;
2738 stub_group_size = group_size;
2740 if (stub_group_size == 1)
2742 /* Default values. */
2743 /* AArch64 branch range is +-128MB. The value used is 1MB less. */
2744 stub_group_size = 127 * 1024 * 1024;
2747 group_sections (htab, stub_group_size, stubs_always_before_branch);
2752 unsigned int bfd_indx;
2755 for (input_bfd = info->input_bfds, bfd_indx = 0;
2756 input_bfd != NULL; input_bfd = input_bfd->link_next, bfd_indx++)
2758 Elf_Internal_Shdr *symtab_hdr;
2760 Elf_Internal_Sym *local_syms = NULL;
2762 /* We'll need the symbol table in a second. */
2763 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
2764 if (symtab_hdr->sh_info == 0)
2767 /* Walk over each section attached to the input bfd. */
2768 for (section = input_bfd->sections;
2769 section != NULL; section = section->next)
2771 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
2773 /* If there aren't any relocs, then there's nothing more to do. */
2775 if ((section->flags & SEC_RELOC) == 0
2776 || section->reloc_count == 0
2777 || (section->flags & SEC_CODE) == 0)
2780 /* If this section is a link-once section that will be
2781 discarded, then don't create any stubs. */
2782 if (section->output_section == NULL
2783 || section->output_section->owner != output_bfd)
2786 /* Get the relocs. */
2788 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
2789 NULL, info->keep_memory);
2790 if (internal_relocs == NULL)
2791 goto error_ret_free_local;
2793 /* Now examine each relocation. */
2794 irela = internal_relocs;
2795 irelaend = irela + section->reloc_count;
2796 for (; irela < irelaend; irela++)
2798 unsigned int r_type, r_indx;
2799 enum elf_aarch64_stub_type stub_type;
2800 struct elf_aarch64_stub_hash_entry *stub_entry;
2803 bfd_vma destination;
2804 struct elf_aarch64_link_hash_entry *hash;
2805 const char *sym_name;
2807 const asection *id_sec;
2808 unsigned char st_type;
2811 r_type = ELFNN_R_TYPE (irela->r_info);
2812 r_indx = ELFNN_R_SYM (irela->r_info);
2814 if (r_type >= (unsigned int) R_AARCH64_end)
2816 bfd_set_error (bfd_error_bad_value);
2817 error_ret_free_internal:
2818 if (elf_section_data (section)->relocs == NULL)
2819 free (internal_relocs);
2820 goto error_ret_free_local;
2823 /* Only look for stubs on unconditional branch and
2824 branch and link instructions. */
2825 if (r_type != (unsigned int) R_AARCH64_CALL26
2826 && r_type != (unsigned int) R_AARCH64_JUMP26)
2829 /* Now determine the call target, its name, value, section. */
2836 if (r_indx < symtab_hdr->sh_info)
2838 /* It's a local symbol. */
2839 Elf_Internal_Sym *sym;
2840 Elf_Internal_Shdr *hdr;
2842 if (local_syms == NULL)
2845 = (Elf_Internal_Sym *) symtab_hdr->contents;
2846 if (local_syms == NULL)
2848 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
2849 symtab_hdr->sh_info, 0,
2851 if (local_syms == NULL)
2852 goto error_ret_free_internal;
2855 sym = local_syms + r_indx;
2856 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
2857 sym_sec = hdr->bfd_section;
2859 /* This is an undefined symbol. It can never be resolved. */
2863 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
2864 sym_value = sym->st_value;
2865 destination = (sym_value + irela->r_addend
2866 + sym_sec->output_offset
2867 + sym_sec->output_section->vma);
2868 st_type = ELF_ST_TYPE (sym->st_info);
2870 = bfd_elf_string_from_elf_section (input_bfd,
2871 symtab_hdr->sh_link,
2878 e_indx = r_indx - symtab_hdr->sh_info;
2879 hash = ((struct elf_aarch64_link_hash_entry *)
2880 elf_sym_hashes (input_bfd)[e_indx]);
2882 while (hash->root.root.type == bfd_link_hash_indirect
2883 || hash->root.root.type == bfd_link_hash_warning)
2884 hash = ((struct elf_aarch64_link_hash_entry *)
2885 hash->root.root.u.i.link);
2887 if (hash->root.root.type == bfd_link_hash_defined
2888 || hash->root.root.type == bfd_link_hash_defweak)
2890 struct elf_aarch64_link_hash_table *globals =
2891 elf_aarch64_hash_table (info);
2892 sym_sec = hash->root.root.u.def.section;
2893 sym_value = hash->root.root.u.def.value;
2894 /* For a destination in a shared library,
2895 use the PLT stub as target address to
2896 decide whether a branch stub is needed. */
2898 if (globals->root.splt != NULL && hash != NULL
2899 && hash->root.plt.offset != (bfd_vma) - 1)
2901 sym_sec = globals->root.splt;
2902 sym_value = hash->root.plt.offset;
2903 if (sym_sec->output_section != NULL)
2904 destination = (sym_value
2905 + sym_sec->output_offset
2907 sym_sec->output_section->vma);
2909 else if (sym_sec->output_section != NULL)
2910 destination = (sym_value + irela->r_addend
2911 + sym_sec->output_offset
2912 + sym_sec->output_section->vma);
2914 else if (hash->root.root.type == bfd_link_hash_undefined
2915 || (hash->root.root.type
2916 == bfd_link_hash_undefweak))
2918 /* For a shared library, use the PLT stub as
2919 target address to decide whether a long
2920 branch stub is needed.
2921 For absolute code, they cannot be handled. */
2922 struct elf_aarch64_link_hash_table *globals =
2923 elf_aarch64_hash_table (info);
2925 if (globals->root.splt != NULL && hash != NULL
2926 && hash->root.plt.offset != (bfd_vma) - 1)
2928 sym_sec = globals->root.splt;
2929 sym_value = hash->root.plt.offset;
2930 if (sym_sec->output_section != NULL)
2931 destination = (sym_value
2932 + sym_sec->output_offset
2934 sym_sec->output_section->vma);
2941 bfd_set_error (bfd_error_bad_value);
2942 goto error_ret_free_internal;
2944 st_type = ELF_ST_TYPE (hash->root.type);
2945 sym_name = hash->root.root.root.string;
2948 /* Determine what (if any) linker stub is needed. */
2949 stub_type = aarch64_type_of_stub
2950 (info, section, irela, st_type, hash, destination);
2951 if (stub_type == aarch64_stub_none)
2954 /* Support for grouping stub sections. */
2955 id_sec = htab->stub_group[section->id].link_sec;
2957 /* Get the name of this stub. */
2958 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, hash,
2961 goto error_ret_free_internal;
2964 aarch64_stub_hash_lookup (&htab->stub_hash_table,
2965 stub_name, FALSE, FALSE);
2966 if (stub_entry != NULL)
2968 /* The proper stub has already been created. */
2973 stub_entry = elfNN_aarch64_add_stub (stub_name, section,
2975 if (stub_entry == NULL)
2978 goto error_ret_free_internal;
2981 stub_entry->target_value = sym_value;
2982 stub_entry->target_section = sym_sec;
2983 stub_entry->stub_type = stub_type;
2984 stub_entry->h = hash;
2985 stub_entry->st_type = st_type;
2987 if (sym_name == NULL)
2988 sym_name = "unnamed";
2989 len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
2990 stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
2991 if (stub_entry->output_name == NULL)
2994 goto error_ret_free_internal;
2997 snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
3000 stub_changed = TRUE;
3003 /* We're done with the internal relocs, free them. */
3004 if (elf_section_data (section)->relocs == NULL)
3005 free (internal_relocs);
3012 /* OK, we've added some stubs. Find out the new size of the stub sections. */
3014 for (stub_sec = htab->stub_bfd->sections;
3015 stub_sec != NULL; stub_sec = stub_sec->next)
3018 bfd_hash_traverse (&htab->stub_hash_table, aarch64_size_one_stub, htab);
3020 /* Ask the linker to do its stuff. */
3021 (*htab->layout_sections_again) ();
3022 stub_changed = FALSE;
3027 error_ret_free_local:
3031 /* Build all the stubs associated with the current output file. The
3032 stubs are kept in a hash table attached to the main linker hash
3033 table. We also set up the .plt entries for statically linked PIC
3034 functions here. This function is called via aarch64_elf_finish in the linker. */
3038 elfNN_aarch64_build_stubs (struct bfd_link_info *info)
3041 struct bfd_hash_table *table;
3042 struct elf_aarch64_link_hash_table *htab;
3044 htab = elf_aarch64_hash_table (info);
3046 for (stub_sec = htab->stub_bfd->sections;
3047 stub_sec != NULL; stub_sec = stub_sec->next)
3051 /* Ignore non-stub sections. */
3052 if (!strstr (stub_sec->name, STUB_SUFFIX))
3055 /* Allocate memory to hold the linker stubs. */
3056 size = stub_sec->size;
3057 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
3058 if (stub_sec->contents == NULL && size != 0)
3063 /* Build the stubs as directed by the stub hash table. */
3064 table = &htab->stub_hash_table;
3065 bfd_hash_traverse (table, aarch64_build_one_stub, info);
3071 /* Add an entry to the code/data map for section SEC. */
3074 elfNN_aarch64_section_map_add (asection *sec, char type, bfd_vma vma)
3076 struct _aarch64_elf_section_data *sec_data =
3077 elf_aarch64_section_data (sec);
3078 unsigned int newidx;
3080 if (sec_data->map == NULL)
3082 sec_data->map = bfd_malloc (sizeof (elf_aarch64_section_map));
3083 sec_data->mapcount = 0;
3084 sec_data->mapsize = 1;
3087 newidx = sec_data->mapcount++;
3089 if (sec_data->mapcount > sec_data->mapsize)
3091 sec_data->mapsize *= 2;
3092 sec_data->map = bfd_realloc_or_free
3093 (sec_data->map, sec_data->mapsize * sizeof (elf_aarch64_section_map));
3098 sec_data->map[newidx].vma = vma;
3099 sec_data->map[newidx].type = type;
3104 /* Initialise maps of insn/data for input BFDs. */
3106 bfd_elfNN_aarch64_init_maps (bfd *abfd)
3108 Elf_Internal_Sym *isymbuf;
3109 Elf_Internal_Shdr *hdr;
3110 unsigned int i, localsyms;
3112 /* Make sure that we are dealing with an AArch64 elf binary. */
3113 if (!is_aarch64_elf (abfd))
3116 if ((abfd->flags & DYNAMIC) != 0)
3119 hdr = &elf_symtab_hdr (abfd);
3120 localsyms = hdr->sh_info;
3122 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
3123 should contain the number of local symbols, which should come before any
3124 global symbols. Mapping symbols are always local. */
3125 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL, NULL);
3127 /* No internal symbols read? Skip this BFD. */
3128 if (isymbuf == NULL)
3131 for (i = 0; i < localsyms; i++)
3133 Elf_Internal_Sym *isym = &isymbuf[i];
3134 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
3137 if (sec != NULL && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
3139 name = bfd_elf_string_from_elf_section (abfd,
3143 if (bfd_is_aarch64_special_symbol_name
3144 (name, BFD_AARCH64_SPECIAL_SYM_TYPE_MAP))
3145 elfNN_aarch64_section_map_add (sec, name[1], isym->st_value);
3150 /* Set option values needed during linking. */
3152 bfd_elfNN_aarch64_set_options (struct bfd *output_bfd,
3153 struct bfd_link_info *link_info,
3155 int no_wchar_warn, int pic_veneer)
3157 struct elf_aarch64_link_hash_table *globals;
3159 globals = elf_aarch64_hash_table (link_info);
3160 globals->pic_veneer = pic_veneer;
3162 BFD_ASSERT (is_aarch64_elf (output_bfd));
3163 elf_aarch64_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
3164 elf_aarch64_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
3167 #define MASK(n) ((1u << (n)) - 1)
3169 /* Decode the 26-bit offset of unconditional branch. */
3170 static inline uint32_t
3171 decode_branch_ofs_26 (uint32_t insn)
3173 return insn & MASK (26);
3176 /* Decode the 19-bit offset of conditional branch and compare & branch. */
3177 static inline uint32_t
3178 decode_cond_branch_ofs_19 (uint32_t insn)
3180 return (insn >> 5) & MASK (19);
3183 /* Decode the 19-bit offset of load literal. */
3184 static inline uint32_t
3185 decode_ld_lit_ofs_19 (uint32_t insn)
3187 return (insn >> 5) & MASK (19);
3190 /* Decode the 14-bit offset of test & branch. */
3191 static inline uint32_t
3192 decode_tst_branch_ofs_14 (uint32_t insn)
3194 return (insn >> 5) & MASK (14);
3197 /* Decode the 16-bit imm of move wide. */
3198 static inline uint32_t
3199 decode_movw_imm (uint32_t insn)
3201 return (insn >> 5) & MASK (16);
3204 /* Decode the 21-bit imm of adr. */
3205 static inline uint32_t
3206 decode_adr_imm (uint32_t insn)
3208 return ((insn >> 29) & MASK (2)) | ((insn >> 3) & (MASK (19) << 2));
3211 /* Decode the 12-bit imm of add immediate. */
3212 static inline uint32_t
3213 decode_add_imm (uint32_t insn)
3215 return (insn >> 10) & MASK (12);
3219 /* Encode the 26-bit offset of unconditional branch. */
3220 static inline uint32_t
3221 reencode_branch_ofs_26 (uint32_t insn, uint32_t ofs)
3223 return (insn & ~MASK (26)) | (ofs & MASK (26));
3226 /* Encode the 19-bit offset of conditional branch and compare & branch. */
3227 static inline uint32_t
3228 reencode_cond_branch_ofs_19 (uint32_t insn, uint32_t ofs)
3230 return (insn & ~(MASK (19) << 5)) | ((ofs & MASK (19)) << 5);
3233 /* Encode the 19-bit offset of load literal. */
3234 static inline uint32_t
3235 reencode_ld_lit_ofs_19 (uint32_t insn, uint32_t ofs)
3237 return (insn & ~(MASK (19) << 5)) | ((ofs & MASK (19)) << 5);
3240 /* Encode the 14-bit offset of test & branch. */
3241 static inline uint32_t
3242 reencode_tst_branch_ofs_14 (uint32_t insn, uint32_t ofs)
3244 return (insn & ~(MASK (14) << 5)) | ((ofs & MASK (14)) << 5);
3247 /* Reencode the imm field of move wide. */
3248 static inline uint32_t
3249 reencode_movw_imm (uint32_t insn, uint32_t imm)
3251 return (insn & ~(MASK (16) << 5)) | ((imm & MASK (16)) << 5);
3254 /* Reencode the imm field of adr. */
3255 static inline uint32_t
3256 reencode_adr_imm (uint32_t insn, uint32_t imm)
3258 return (insn & ~((MASK (2) << 29) | (MASK (19) << 5)))
3259 | ((imm & MASK (2)) << 29) | ((imm & (MASK (19) << 2)) << 3);
3262 /* Reencode the imm field of ld/st pos immediate. */
3263 static inline uint32_t
3264 reencode_ldst_pos_imm (uint32_t insn, uint32_t imm)
3266 return (insn & ~(MASK (12) << 10)) | ((imm & MASK (12)) << 10);
3269 /* Reencode the imm field of add immediate. */
3270 static inline uint32_t
3271 reencode_add_imm (uint32_t insn, uint32_t imm)
3273 return (insn & ~(MASK (12) << 10)) | ((imm & MASK (12)) << 10);
3276 /* Reencode mov[zn] to movz. */
3277 static inline uint32_t
3278 reencode_movzn_to_movz (uint32_t opcode)
3280 return opcode | (1 << 30);
3283 /* Reencode mov[zn] to movn. */
3284 static inline uint32_t
3285 reencode_movzn_to_movn (uint32_t opcode)
3287 return opcode & ~(1 << 30);
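
/* Illustrative sketch, not part of the original source: how the helpers
   above are typically used.  0x14000000 is the fixed opcode bits of an
   unconditional "b"; the byte offset is a multiple of 4 and is stored
   shifted right by two in the low 26 bits.  */

static uint32_t
sketch_make_b_insn (int64_t byte_offset)
{
  return reencode_branch_ofs_26 (0x14000000u, (uint32_t) (byte_offset >> 2));
}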
3290 /* Insert the addend/value into the instruction or data object being relocated. */
3292 static bfd_reloc_status_type
3293 bfd_elf_aarch64_put_addend (bfd *abfd,
3295 reloc_howto_type *howto, bfd_signed_vma addend)
3297 bfd_reloc_status_type status = bfd_reloc_ok;
3298 bfd_signed_vma old_addend = addend;
3302 size = bfd_get_reloc_size (howto);
3306 contents = bfd_get_16 (abfd, address);
3309 if (howto->src_mask != 0xffffffff)
3310 /* Must be 32-bit instruction, always little-endian. */
3311 contents = bfd_getl32 (address);
3313 /* Must be 32-bit data (endianness dependent). */
3314 contents = bfd_get_32 (abfd, address);
3317 contents = bfd_get_64 (abfd, address);
3323 switch (howto->complain_on_overflow)
3325 case complain_overflow_dont:
3327 case complain_overflow_signed:
3328 status = aarch64_signed_overflow (addend,
3329 howto->bitsize + howto->rightshift);
3331 case complain_overflow_unsigned:
3332 status = aarch64_unsigned_overflow (addend,
3333 howto->bitsize + howto->rightshift);
3335 case complain_overflow_bitfield:
3340 addend >>= howto->rightshift;
3342 switch (howto->type)
3344 case R_AARCH64_JUMP26:
3345 case R_AARCH64_CALL26:
3346 contents = reencode_branch_ofs_26 (contents, addend);
3349 case R_AARCH64_CONDBR19:
3350 contents = reencode_cond_branch_ofs_19 (contents, addend);
3353 case R_AARCH64_TSTBR14:
3354 contents = reencode_tst_branch_ofs_14 (contents, addend);
3357 case R_AARCH64_LD_PREL_LO19:
3358 case R_AARCH64_GOT_LD_PREL19:
3359 if (old_addend & ((1 << howto->rightshift) - 1))
3360 return bfd_reloc_overflow;
3361 contents = reencode_ld_lit_ofs_19 (contents, addend);
3364 case R_AARCH64_TLSDESC_CALL:
3367 case R_AARCH64_TLSGD_ADR_PAGE21:
3368 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3369 case R_AARCH64_TLSDESC_ADR_PAGE21:
3370 case R_AARCH64_ADR_GOT_PAGE:
3371 case R_AARCH64_ADR_PREL_LO21:
3372 case R_AARCH64_ADR_PREL_PG_HI21:
3373 case R_AARCH64_ADR_PREL_PG_HI21_NC:
3374 contents = reencode_adr_imm (contents, addend);
3377 case R_AARCH64_TLSGD_ADD_LO12_NC:
3378 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
3379 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
3380 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
3381 case R_AARCH64_TLSDESC_ADD_LO12_NC:
3382 case R_AARCH64_ADD_ABS_LO12_NC:
3383 /* Corresponds to: add rd, rn, #uimm12 to provide the low order
3384 12 bits of the page offset following
3385 R_AARCH64_ADR_PREL_PG_HI21 which computes the
3386 (pc-relative) page base. */
3387 contents = reencode_add_imm (contents, addend);
3390 case R_AARCH64_LDST8_ABS_LO12_NC:
3391 case R_AARCH64_LDST16_ABS_LO12_NC:
3392 case R_AARCH64_LDST32_ABS_LO12_NC:
3393 case R_AARCH64_LDST64_ABS_LO12_NC:
3394 case R_AARCH64_LDST128_ABS_LO12_NC:
3395 case R_AARCH64_TLSDESC_LD64_LO12_NC:
3396 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
3397 case R_AARCH64_LD64_GOT_LO12_NC:
3398 if (old_addend & ((1 << howto->rightshift) - 1))
3399 return bfd_reloc_overflow;
3400 /* Used for ldr*|str* rt, [rn, #uimm12] to provide the low order
3401 12 bits of the page offset following R_AARCH64_ADR_PREL_PG_HI21
3402 which computes the (pc-relative) page base. */
3403 contents = reencode_ldst_pos_imm (contents, addend);
3406 /* Group relocations to create high bits of a 16, 32, 48 or 64
3407 bit signed data or abs address inline. Will change
3408 instruction to MOVN or MOVZ depending on sign of calculated value. */
3411 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
3412 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
3413 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
3414 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
3415 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
3416 case R_AARCH64_MOVW_SABS_G0:
3417 case R_AARCH64_MOVW_SABS_G1:
3418 case R_AARCH64_MOVW_SABS_G2:
3419 /* NOTE: We can only come here with movz or movn. */
3422 /* Force use of MOVN. */
3424 contents = reencode_movzn_to_movn (contents);
3428 /* Force use of MOVZ. */
3429 contents = reencode_movzn_to_movz (contents);
3433 /* Group relocations to create a 16, 32, 48 or 64 bit unsigned
3434 data or abs address inline. */
3436 case R_AARCH64_MOVW_UABS_G0:
3437 case R_AARCH64_MOVW_UABS_G0_NC:
3438 case R_AARCH64_MOVW_UABS_G1:
3439 case R_AARCH64_MOVW_UABS_G1_NC:
3440 case R_AARCH64_MOVW_UABS_G2:
3441 case R_AARCH64_MOVW_UABS_G2_NC:
3442 case R_AARCH64_MOVW_UABS_G3:
3443 contents = reencode_movw_imm (contents, addend);
3447 /* Repack simple data. */
3448 if (howto->dst_mask & (howto->dst_mask + 1))
3449 return bfd_reloc_notsupported;
3451 contents = ((contents & ~howto->dst_mask) | (addend & howto->dst_mask));
3458 bfd_put_16 (abfd, contents, address);
3461 if (howto->dst_mask != 0xffffffff)
3462 /* Must be 32-bit instruction, always little-endian. */
3463 bfd_putl32 (contents, address);
3465 /* Must be 32-bit data (endianness dependent). */
3466 bfd_put_32 (abfd, contents, address);
3469 bfd_put_64 (abfd, contents, address);
3479 aarch64_calculate_got_entry_vma (struct elf_link_hash_entry *h,
3480 struct elf_aarch64_link_hash_table
3481 *globals, struct bfd_link_info *info,
3482 bfd_vma value, bfd *output_bfd,
3483 bfd_boolean *unresolved_reloc_p)
3485 bfd_vma off = (bfd_vma) - 1;
3486 asection *basegot = globals->root.sgot;
3487 bfd_boolean dyn = globals->root.dynamic_sections_created;
3491 off = h->got.offset;
3492 BFD_ASSERT (off != (bfd_vma) - 1);
3493 if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
3495 && SYMBOL_REFERENCES_LOCAL (info, h))
3496 || (ELF_ST_VISIBILITY (h->other)
3497 && h->root.type == bfd_link_hash_undefweak))
3499 /* This is actually a static link, or it is a -Bsymbolic link
3500 and the symbol is defined locally. We must initialize this
3501 entry in the global offset table. Since the offset must
3502 always be a multiple of 8, we use the least significant bit
3503 to record whether we have initialized it already.
3504 When doing a dynamic link, we create a .rel(a).got relocation
3505 entry to initialize the value. This is done in the
3506 finish_dynamic_symbol routine. */
3511 bfd_put_NN (output_bfd, value, basegot->contents + off);
3516 *unresolved_reloc_p = FALSE;
3518 off = off + basegot->output_section->vma + basegot->output_offset;
3524 /* Change R_TYPE to a more efficient access model where possible,
3525 return the new reloc type. */
3528 aarch64_tls_transition_without_check (unsigned int r_type,
3529 struct elf_link_hash_entry *h)
3531 bfd_boolean is_local = h == NULL;
3534 case R_AARCH64_TLSGD_ADR_PAGE21:
3535 case R_AARCH64_TLSDESC_ADR_PAGE21:
3537 ? R_AARCH64_TLSLE_MOVW_TPREL_G1 : R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21;
3539 case R_AARCH64_TLSGD_ADD_LO12_NC:
3540 case R_AARCH64_TLSDESC_LD64_LO12_NC:
3542 ? R_AARCH64_TLSLE_MOVW_TPREL_G0_NC
3543 : R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC;
3545 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3546 return is_local ? R_AARCH64_TLSLE_MOVW_TPREL_G1 : r_type;
3548 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
3549 return is_local ? R_AARCH64_TLSLE_MOVW_TPREL_G0_NC : r_type;
3551 case R_AARCH64_TLSDESC_ADD_LO12_NC:
3552 case R_AARCH64_TLSDESC_CALL:
3553 /* Instructions with these relocations will become NOPs. */
3554 return R_AARCH64_NONE;
3561 aarch64_reloc_got_type (unsigned int r_type)
3565 case R_AARCH64_LD64_GOT_LO12_NC:
3566 case R_AARCH64_ADR_GOT_PAGE:
3567 case R_AARCH64_GOT_LD_PREL19:
3570 case R_AARCH64_TLSGD_ADR_PAGE21:
3571 case R_AARCH64_TLSGD_ADD_LO12_NC:
3574 case R_AARCH64_TLSDESC_ADD_LO12_NC:
3575 case R_AARCH64_TLSDESC_ADR_PAGE21:
3576 case R_AARCH64_TLSDESC_CALL:
3577 case R_AARCH64_TLSDESC_LD64_LO12_NC:
3578 return GOT_TLSDESC_GD;
3580 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3581 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
3584 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
3585 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
3586 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
3587 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
3588 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
3589 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
3590 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
3591 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
3598 aarch64_can_relax_tls (bfd *input_bfd,
3599 struct bfd_link_info *info,
3600 unsigned int r_type,
3601 struct elf_link_hash_entry *h,
3602 unsigned long r_symndx)
3604 unsigned int symbol_got_type;
3605 unsigned int reloc_got_type;
3607 if (! IS_AARCH64_TLS_RELOC (r_type))
3610 symbol_got_type = elfNN_aarch64_symbol_got_type (h, input_bfd, r_symndx);
3611 reloc_got_type = aarch64_reloc_got_type (r_type);
3613 if (symbol_got_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (reloc_got_type))
3619 if (h && h->root.type == bfd_link_hash_undefweak)
3626 aarch64_tls_transition (bfd *input_bfd,
3627 struct bfd_link_info *info,
3628 unsigned int r_type,
3629 struct elf_link_hash_entry *h,
3630 unsigned long r_symndx)
3632 if (! aarch64_can_relax_tls (input_bfd, info, r_type, h, r_symndx))
3635 return aarch64_tls_transition_without_check (r_type, h);
3638 /* Return the base VMA address which should be subtracted from real addresses
3639 when resolving R_AARCH64_TLS_DTPREL64 relocation. */
3642 dtpoff_base (struct bfd_link_info *info)
3644 /* If tls_sec is NULL, we should have signalled an error already. */
3645 BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
3646 return elf_hash_table (info)->tls_sec->vma;
3650 /* Return the base VMA address which should be subtracted from real addresses
3651 when resolving R_AARCH64_TLS_GOTTPREL64 relocations. */
3654 tpoff_base (struct bfd_link_info *info)
3656 struct elf_link_hash_table *htab = elf_hash_table (info);
3658 /* If tls_sec is NULL, we should have signalled an error already. */
3659 if (htab->tls_sec == NULL)
3662 bfd_vma base = align_power ((bfd_vma) TCB_SIZE,
3663 htab->tls_sec->alignment_power);
3664 return htab->tls_sec->vma - base;
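
/* Illustrative sketch, not part of the original source: the value a
   TLSLE_* relocation ends up encoding.  The thread pointer addresses a
   16-byte TCB placed before the TLS block and rounded up to the block's
   alignment, so a variable at offset OFF into the TLS segment is reached
   as tpidr_el0 + round_up (16, align) + OFF.  ALIGN is assumed to be a
   power of two.  */

static uint64_t
sketch_tprel_offset (uint64_t off, uint64_t align)
{
  uint64_t tcb_size = 16;	/* TCB_SIZE on AArch64.  */
  uint64_t base = (tcb_size + align - 1) & ~(align - 1);
  return base + off;
}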
3668 symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
3669 unsigned long r_symndx)
3671 /* Calculate the address of the GOT entry for symbol
3672 referred to in h. */
3674 return &h->got.offset;
3678 struct elf_aarch64_local_symbol *l;
3680 l = elf_aarch64_locals (input_bfd);
3681 return &l[r_symndx].got_offset;
3686 symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
3687 unsigned long r_symndx)
3690 p = symbol_got_offset_ref (input_bfd, h, r_symndx);
3695 symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h,
3696 unsigned long r_symndx)
3699 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
3704 symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
3705 unsigned long r_symndx)
3708 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
3714 symbol_tlsdesc_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
3715 unsigned long r_symndx)
3717 /* Calculate the address of the GOT entry for symbol
3718 referred to in h. */
3721 struct elf_aarch64_link_hash_entry *eh;
3722 eh = (struct elf_aarch64_link_hash_entry *) h;
3723 return &eh->tlsdesc_got_jump_table_offset;
3728 struct elf_aarch64_local_symbol *l;
3730 l = elf_aarch64_locals (input_bfd);
3731 return &l[r_symndx].tlsdesc_got_jump_table_offset;
3736 symbol_tlsdesc_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
3737 unsigned long r_symndx)
3740 p = symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3745 symbol_tlsdesc_got_offset_mark_p (bfd *input_bfd,
3746 struct elf_link_hash_entry *h,
3747 unsigned long r_symndx)
3750 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3755 symbol_tlsdesc_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
3756 unsigned long r_symndx)
3759 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
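
/* Illustrative sketch, not part of the original source, of the marking
   scheme used by the *_mark / *_mark_p helpers above: GOT entries are
   8-byte aligned, so bit 0 of the recorded offset is free to note that
   the entry has already been dealt with, and the real offset is obtained
   by masking that bit off again.  */

static void
sketch_got_mark (uint64_t *off)
{
  *off |= 1;
}

static int
sketch_got_marked_p (uint64_t off)
{
  return (off & 1) != 0;
}

static uint64_t
sketch_got_offset (uint64_t off)
{
  return off & ~(uint64_t) 1;
}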
3764 /* Perform a relocation as part of a final link. */
3765 static bfd_reloc_status_type
3766 elfNN_aarch64_final_link_relocate (reloc_howto_type *howto,
3769 asection *input_section,
3771 Elf_Internal_Rela *rel,
3773 struct bfd_link_info *info,
3775 struct elf_link_hash_entry *h,
3776 bfd_boolean *unresolved_reloc_p,
3777 bfd_boolean save_addend,
3778 bfd_vma *saved_addend)
3780 unsigned int r_type = howto->type;
3781 unsigned long r_symndx;
3782 bfd_byte *hit_data = contents + rel->r_offset;
3784 bfd_signed_vma signed_addend;
3785 struct elf_aarch64_link_hash_table *globals;
3786 bfd_boolean weak_undef_p;
3788 globals = elf_aarch64_hash_table (info);
3790 BFD_ASSERT (is_aarch64_elf (input_bfd));
3792 r_symndx = ELFNN_R_SYM (rel->r_info);
3794 /* It is possible to have linker relaxations on some TLS access
3795 models. Update our information here. */
3796 r_type = aarch64_tls_transition (input_bfd, info, r_type, h, r_symndx);
3798 if (r_type != howto->type)
3799 howto = elfNN_aarch64_howto_from_type (r_type);
3801 place = input_section->output_section->vma
3802 + input_section->output_offset + rel->r_offset;
3804 /* Get addend, accumulating the addend for consecutive relocs
3805 which refer to the same offset. */
3806 signed_addend = saved_addend ? *saved_addend : 0;
3807 signed_addend += rel->r_addend;
3809 weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
3810 : bfd_is_und_section (sym_sec));
3813 case R_AARCH64_NONE:
3814 case R_AARCH64_NULL:
3815 case R_AARCH64_TLSDESC_CALL:
3816 *unresolved_reloc_p = FALSE;
3817 return bfd_reloc_ok;
3819 case R_AARCH64_ABS64:
3821 /* When generating a shared object or relocatable executable, these
3822 relocations are copied into the output file to be resolved at run time. */
3824 if (((info->shared == TRUE) || globals->root.is_relocatable_executable)
3825 && (input_section->flags & SEC_ALLOC)
3827 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3828 || h->root.type != bfd_link_hash_undefweak))
3830 Elf_Internal_Rela outrel;
3832 bfd_boolean skip, relocate;
3835 *unresolved_reloc_p = FALSE;
3837 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd,
3840 return bfd_reloc_notsupported;
3845 outrel.r_addend = signed_addend;
3847 _bfd_elf_section_offset (output_bfd, info, input_section,
3849 if (outrel.r_offset == (bfd_vma) - 1)
3851 else if (outrel.r_offset == (bfd_vma) - 2)
3857 outrel.r_offset += (input_section->output_section->vma
3858 + input_section->output_offset);
3861 memset (&outrel, 0, sizeof outrel);
3864 && (!info->shared || !info->symbolic || !h->def_regular))
3865 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
3870 /* On SVR4-ish systems, the dynamic loader cannot
3871 relocate the text and data segments independently,
3872 so the symbol does not matter. */
3874 outrel.r_info = ELFNN_R_INFO (symbol, R_AARCH64_RELATIVE);
3875 outrel.r_addend += value;
3878 loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (htab);
3879 bfd_elfNN_swap_reloca_out (output_bfd, &outrel, loc);
3881 if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
3883 /* Sanity check that we have previously allocated
3884 sufficient space in the relocation section for the
3885 number of relocations we actually want to emit. */
3889 /* If this reloc is against an external symbol, we do not want to
3890 fiddle with the addend. Otherwise, we need to include the symbol
3891 value so that it becomes an addend for the dynamic reloc. */
3893 return bfd_reloc_ok;
3895 return _bfd_final_link_relocate (howto, input_bfd, input_section,
3896 contents, rel->r_offset, value,
3900 value += signed_addend;
3903 case R_AARCH64_JUMP26:
3904 case R_AARCH64_CALL26:
3906 asection *splt = globals->root.splt;
3907 bfd_boolean via_plt_p =
3908 splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1;
3910 /* A call to an undefined weak symbol is converted to a jump to
3911 the next instruction unless a PLT entry will be created.
3912 The jump to the next instruction is optimized as a NOP.
3913 Do the same for local undefined symbols. */
3914 if (weak_undef_p && ! via_plt_p)
3916 bfd_putl32 (INSN_NOP, hit_data);
3917 return bfd_reloc_ok;
3920 /* If the call goes through a PLT entry, make sure to
3921 check distance to the right destination address. */
3924 value = (splt->output_section->vma
3925 + splt->output_offset + h->plt.offset);
3926 *unresolved_reloc_p = FALSE;
3929 /* If the target symbol is global and marked as a function the
3930 relocation applies a function call or a tail call. In this
3931 situation we can veneer out of range branches. The veneers
3932 use IP0 and IP1, hence they cannot be used for arbitrary out-of-range
3933 branches that occur within the body of a function. */
3934 if (h && h->type == STT_FUNC)
3936 /* Check if a stub has to be inserted because the destination is too far away. */
3938 if (! aarch64_valid_branch_p (value, place))
3940 /* The target is out of reach, so redirect the branch to
3941 the local stub for this function. */
3942 struct elf_aarch64_stub_hash_entry *stub_entry;
3943 stub_entry = elfNN_aarch64_get_stub_entry (input_section,
3946 if (stub_entry != NULL)
3947 value = (stub_entry->stub_offset
3948 + stub_entry->stub_sec->output_offset
3949 + stub_entry->stub_sec->output_section->vma);
3953 value = aarch64_resolve_relocation (r_type, place, value,
3954 signed_addend, weak_undef_p);
3957 case R_AARCH64_ABS16:
3958 case R_AARCH64_ABS32:
3959 case R_AARCH64_ADD_ABS_LO12_NC:
3960 case R_AARCH64_ADR_PREL_LO21:
3961 case R_AARCH64_ADR_PREL_PG_HI21:
3962 case R_AARCH64_ADR_PREL_PG_HI21_NC:
3963 case R_AARCH64_CONDBR19:
3964 case R_AARCH64_LD_PREL_LO19:
3965 case R_AARCH64_LDST8_ABS_LO12_NC:
3966 case R_AARCH64_LDST16_ABS_LO12_NC:
3967 case R_AARCH64_LDST32_ABS_LO12_NC:
3968 case R_AARCH64_LDST64_ABS_LO12_NC:
3969 case R_AARCH64_LDST128_ABS_LO12_NC:
3970 case R_AARCH64_MOVW_SABS_G0:
3971 case R_AARCH64_MOVW_SABS_G1:
3972 case R_AARCH64_MOVW_SABS_G2:
3973 case R_AARCH64_MOVW_UABS_G0:
3974 case R_AARCH64_MOVW_UABS_G0_NC:
3975 case R_AARCH64_MOVW_UABS_G1:
3976 case R_AARCH64_MOVW_UABS_G1_NC:
3977 case R_AARCH64_MOVW_UABS_G2:
3978 case R_AARCH64_MOVW_UABS_G2_NC:
3979 case R_AARCH64_MOVW_UABS_G3:
3980 case R_AARCH64_PREL16:
3981 case R_AARCH64_PREL32:
3982 case R_AARCH64_PREL64:
3983 case R_AARCH64_TSTBR14:
3984 value = aarch64_resolve_relocation (r_type, place, value,
3985 signed_addend, weak_undef_p);
3988 case R_AARCH64_LD64_GOT_LO12_NC:
3989 case R_AARCH64_ADR_GOT_PAGE:
3990 case R_AARCH64_GOT_LD_PREL19:
3991 if (globals->root.sgot == NULL)
3992 BFD_ASSERT (h != NULL);
3996 value = aarch64_calculate_got_entry_vma (h, globals, info, value,
3998 unresolved_reloc_p);
3999 value = aarch64_resolve_relocation (r_type, place, value,
4004 case R_AARCH64_TLSGD_ADR_PAGE21:
4005 case R_AARCH64_TLSGD_ADD_LO12_NC:
4006 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4007 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4008 if (globals->root.sgot == NULL)
4009 return bfd_reloc_notsupported;
4011 value = (symbol_got_offset (input_bfd, h, r_symndx)
4012 + globals->root.sgot->output_section->vma
4013 + globals->root.sgot->output_section->output_offset);
4015 value = aarch64_resolve_relocation (r_type, place, value,
4017 *unresolved_reloc_p = FALSE;
4020 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
4021 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
4022 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
4023 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
4024 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4025 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
4026 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4027 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
4028 value = aarch64_resolve_relocation (r_type, place, value,
4029 signed_addend - tpoff_base (info), weak_undef_p);
4030 *unresolved_reloc_p = FALSE;
4033 case R_AARCH64_TLSDESC_ADR_PAGE21:
4034 case R_AARCH64_TLSDESC_LD64_LO12_NC:
4035 case R_AARCH64_TLSDESC_ADD_LO12_NC:
4036 case R_AARCH64_TLSDESC_ADD:
4037 case R_AARCH64_TLSDESC_LDR:
4038 if (globals->root.sgot == NULL)
4039 return bfd_reloc_notsupported;
4041 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
4042 + globals->root.sgotplt->output_section->vma
4043 + globals->root.sgotplt->output_section->output_offset
4044 + globals->sgotplt_jump_table_size);
4046 value = aarch64_resolve_relocation (r_type, place, value,
4048 *unresolved_reloc_p = FALSE;
4052 return bfd_reloc_notsupported;
4056 *saved_addend = value;
4058 /* Only apply the final relocation in a sequence. */
4060 return bfd_reloc_continue;
4062 return bfd_elf_aarch64_put_addend (input_bfd, hit_data, howto, value);
4065 /* Handle TLS relaxations. Relaxing is possible for symbols that use
4066 R_AARCH64_TLSDESC_{ADR_PAGE21, LD64_LO12_NC, ADD_LO12_NC} during a static link.
4069 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
4070 is to then call final_link_relocate. Return other values in the case of error. */
4073 static bfd_reloc_status_type
4074 elfNN_aarch64_tls_relax (struct elf_aarch64_link_hash_table *globals,
4075 bfd *input_bfd, bfd_byte *contents,
4076 Elf_Internal_Rela *rel, struct elf_link_hash_entry *h)
4078 bfd_boolean is_local = h == NULL;
4079 unsigned int r_type = ELFNN_R_TYPE (rel->r_info);
4082 BFD_ASSERT (globals && input_bfd && contents && rel);
4086 case R_AARCH64_TLSGD_ADR_PAGE21:
4087 case R_AARCH64_TLSDESC_ADR_PAGE21:
4090 /* GD->LE relaxation:
4091 adrp x0, :tlsgd:var => movz x0, :tprel_g1:var
4093 adrp x0, :tlsdesc:var => movz x0, :tprel_g1:var
4095 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
4096 return bfd_reloc_continue;
4100 /* GD->IE relaxation:
4101 adrp x0, :tlsgd:var => adrp x0, :gottprel:var
4103 adrp x0, :tlsdesc:var => adrp x0, :gottprel:var
4105 insn = bfd_getl32 (contents + rel->r_offset);
4106 return bfd_reloc_continue;
4109 case R_AARCH64_TLSDESC_LD64_LO12_NC:
4112 /* GD->LE relaxation:
4113 ldr xd, [x0, #:tlsdesc_lo12:var] => movk x0, :tprel_g0_nc:var
4115 bfd_putl32 (0xf2800000, contents + rel->r_offset);
4116 return bfd_reloc_continue;
4120 /* GD->IE relaxation:
4121 ldr xd, [x0, #:tlsdesc_lo12:var] => ldr x0, [x0, #:gottprel_lo12:var]
4123 insn = bfd_getl32 (contents + rel->r_offset);
4125 bfd_putl32 (insn, contents + rel->r_offset);
4126 return bfd_reloc_continue;
4129 case R_AARCH64_TLSGD_ADD_LO12_NC:
4132 /* GD->LE relaxation
4133 add x0, #:tlsgd_lo12:var => movk x0, :tprel_g0_nc:var
4134 bl __tls_get_addr => mrs x1, tpidr_el0
4135 nop => add x0, x1, x0
4138 /* First kill the tls_get_addr reloc on the bl instruction. */
4139 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
4140 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4142 bfd_putl32 (0xf2800000, contents + rel->r_offset);
4143 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
4144 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
4145 return bfd_reloc_continue;
4149 /* GD->IE relaxation
4150 ADD x0, #:tlsgd_lo12:var => ldr x0, [x0, #:gottprel_lo12:var]
4151 BL __tls_get_addr => mrs x1, tpidr_el0
4153 NOP => add x0, x1, x0
4156 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == R_AARCH64_CALL26);
4158 /* Remove the relocation on the BL instruction. */
4159 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4161 bfd_putl32 (0xf9400000, contents + rel->r_offset);
4163 /* We choose to fixup the BL and NOP instructions using the
4164 offset from the second relocation to allow flexibility in
4165 scheduling instructions between the ADD and BL. */
4166 bfd_putl32 (0xd53bd041, contents + rel[1].r_offset);
4167 bfd_putl32 (0x8b000020, contents + rel[1].r_offset + 4);
4168 return bfd_reloc_continue;
4171 case R_AARCH64_TLSDESC_ADD_LO12_NC:
4172 case R_AARCH64_TLSDESC_CALL:
4173 /* GD->IE/LE relaxation:
4174 add x0, x0, #:tlsdesc_lo12:var => nop
4177 bfd_putl32 (INSN_NOP, contents + rel->r_offset);
4178 return bfd_reloc_ok;
4180 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4181 /* IE->LE relaxation:
4182 adrp xd, :gottprel:var => movz xd, :tprel_g1:var
4186 insn = bfd_getl32 (contents + rel->r_offset);
4187 bfd_putl32 (0xd2a00000 | (insn & 0x1f), contents + rel->r_offset);
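/* (insn & 0x1f) extracts the Rd field (bits [4:0]) of the original
   adrp, so the replacement movz (and the movk in the following case)
   writes to the same register xd.  */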
4189 return bfd_reloc_continue;
4191 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4192 /* IE->LE relaxation:
4193 ldr xd, [xm, #:gottprel_lo12:var] => movk xd, :tprel_g0_nc:var
4197 insn = bfd_getl32 (contents + rel->r_offset);
4198 bfd_putl32 (0xf2800000 | (insn & 0x1f), contents + rel->r_offset);
4200 return bfd_reloc_continue;
4203 return bfd_reloc_continue;
4206 return bfd_reloc_ok;
4209 /* Relocate an AArch64 ELF section. */
4212 elfNN_aarch64_relocate_section (bfd *output_bfd,
4213 struct bfd_link_info *info,
4215 asection *input_section,
4217 Elf_Internal_Rela *relocs,
4218 Elf_Internal_Sym *local_syms,
4219 asection **local_sections)
4221 Elf_Internal_Shdr *symtab_hdr;
4222 struct elf_link_hash_entry **sym_hashes;
4223 Elf_Internal_Rela *rel;
4224 Elf_Internal_Rela *relend;
4226 struct elf_aarch64_link_hash_table *globals;
4227 bfd_boolean save_addend = FALSE;
4230 globals = elf_aarch64_hash_table (info);
4232 symtab_hdr = &elf_symtab_hdr (input_bfd);
4233 sym_hashes = elf_sym_hashes (input_bfd);
4236 relend = relocs + input_section->reloc_count;
4237 for (; rel < relend; rel++)
4239 unsigned int r_type;
4240 unsigned int relaxed_r_type;
4241 reloc_howto_type *howto;
4242 unsigned long r_symndx;
4243 Elf_Internal_Sym *sym;
4245 struct elf_link_hash_entry *h;
4247 bfd_reloc_status_type r;
4250 bfd_boolean unresolved_reloc = FALSE;
4251 char *error_message = NULL;
4253 r_symndx = ELFNN_R_SYM (rel->r_info);
4254 r_type = ELFNN_R_TYPE (rel->r_info);
4256 bfd_reloc.howto = elfNN_aarch64_howto_from_type (r_type);
4257 howto = bfd_reloc.howto;
4261 (*_bfd_error_handler)
4262 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
4263 input_bfd, input_section, r_type);
4271 if (r_symndx < symtab_hdr->sh_info)
4273 sym = local_syms + r_symndx;
4274 sym_type = ELFNN_ST_TYPE (sym->st_info);
4275 sec = local_sections[r_symndx];
4277 /* An object file might have a reference to a local
4278 undefined symbol. This is a daft object file, but we
4279 should at least do something about it. */
4280 if (r_type != R_AARCH64_NONE && r_type != R_AARCH64_NULL
4281 && bfd_is_und_section (sec)
4282 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
4284 if (!info->callbacks->undefined_symbol
4285 (info, bfd_elf_string_from_elf_section
4286 (input_bfd, symtab_hdr->sh_link, sym->st_name),
4287 input_bfd, input_section, rel->r_offset, TRUE))
4291 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
4297 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
4298 r_symndx, symtab_hdr, sym_hashes,
4300 unresolved_reloc, warned);
4305 if (sec != NULL && discarded_section (sec))
4306 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
4307 rel, 1, relend, howto, 0, contents);
4309 if (info->relocatable)
4311 /* This is a relocatable link. We don't have to change
4312 anything, unless the reloc is against a section symbol,
4313 in which case we have to adjust according to where the
4314 section symbol winds up in the output section. */
4315 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
4316 rel->r_addend += sec->output_offset;
4321 name = h->root.root.string;
4324 name = (bfd_elf_string_from_elf_section
4325 (input_bfd, symtab_hdr->sh_link, sym->st_name));
4326 if (name == NULL || *name == '\0')
4327 name = bfd_section_name (input_bfd, sec);
4331 && r_type != R_AARCH64_NONE
4332 && r_type != R_AARCH64_NULL
4334 || h->root.type == bfd_link_hash_defined
4335 || h->root.type == bfd_link_hash_defweak)
4336 && IS_AARCH64_TLS_RELOC (r_type) != (sym_type == STT_TLS))
4338 (*_bfd_error_handler)
4339 ((sym_type == STT_TLS
4340 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
4341 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
4343 input_section, (long) rel->r_offset, howto->name, name);
4347 /* We relax only if we can see that there can be a valid transition
4348 from a reloc type to another.
4349 We call elfNN_aarch64_final_link_relocate unless we're completely
4350 done, i.e., the relaxation produced the final output we want. */
4352 relaxed_r_type = aarch64_tls_transition (input_bfd, info, r_type,
4354 if (relaxed_r_type != r_type)
4356 r_type = relaxed_r_type;
4357 howto = elfNN_aarch64_howto_from_type (r_type);
4359 r = elfNN_aarch64_tls_relax (globals, input_bfd, contents, rel, h);
4360 unresolved_reloc = 0;
4363 r = bfd_reloc_continue;
4365 /* There may be multiple consecutive relocations for the
4366 same offset. In that case we are supposed to treat the
4367 output of each relocation as the addend for the next. */
4368 if (rel + 1 < relend
4369 && rel->r_offset == rel[1].r_offset
4370 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NONE
4371 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NULL)
4374 save_addend = FALSE;
4376 if (r == bfd_reloc_continue)
4377 r = elfNN_aarch64_final_link_relocate (howto, input_bfd, output_bfd,
4378 input_section, contents, rel,
4379 relocation, info, sec,
4380 h, &unresolved_reloc,
4381 save_addend, &addend);
4385 case R_AARCH64_TLSGD_ADR_PAGE21:
4386 case R_AARCH64_TLSGD_ADD_LO12_NC:
4387 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
4389 bfd_boolean need_relocs = FALSE;
4394 off = symbol_got_offset (input_bfd, h, r_symndx);
4395 indx = h && h->dynindx != -1 ? h->dynindx : 0;
4398 (info->shared || indx != 0) &&
4400 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4401 || h->root.type != bfd_link_hash_undefweak);
4403 BFD_ASSERT (globals->root.srelgot != NULL);
4407 Elf_Internal_Rela rela;
4408 rela.r_info = ELFNN_R_INFO (indx, R_AARCH64_TLS_DTPMOD64);
4410 rela.r_offset = globals->root.sgot->output_section->vma +
4411 globals->root.sgot->output_offset + off;
4414 loc = globals->root.srelgot->contents;
4415 loc += globals->root.srelgot->reloc_count++
4416 * RELOC_SIZE (htab);
4417 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
4421 bfd_put_NN (output_bfd,
4422 relocation - dtpoff_base (info),
4423 globals->root.sgot->contents + off
4428 /* This TLS symbol is global. We emit a
4429 relocation to fixup the tls offset at load
4432 ELFNN_R_INFO (indx, R_AARCH64_TLS_DTPREL64);
4435 (globals->root.sgot->output_section->vma
4436 + globals->root.sgot->output_offset + off
4439 loc = globals->root.srelgot->contents;
4440 loc += globals->root.srelgot->reloc_count++
4441 * RELOC_SIZE (globals);
4442 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
4443 bfd_put_NN (output_bfd, (bfd_vma) 0,
4444 globals->root.sgot->contents + off
4450 bfd_put_NN (output_bfd, (bfd_vma) 1,
4451 globals->root.sgot->contents + off);
4452 bfd_put_NN (output_bfd,
4453 relocation - dtpoff_base (info),
4454 globals->root.sgot->contents + off
4458 symbol_got_offset_mark (input_bfd, h, r_symndx);
4462 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4463 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4464 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
4466 bfd_boolean need_relocs = FALSE;
4471 off = symbol_got_offset (input_bfd, h, r_symndx);
4473 indx = h && h->dynindx != -1 ? h->dynindx : 0;
4476 (info->shared || indx != 0) &&
4478 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4479 || h->root.type != bfd_link_hash_undefweak);
4481 BFD_ASSERT (globals->root.srelgot != NULL);
4485 Elf_Internal_Rela rela;
4488 rela.r_addend = relocation - dtpoff_base (info);
4492 rela.r_info = ELFNN_R_INFO (indx, R_AARCH64_TLS_TPREL64);
4493 rela.r_offset = globals->root.sgot->output_section->vma +
4494 globals->root.sgot->output_offset + off;
4496 loc = globals->root.srelgot->contents;
4497 loc += globals->root.srelgot->reloc_count++
4498 * RELOC_SIZE (htab);
4500 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
4502 bfd_put_NN (output_bfd, rela.r_addend,
4503 globals->root.sgot->contents + off);
4506 bfd_put_NN (output_bfd, relocation - tpoff_base (info),
4507 globals->root.sgot->contents + off);
4509 symbol_got_offset_mark (input_bfd, h, r_symndx);
4513 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
4514 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
4515 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
4516 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
4517 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
4518 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4519 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
4520 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4523 case R_AARCH64_TLSDESC_ADR_PAGE21:
4524 case R_AARCH64_TLSDESC_LD64_LO12_NC:
4525 case R_AARCH64_TLSDESC_ADD_LO12_NC:
4526 if (! symbol_tlsdesc_got_offset_mark_p (input_bfd, h, r_symndx))
4528 bfd_boolean need_relocs = FALSE;
4529 int indx = h && h->dynindx != -1 ? h->dynindx : 0;
4530 bfd_vma off = symbol_tlsdesc_got_offset (input_bfd, h, r_symndx);
4532 need_relocs = (h == NULL
4533 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4534 || h->root.type != bfd_link_hash_undefweak);
4536 BFD_ASSERT (globals->root.srelgot != NULL);
4537 BFD_ASSERT (globals->root.sgot != NULL);
4542 Elf_Internal_Rela rela;
4543 rela.r_info = ELFNN_R_INFO (indx, R_AARCH64_TLSDESC);
4545 rela.r_offset = (globals->root.sgotplt->output_section->vma
4546 + globals->root.sgotplt->output_offset
4547 + off + globals->sgotplt_jump_table_size);
4550 rela.r_addend = relocation - dtpoff_base (info);
4552 /* Allocate the next available slot in the PLT reloc
4553 section to hold our R_AARCH64_TLSDESC, the next
4554 available slot is determined from reloc_count,
4555 which we step. But note, reloc_count was
4556 artificially moved down while allocating slots for
4557 real PLT relocs such that all of the PLT relocs
4558 will fit above the initial reloc_count and the
4559 extra stuff will fit below. */
4560 loc = globals->root.srelplt->contents;
4561 loc += globals->root.srelplt->reloc_count++
4562 * RELOC_SIZE (globals);
4564 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
4566 bfd_put_NN (output_bfd, (bfd_vma) 0,
4567 globals->root.sgotplt->contents + off +
4568 globals->sgotplt_jump_table_size);
4569 bfd_put_NN (output_bfd, (bfd_vma) 0,
4570 globals->root.sgotplt->contents + off +
4571 globals->sgotplt_jump_table_size +
4575 symbol_tlsdesc_got_offset_mark (input_bfd, h, r_symndx);
4584 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
4585 because such sections are not SEC_ALLOC and thus ld.so will
4586 not process them. */
4587 if (unresolved_reloc
4588 && !((input_section->flags & SEC_DEBUGGING) != 0
4590 && _bfd_elf_section_offset (output_bfd, info, input_section,
4591 +rel->r_offset) != (bfd_vma) - 1)
4593 (*_bfd_error_handler)
4595 ("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
4596 input_bfd, input_section, (long) rel->r_offset, howto->name,
4597 h->root.root.string);
4601 if (r != bfd_reloc_ok && r != bfd_reloc_continue)
4605 case bfd_reloc_overflow:
4606 /* If the overflowing reloc was to an undefined symbol,
4607 we have already printed one error message and there
4608 is no point complaining again. */
4610 h->root.type != bfd_link_hash_undefined)
4611 && (!((*info->callbacks->reloc_overflow)
4612 (info, (h ? &h->root : NULL), name, howto->name,
4613 (bfd_vma) 0, input_bfd, input_section,
4618 case bfd_reloc_undefined:
4619 if (!((*info->callbacks->undefined_symbol)
4620 (info, name, input_bfd, input_section,
4621 rel->r_offset, TRUE)))
4625 case bfd_reloc_outofrange:
4626 error_message = _("out of range");
4629 case bfd_reloc_notsupported:
4630 error_message = _("unsupported relocation");
4633 case bfd_reloc_dangerous:
4634 /* error_message should already be set. */
4638 error_message = _("unknown error");
4642 BFD_ASSERT (error_message != NULL);
4643 if (!((*info->callbacks->reloc_dangerous)
4644 (info, error_message, input_bfd, input_section,
4655 /* Set the right machine number. */
4658 elfNN_aarch64_object_p (bfd *abfd)
4661 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64_ilp32);
4663 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64);
4668 /* Function to keep AArch64 specific flags in the ELF header. */
4671 elfNN_aarch64_set_private_flags (bfd *abfd, flagword flags)
4673 if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
4678 elf_elfheader (abfd)->e_flags = flags;
4679 elf_flags_init (abfd) = TRUE;
4685 /* Copy backend specific data from one object module to another. */
4688 elfNN_aarch64_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
4692 if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
4695 in_flags = elf_elfheader (ibfd)->e_flags;
4697 elf_elfheader (obfd)->e_flags = in_flags;
4698 elf_flags_init (obfd) = TRUE;
4700 /* Also copy the EI_OSABI field. */
4701 elf_elfheader (obfd)->e_ident[EI_OSABI] =
4702 elf_elfheader (ibfd)->e_ident[EI_OSABI];
4704 /* Copy object attributes. */
4705 _bfd_elf_copy_obj_attributes (ibfd, obfd);
4710 /* Merge backend specific data from an object file to the output
4711 object file when linking. */
4714 elfNN_aarch64_merge_private_bfd_data (bfd *ibfd, bfd *obfd)
4718 bfd_boolean flags_compatible = TRUE;
4721 /* Check if we have the same endianness. */
4722 if (!_bfd_generic_verify_endian_match (ibfd, obfd))
4725 if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
4728 /* The input BFD must have had its flags initialised. */
4729 /* The following seems bogus to me -- The flags are initialized in
4730 the assembler but I don't think an elf_flags_init field is
4731 written into the object. */
4732 /* BFD_ASSERT (elf_flags_init (ibfd)); */
4734 in_flags = elf_elfheader (ibfd)->e_flags;
4735 out_flags = elf_elfheader (obfd)->e_flags;
4737 if (!elf_flags_init (obfd))
4739 /* If the input is the default architecture and had the default
4740 flags then do not bother setting the flags for the output
4741 architecture, instead allow future merges to do this. If no
4742 future merges ever set these flags then they will retain their
4743 uninitialised values which, surprise surprise, correspond
4744 to the default values. */
4745 if (bfd_get_arch_info (ibfd)->the_default
4746 && elf_elfheader (ibfd)->e_flags == 0)
4749 elf_flags_init (obfd) = TRUE;
4750 elf_elfheader (obfd)->e_flags = in_flags;
4752 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
4753 && bfd_get_arch_info (obfd)->the_default)
4754 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
4755 bfd_get_mach (ibfd));
4760 /* Identical flags must be compatible. */
4761 if (in_flags == out_flags)
4764 /* Check to see if the input BFD actually contains any sections. If
4765 not, its flags may not have been initialised either, but it
4766 cannot actually cause any incompatibility. Do not short-circuit
4767 dynamic objects; their section list may be emptied by
4768 elf_link_add_object_symbols.
4770 Also check to see if there are no code sections in the input.
4771 In this case there is no need to check for code specific flags.
4772 XXX - do we need to worry about floating-point format compatibility
4773 in data sections ? */
4774 if (!(ibfd->flags & DYNAMIC))
4776 bfd_boolean null_input_bfd = TRUE;
4777 bfd_boolean only_data_sections = TRUE;
4779 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
4781 if ((bfd_get_section_flags (ibfd, sec)
4782 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
4783 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
4784 only_data_sections = FALSE;
4786 null_input_bfd = FALSE;
4790 if (null_input_bfd || only_data_sections)
4794 return flags_compatible;
4797 /* Display the flags field. */
4800 elfNN_aarch64_print_private_bfd_data (bfd *abfd, void *ptr)
4802 FILE *file = (FILE *) ptr;
4803 unsigned long flags;
4805 BFD_ASSERT (abfd != NULL && ptr != NULL);
4807 /* Print normal ELF private data. */
4808 _bfd_elf_print_private_bfd_data (abfd, ptr);
4810 flags = elf_elfheader (abfd)->e_flags;
4811 /* Ignore init flag - it may not be set, despite the flags field
4812 containing valid data. */
4814 /* xgettext:c-format */
4815 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
4818 fprintf (file, _("<Unrecognised flag bits set>"));
4825 /* Update the got entry reference counts for the section being removed. */
4828 elfNN_aarch64_gc_sweep_hook (bfd *abfd,
4829 struct bfd_link_info *info,
4831 const Elf_Internal_Rela * relocs)
4833 struct elf_aarch64_link_hash_table *htab;
4834 Elf_Internal_Shdr *symtab_hdr;
4835 struct elf_link_hash_entry **sym_hashes;
4836 struct elf_aarch64_local_symbol *locals;
4837 const Elf_Internal_Rela *rel, *relend;
4839 if (info->relocatable)
4842 htab = elf_aarch64_hash_table (info);
4847 elf_section_data (sec)->local_dynrel = NULL;
4849 symtab_hdr = &elf_symtab_hdr (abfd);
4850 sym_hashes = elf_sym_hashes (abfd);
4852 locals = elf_aarch64_locals (abfd);
4854 relend = relocs + sec->reloc_count;
4855 for (rel = relocs; rel < relend; rel++)
4857 unsigned long r_symndx;
4858 unsigned int r_type;
4859 struct elf_link_hash_entry *h = NULL;
4861 r_symndx = ELFNN_R_SYM (rel->r_info);
4863 if (r_symndx >= symtab_hdr->sh_info)
4865 struct elf_aarch64_link_hash_entry *eh;
4866 struct elf_dyn_relocs **pp;
4867 struct elf_dyn_relocs *p;
4869 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
4870 while (h->root.type == bfd_link_hash_indirect
4871 || h->root.type == bfd_link_hash_warning)
4872 h = (struct elf_link_hash_entry *) h->root.u.i.link;
4873 eh = (struct elf_aarch64_link_hash_entry *) h;
4875 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
4879 /* Everything must go for SEC. */
4887 Elf_Internal_Sym *isym;
4889 /* A local symbol. */
4890 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
4896 r_type = ELFNN_R_TYPE (rel->r_info);
4897 r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx);
4900 case R_AARCH64_LD64_GOT_LO12_NC:
4901 case R_AARCH64_GOT_LD_PREL19:
4902 case R_AARCH64_ADR_GOT_PAGE:
4903 case R_AARCH64_TLSGD_ADR_PAGE21:
4904 case R_AARCH64_TLSGD_ADD_LO12_NC:
4905 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4906 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4907 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
4908 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
4909 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
4910 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
4911 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
4912 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4913 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
4914 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4915 case R_AARCH64_TLSDESC_ADR_PAGE21:
4916 case R_AARCH64_TLSDESC_ADD_LO12_NC:
4917 case R_AARCH64_TLSDESC_LD64_LO12_NC:
4920 if (h->got.refcount > 0)
4921 h->got.refcount -= 1;
4923 else if (locals != NULL)
4925 if (locals[r_symndx].got_refcount > 0)
4926 locals[r_symndx].got_refcount -= 1;
4930 case R_AARCH64_ADR_PREL_PG_HI21_NC:
4931 case R_AARCH64_ADR_PREL_PG_HI21:
4932 case R_AARCH64_ADR_PREL_LO21:
4933 if (h != NULL && info->executable)
4935 if (h->plt.refcount > 0)
4936 h->plt.refcount -= 1;
4940 case R_AARCH64_CALL26:
4941 case R_AARCH64_JUMP26:
4942 /* If this is a local symbol then we resolve it
4943 directly without creating a PLT entry. */
4947 if (h->plt.refcount > 0)
4948 h->plt.refcount -= 1;
4951 case R_AARCH64_ABS64:
4952 if (h != NULL && info->executable)
4954 if (h->plt.refcount > 0)
4955 h->plt.refcount -= 1;
4967 /* Adjust a symbol defined by a dynamic object and referenced by a
4968 regular object. The current definition is in some section of the
4969 dynamic object, but we're not including those sections. We have to
4970 change the definition to something the rest of the link can
4974 elfNN_aarch64_adjust_dynamic_symbol (struct bfd_link_info *info,
4975 struct elf_link_hash_entry *h)
4977 struct elf_aarch64_link_hash_table *htab;
4980 /* If this is a function, put it in the procedure linkage table. We
4981 will fill in the contents of the procedure linkage table later,
4982 when we know the address of the .got section. */
4983 if (h->type == STT_FUNC || h->needs_plt)
4985 if (h->plt.refcount <= 0
4986 || SYMBOL_CALLS_LOCAL (info, h)
4987 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
4988 && h->root.type == bfd_link_hash_undefweak))
4990 /* This case can occur if we saw a CALL26 reloc in
4991 an input file, but the symbol wasn't referred to
4992 by a dynamic object or all references were
4993 garbage collected. In which case we can end up
4995 h->plt.offset = (bfd_vma) - 1;
5002 /* It's possible that we incorrectly decided a .plt reloc was
5003 needed for a data or PC-relative reloc to a non-function sym in
5004 check_relocs. We can't decide accurately between function and
5005 non-function syms in check_relocs; objects loaded later in
5006 the link may change h->type. So fix it now. */
5007 h->plt.offset = (bfd_vma) - 1;
5010 /* If this is a weak symbol, and there is a real definition, the
5011 processor independent code will have arranged for us to see the
5012 real definition first, and we can just use the same value. */
5013 if (h->u.weakdef != NULL)
5015 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
5016 || h->u.weakdef->root.type == bfd_link_hash_defweak);
5017 h->root.u.def.section = h->u.weakdef->root.u.def.section;
5018 h->root.u.def.value = h->u.weakdef->root.u.def.value;
5019 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
5020 h->non_got_ref = h->u.weakdef->non_got_ref;
5024 /* If we are creating a shared library, we must presume that the
5025 only references to the symbol are via the global offset table.
5026 For such cases we need not do anything here; the relocations will
5027 be handled correctly by relocate_section. */
5031 /* If there are no references to this symbol that do not use the
5032 GOT, we don't need to generate a copy reloc. */
5033 if (!h->non_got_ref)
5036 /* If -z nocopyreloc was given, we won't generate them either. */
5037 if (info->nocopyreloc)
5043 /* We must allocate the symbol in our .dynbss section, which will
5044 become part of the .bss section of the executable. There will be
5045 an entry for this symbol in the .dynsym section. The dynamic
5046 object will contain position independent code, so all references
5047 from the dynamic object to this symbol will go through the global
5048 offset table. The dynamic linker will use the .dynsym entry to
5049 determine the address it must put in the global offset table, so
5050 both the dynamic object and the regular object will refer to the
5051 same memory location for the variable. */
5053 htab = elf_aarch64_hash_table (info);
5055 /* We must generate a R_AARCH64_COPY reloc to tell the dynamic linker
5056 to copy the initial value out of the dynamic object and into the
5057 runtime process image. */
5058 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
5060 htab->srelbss->size += RELOC_SIZE (htab);
5066 return _bfd_elf_adjust_dynamic_copy (h, s);
5071 elfNN_aarch64_allocate_local_symbols (bfd *abfd, unsigned number)
5073 struct elf_aarch64_local_symbol *locals;
5074 locals = elf_aarch64_locals (abfd);
5077 locals = (struct elf_aarch64_local_symbol *)
5078 bfd_zalloc (abfd, number * sizeof (struct elf_aarch64_local_symbol));
5081 elf_aarch64_locals (abfd) = locals;
5086 /* Look through the relocs for a section during the first phase. */
5089 elfNN_aarch64_check_relocs (bfd *abfd, struct bfd_link_info *info,
5090 asection *sec, const Elf_Internal_Rela *relocs)
5092 Elf_Internal_Shdr *symtab_hdr;
5093 struct elf_link_hash_entry **sym_hashes;
5094 const Elf_Internal_Rela *rel;
5095 const Elf_Internal_Rela *rel_end;
5098 struct elf_aarch64_link_hash_table *htab;
5100 if (info->relocatable)
5103 BFD_ASSERT (is_aarch64_elf (abfd));
5105 htab = elf_aarch64_hash_table (info);
5108 symtab_hdr = &elf_symtab_hdr (abfd);
5109 sym_hashes = elf_sym_hashes (abfd);
5111 rel_end = relocs + sec->reloc_count;
5112 for (rel = relocs; rel < rel_end; rel++)
5114 struct elf_link_hash_entry *h;
5115 unsigned long r_symndx;
5116 unsigned int r_type;
5118 r_symndx = ELFNN_R_SYM (rel->r_info);
5119 r_type = ELFNN_R_TYPE (rel->r_info);
5121 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
5123 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
5128 if (r_symndx < symtab_hdr->sh_info)
5132 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
5133 while (h->root.type == bfd_link_hash_indirect
5134 || h->root.type == bfd_link_hash_warning)
5135 h = (struct elf_link_hash_entry *) h->root.u.i.link;
5137 /* PR15323, ref flags aren't set for references in the same
5139 h->root.non_ir_ref = 1;
5142 /* Could be done earlier, if h were already available. */
5143 r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx);
5147 case R_AARCH64_ABS64:
5149 /* We don't need to handle relocs into sections not going into
5150 the "real" output. */
5151 if ((sec->flags & SEC_ALLOC) == 0)
5159 h->plt.refcount += 1;
5160 h->pointer_equality_needed = 1;
5163 /* No need to do anything if we're not creating a shared
5169 struct elf_dyn_relocs *p;
5170 struct elf_dyn_relocs **head;
5172 /* We must copy these reloc types into the output file.
5173 Create a reloc section in dynobj and make room for
5177 if (htab->root.dynobj == NULL)
5178 htab->root.dynobj = abfd;
5180 sreloc = _bfd_elf_make_dynamic_reloc_section
5181 (sec, htab->root.dynobj, 3, abfd, /*rela? */ TRUE);
5187 /* If this is a global symbol, we count the number of
5188 relocations we need for this symbol. */
5191 struct elf_aarch64_link_hash_entry *eh;
5192 eh = (struct elf_aarch64_link_hash_entry *) h;
5193 head = &eh->dyn_relocs;
5197 /* Track dynamic relocs needed for local syms too.
5198 We really need local syms available to do this
5203 Elf_Internal_Sym *isym;
5205 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
5210 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
5214 /* Beware of type punned pointers vs strict aliasing
5216 vpp = &(elf_section_data (s)->local_dynrel);
5217 head = (struct elf_dyn_relocs **) vpp;
5221 if (p == NULL || p->sec != sec)
5223 bfd_size_type amt = sizeof *p;
5224 p = ((struct elf_dyn_relocs *)
5225 bfd_zalloc (htab->root.dynobj, amt));
5238 /* RR: We probably want to keep a consistency check that
5239 there are no dangling GOT_PAGE relocs. */
5240 case R_AARCH64_LD64_GOT_LO12_NC:
5241 case R_AARCH64_GOT_LD_PREL19:
5242 case R_AARCH64_ADR_GOT_PAGE:
5243 case R_AARCH64_TLSGD_ADR_PAGE21:
5244 case R_AARCH64_TLSGD_ADD_LO12_NC:
5245 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5246 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5247 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
5248 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
5249 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5250 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
5251 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
5252 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5253 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
5254 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5255 case R_AARCH64_TLSDESC_ADR_PAGE21:
5256 case R_AARCH64_TLSDESC_ADD_LO12_NC:
5257 case R_AARCH64_TLSDESC_LD64_LO12_NC:
5260 unsigned old_got_type;
5262 got_type = aarch64_reloc_got_type (r_type);
5266 h->got.refcount += 1;
5267 old_got_type = elf_aarch64_hash_entry (h)->got_type;
5271 struct elf_aarch64_local_symbol *locals;
5273 if (!elfNN_aarch64_allocate_local_symbols
5274 (abfd, symtab_hdr->sh_info))
5277 locals = elf_aarch64_locals (abfd);
5278 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
5279 locals[r_symndx].got_refcount += 1;
5280 old_got_type = locals[r_symndx].got_type;
5283 /* If a variable is accessed with both general dynamic TLS
5284 methods, two slots may be created. */
5285 if (GOT_TLS_GD_ANY_P (old_got_type) && GOT_TLS_GD_ANY_P (got_type))
5286 got_type |= old_got_type;
5288 /* We will already have issued an error message if there
5289 is a TLS/non-TLS mismatch, based on the symbol type.
5290 So just combine any TLS types needed. */
5291 if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
5292 && got_type != GOT_NORMAL)
5293 got_type |= old_got_type;
5295 /* If the symbol is accessed by both IE and GD methods, we
5296 are able to relax. Turn off the GD flag, without
5297 messing up with any other kind of TLS types that may be
5299 if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type))
5300 got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD);
5302 if (old_got_type != got_type)
5305 elf_aarch64_hash_entry (h)->got_type = got_type;
5308 struct elf_aarch64_local_symbol *locals;
5309 locals = elf_aarch64_locals (abfd);
5310 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
5311 locals[r_symndx].got_type = got_type;
5315 if (htab->root.sgot == NULL)
5317 if (htab->root.dynobj == NULL)
5318 htab->root.dynobj = abfd;
5319 if (!_bfd_elf_create_got_section (htab->root.dynobj, info))
5325 case R_AARCH64_ADR_PREL_PG_HI21_NC:
5326 case R_AARCH64_ADR_PREL_PG_HI21:
5327 case R_AARCH64_ADR_PREL_LO21:
5328 if (h != NULL && info->executable)
5330 /* If this reloc is in a read-only section, we might
5331 need a copy reloc. We can't check reliably at this
5332 stage whether the section is read-only, as input
5333 sections have not yet been mapped to output sections.
5334 Tentatively set the flag for now, and correct in
5335 adjust_dynamic_symbol. */
5337 h->plt.refcount += 1;
5338 h->pointer_equality_needed = 1;
5340 /* FIXME: RR: we need to handle these in shared libraries
5341 and essentially bomb out, since they are non-PIC
5342 relocations when used in shared libraries. */
5345 case R_AARCH64_CALL26:
5346 case R_AARCH64_JUMP26:
5347 /* If this is a local symbol then we resolve it
5348 directly without creating a PLT entry. */
5353 h->plt.refcount += 1;
5360 /* Treat mapping symbols as special target symbols. */
5363 elfNN_aarch64_is_target_special_symbol (bfd *abfd ATTRIBUTE_UNUSED,
5366 return bfd_is_aarch64_special_symbol_name (sym->name,
5367 BFD_AARCH64_SPECIAL_SYM_TYPE_ANY);
5370 /* This is a copy of elf_find_function () from elf.c except that
5371 AArch64 mapping symbols are ignored when looking for function names. */
5374 aarch64_elf_find_function (bfd *abfd ATTRIBUTE_UNUSED,
5378 const char **filename_ptr,
5379 const char **functionname_ptr)
5381 const char *filename = NULL;
5382 asymbol *func = NULL;
5383 bfd_vma low_func = 0;
5386 for (p = symbols; *p != NULL; p++)
5390 q = (elf_symbol_type *) * p;
5392 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
5397 filename = bfd_asymbol_name (&q->symbol);
5401 /* Skip mapping symbols. */
5402 if ((q->symbol.flags & BSF_LOCAL)
5403 && (bfd_is_aarch64_special_symbol_name
5404 (q->symbol.name, BFD_AARCH64_SPECIAL_SYM_TYPE_ANY)))
5407 if (bfd_get_section (&q->symbol) == section
5408 && q->symbol.value >= low_func && q->symbol.value <= offset)
5410 func = (asymbol *) q;
5411 low_func = q->symbol.value;
5421 *filename_ptr = filename;
5422 if (functionname_ptr)
5423 *functionname_ptr = bfd_asymbol_name (func);
5429 /* Find the nearest line to a particular section and offset, for error
5430 reporting. This code is a duplicate of the code in elf.c, except
5431 that it uses aarch64_elf_find_function. */
5434 elfNN_aarch64_find_nearest_line (bfd *abfd,
5438 const char **filename_ptr,
5439 const char **functionname_ptr,
5440 unsigned int *line_ptr)
5442 bfd_boolean found = FALSE;
5444 /* We skip _bfd_dwarf1_find_nearest_line since no known AArch64
5445 toolchain uses it. */
5447 if (_bfd_dwarf2_find_nearest_line (abfd, dwarf_debug_sections,
5448 section, symbols, offset,
5449 filename_ptr, functionname_ptr,
5451 &elf_tdata (abfd)->dwarf2_find_line_info))
5453 if (!*functionname_ptr)
5454 aarch64_elf_find_function (abfd, section, symbols, offset,
5455 *filename_ptr ? NULL : filename_ptr,
5461 if (!_bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
5462 &found, filename_ptr,
5463 functionname_ptr, line_ptr,
5464 &elf_tdata (abfd)->line_info))
5467 if (found && (*functionname_ptr || *line_ptr))
5470 if (symbols == NULL)
5473 if (!aarch64_elf_find_function (abfd, section, symbols, offset,
5474 filename_ptr, functionname_ptr))
5482 elfNN_aarch64_find_inliner_info (bfd *abfd,
5483 const char **filename_ptr,
5484 const char **functionname_ptr,
5485 unsigned int *line_ptr)
5488 found = _bfd_dwarf2_find_inliner_info
5489 (abfd, filename_ptr,
5490 functionname_ptr, line_ptr, &elf_tdata (abfd)->dwarf2_find_line_info);
5496 elfNN_aarch64_post_process_headers (bfd *abfd,
5497 struct bfd_link_info *link_info
5500 Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form. */
5502 i_ehdrp = elf_elfheader (abfd);
5503 i_ehdrp->e_ident[EI_OSABI] = 0;
5504 i_ehdrp->e_ident[EI_ABIVERSION] = AARCH64_ELF_ABI_VERSION;
5507 static enum elf_reloc_type_class
5508 elfNN_aarch64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
5509 const asection *rel_sec ATTRIBUTE_UNUSED,
5510 const Elf_Internal_Rela *rela)
5512 switch ((int) ELFNN_R_TYPE (rela->r_info))
5514 case R_AARCH64_RELATIVE:
5515 return reloc_class_relative;
5516 case R_AARCH64_JUMP_SLOT:
5517 return reloc_class_plt;
5518 case R_AARCH64_COPY:
5519 return reloc_class_copy;
5521 return reloc_class_normal;
5525 /* Set the right machine number for an AArch64 ELF file. */
5528 elfNN_aarch64_section_flags (flagword *flags, const Elf_Internal_Shdr *hdr)
5530 if (hdr->sh_type == SHT_NOTE)
5531 *flags |= SEC_LINK_ONCE | SEC_LINK_DUPLICATES_SAME_CONTENTS;
5536 /* Handle an AArch64 specific section when reading an object file. This is
5537 called when bfd_section_from_shdr finds a section with an unknown
5541 elfNN_aarch64_section_from_shdr (bfd *abfd,
5542 Elf_Internal_Shdr *hdr,
5543 const char *name, int shindex)
5545 /* There ought to be a place to keep ELF backend specific flags, but
5546 at the moment there isn't one. We just keep track of the
5547 sections by their name, instead. Fortunately, the ABI gives
5548 names for all the AArch64 specific sections, so we will probably get
5550 switch (hdr->sh_type)
5552 case SHT_AARCH64_ATTRIBUTES:
5559 if (!_bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
5565 /* A structure used to record a list of sections, independently
5566 of the next and prev fields in the asection structure. */
5567 typedef struct section_list
5570 struct section_list *next;
5571 struct section_list *prev;
5575 /* Unfortunately we need to keep a list of sections for which
5576 an _aarch64_elf_section_data structure has been allocated. This
5577 is because it is possible for functions like elfNN_aarch64_write_section
5578 to be called on a section which has had an elf_data_structure
5579 allocated for it (and so the used_by_bfd field is valid) but
5580 for which the AArch64 extended version of this structure - the
5581 _aarch64_elf_section_data structure - has not been allocated. */
5582 static section_list *sections_with_aarch64_elf_section_data = NULL;
5585 record_section_with_aarch64_elf_section_data (asection *sec)
5587 struct section_list *entry;
5589 entry = bfd_malloc (sizeof (*entry));
5593 entry->next = sections_with_aarch64_elf_section_data;
5595 if (entry->next != NULL)
5596 entry->next->prev = entry;
5597 sections_with_aarch64_elf_section_data = entry;
5600 static struct section_list *
5601 find_aarch64_elf_section_entry (asection *sec)
5603 struct section_list *entry;
5604 static struct section_list *last_entry = NULL;
5606 /* This is a short cut for the typical case where the sections are added
5607 to the sections_with_aarch64_elf_section_data list in forward order and
5608 then looked up here in backwards order. This makes a real difference
5609 to the ld-srec/sec64k.exp linker test. */
5610 entry = sections_with_aarch64_elf_section_data;
5611 if (last_entry != NULL)
5613 if (last_entry->sec == sec)
5615 else if (last_entry->next != NULL && last_entry->next->sec == sec)
5616 entry = last_entry->next;
5619 for (; entry; entry = entry->next)
5620 if (entry->sec == sec)
5624 /* Record the entry prior to this one - it is the entry we are
5625 most likely to want to locate next time. Also this way if we
5626 have been called from
5627 unrecord_section_with_aarch64_elf_section_data () we will not
5628 be caching a pointer that is about to be freed. */
5629 last_entry = entry->prev;
5635 unrecord_section_with_aarch64_elf_section_data (asection *sec)
5637 struct section_list *entry;
5639 entry = find_aarch64_elf_section_entry (sec);
5643 if (entry->prev != NULL)
5644 entry->prev->next = entry->next;
5645 if (entry->next != NULL)
5646 entry->next->prev = entry->prev;
5647 if (entry == sections_with_aarch64_elf_section_data)
5648 sections_with_aarch64_elf_section_data = entry->next;
5657 struct bfd_link_info *info;
5660 int (*func) (void *, const char *, Elf_Internal_Sym *,
5661 asection *, struct elf_link_hash_entry *);
5662 } output_arch_syminfo;
5664 enum map_symbol_type
5671 /* Output a single mapping symbol. */
5674 elfNN_aarch64_output_map_sym (output_arch_syminfo *osi,
5675 enum map_symbol_type type, bfd_vma offset)
5677 static const char *names[2] = { "$x", "$d" };
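/* "$x" marks the start of a sequence of A64 instructions and "$d" the
   start of literal data, following the AArch64 ELF mapping-symbol
   convention.  */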
5678 Elf_Internal_Sym sym;
5680 sym.st_value = (osi->sec->output_section->vma
5681 + osi->sec->output_offset + offset);
5684 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
5685 sym.st_shndx = osi->sec_shndx;
5686 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
5691 /* Output mapping symbols for PLT entries associated with H. */
5694 elfNN_aarch64_output_plt_map (struct elf_link_hash_entry *h, void *inf)
5696 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
5699 if (h->root.type == bfd_link_hash_indirect)
5702 if (h->root.type == bfd_link_hash_warning)
5703 /* When warning symbols are created, they **replace** the "real"
5704 entry in the hash table, thus we never get to see the real
5705 symbol in a hash traversal. So look at it now. */
5706 h = (struct elf_link_hash_entry *) h->root.u.i.link;
5708 if (h->plt.offset == (bfd_vma) - 1)
5711 addr = h->plt.offset;
5714 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
5721 /* Output a single local symbol for a generated stub. */
5724 elfNN_aarch64_output_stub_sym (output_arch_syminfo *osi, const char *name,
5725 bfd_vma offset, bfd_vma size)
5727 Elf_Internal_Sym sym;
5729 sym.st_value = (osi->sec->output_section->vma
5730 + osi->sec->output_offset + offset);
5733 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5734 sym.st_shndx = osi->sec_shndx;
5735 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
5739 aarch64_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
5741 struct elf_aarch64_stub_hash_entry *stub_entry;
5745 output_arch_syminfo *osi;
5747 /* Massage our args to the form they really have. */
5748 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
5749 osi = (output_arch_syminfo *) in_arg;
5751 stub_sec = stub_entry->stub_sec;
5753 /* Ensure this stub is attached to the current section being
5755 if (stub_sec != osi->sec)
5758 addr = (bfd_vma) stub_entry->stub_offset;
5760 stub_name = stub_entry->output_name;
5762 switch (stub_entry->stub_type)
5764 case aarch64_stub_adrp_branch:
5765 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
5766 sizeof (aarch64_adrp_branch_stub)))
5768 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
5771 case aarch64_stub_long_branch:
5772 if (!elfNN_aarch64_output_stub_sym
5773 (osi, stub_name, addr, sizeof (aarch64_long_branch_stub)))
5775 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
5777 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16))
5787 /* Output mapping symbols for linker generated sections. */
5790 elfNN_aarch64_output_arch_local_syms (bfd *output_bfd,
5791 struct bfd_link_info *info,
5793 int (*func) (void *, const char *,
5796 struct elf_link_hash_entry
5799 output_arch_syminfo osi;
5800 struct elf_aarch64_link_hash_table *htab;
5802 htab = elf_aarch64_hash_table (info);
5808 /* Long calls stubs. */
5809 if (htab->stub_bfd && htab->stub_bfd->sections)
5813 for (stub_sec = htab->stub_bfd->sections;
5814 stub_sec != NULL; stub_sec = stub_sec->next)
5816 /* Ignore non-stub sections. */
5817 if (!strstr (stub_sec->name, STUB_SUFFIX))
5822 osi.sec_shndx = _bfd_elf_section_from_bfd_section
5823 (output_bfd, osi.sec->output_section);
5825 bfd_hash_traverse (&htab->stub_hash_table, aarch64_map_one_stub,
5830 /* Finally, output mapping symbols for the PLT. */
5831 if (!htab->root.splt || htab->root.splt->size == 0)
5834 /* For now live without mapping symbols for the plt. */
5835 osi.sec_shndx = _bfd_elf_section_from_bfd_section
5836 (output_bfd, htab->root.splt->output_section);
5837 osi.sec = htab->root.splt;
5839 elf_link_hash_traverse (&htab->root, elfNN_aarch64_output_plt_map,
5846 /* Allocate target specific section data. */
5849 elfNN_aarch64_new_section_hook (bfd *abfd, asection *sec)
5851 if (!sec->used_by_bfd)
5853 _aarch64_elf_section_data *sdata;
5854 bfd_size_type amt = sizeof (*sdata);
5856 sdata = bfd_zalloc (abfd, amt);
5859 sec->used_by_bfd = sdata;
5862 record_section_with_aarch64_elf_section_data (sec);
5864 return _bfd_elf_new_section_hook (abfd, sec);
5869 unrecord_section_via_map_over_sections (bfd *abfd ATTRIBUTE_UNUSED,
5871 void *ignore ATTRIBUTE_UNUSED)
5873 unrecord_section_with_aarch64_elf_section_data (sec);
5877 elfNN_aarch64_close_and_cleanup (bfd *abfd)
5880 bfd_map_over_sections (abfd,
5881 unrecord_section_via_map_over_sections, NULL);
5883 return _bfd_elf_close_and_cleanup (abfd);
5887 elfNN_aarch64_bfd_free_cached_info (bfd *abfd)
5890 bfd_map_over_sections (abfd,
5891 unrecord_section_via_map_over_sections, NULL);
5893 return _bfd_free_cached_info (abfd);
5897 elfNN_aarch64_is_function_type (unsigned int type)
5899 return type == STT_FUNC;
5902 /* Create dynamic sections. This is different from the ARM backend in that
5903 the got, plt, gotplt and their relocation sections are all created in the
5904 standard part of the bfd elf backend. */
5907 elfNN_aarch64_create_dynamic_sections (bfd *dynobj,
5908 struct bfd_link_info *info)
5910 struct elf_aarch64_link_hash_table *htab;
5911 struct elf_link_hash_entry *h;
5913 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
5916 htab = elf_aarch64_hash_table (info);
5917 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
5919 htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss");
5921 if (!htab->sdynbss || (!info->shared && !htab->srelbss))
5924 /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the
5925 dynobj's .got section. We don't do this in the linker script
5926 because we don't want to define the symbol if we are not creating
5927 a global offset table. */
5928 h = _bfd_elf_define_linkage_sym (dynobj, info,
5929 htab->root.sgot, "_GLOBAL_OFFSET_TABLE_");
5930 elf_hash_table (info)->hgot = h;
5938 /* Allocate space in .plt, .got and associated reloc sections for
5942 elfNN_aarch64_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
5944 struct bfd_link_info *info;
5945 struct elf_aarch64_link_hash_table *htab;
5946 struct elf_aarch64_link_hash_entry *eh;
5947 struct elf_dyn_relocs *p;
5949 /* An example of a bfd_link_hash_indirect symbol is a versioned
5950 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
5951 -> __gxx_personality_v0(bfd_link_hash_defined)
5953 There is no need to process bfd_link_hash_indirect symbols here
5954 because we will also be presented with the concrete instance of
5955 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
5956 called to copy all relevant data from the generic to the concrete
5959 if (h->root.type == bfd_link_hash_indirect)
5962 if (h->root.type == bfd_link_hash_warning)
5963 h = (struct elf_link_hash_entry *) h->root.u.i.link;
5965 info = (struct bfd_link_info *) inf;
5966 htab = elf_aarch64_hash_table (info);
5968 if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
5970 /* Make sure this symbol is output as a dynamic symbol.
5971 Undefined weak syms won't yet be marked as dynamic. */
5972 if (h->dynindx == -1 && !h->forced_local)
5974 if (!bfd_elf_link_record_dynamic_symbol (info, h))
5978 if (info->shared || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
5980 asection *s = htab->root.splt;
5982 /* If this is the first .plt entry, make room for the special
5985 s->size += htab->plt_header_size;
5987 h->plt.offset = s->size;
5989 /* If this symbol is not defined in a regular file, and we are
5990 not generating a shared library, then set the symbol to this
5991 location in the .plt. This is required to make function
5992 pointers compare as equal between the normal executable and
5993 the shared library. */
5994 if (!info->shared && !h->def_regular)
5996 h->root.u.def.section = s;
5997 h->root.u.def.value = h->plt.offset;
6000 /* Make room for this entry. For now we only create the
6001 small model PLT entries. We later need to find a way
6002 of relaxing into these from the large model PLT entries. */
6003 s->size += PLT_SMALL_ENTRY_SIZE;
6005 /* We also need to make an entry in the .got.plt section, which
6006 will be placed in the .got section by the linker script. */
6007 htab->root.sgotplt->size += GOT_ENTRY_SIZE;
6009 /* We also need to make an entry in the .rela.plt section. */
6010 htab->root.srelplt->size += RELOC_SIZE (htab);
6012 /* We need to ensure that all GOT entries that serve the PLT
6013 are consecutive with the special GOT slots [0] [1] and
6014 [2]. Any additional relocations, such as
6015 R_AARCH64_TLSDESC, must be placed after the PLT related
6016 entries. We abuse the reloc_count such that during
6017 sizing we adjust reloc_count to indicate the number of
6018 PLT related reserved entries. In subsequent phases when
6019 filling in the contents of the reloc entries, PLT related
6020 entries are placed by computing their PLT index (0
6021 .. reloc_count). Other non-PLT relocs are placed
6022 at the slot indicated by reloc_count and reloc_count is
6025 htab->root.srelplt->reloc_count++;
6029 h->plt.offset = (bfd_vma) - 1;
6035 h->plt.offset = (bfd_vma) - 1;
6039 eh = (struct elf_aarch64_link_hash_entry *) h;
6040 eh->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
6042 if (h->got.refcount > 0)
6045 unsigned got_type = elf_aarch64_hash_entry (h)->got_type;
6047 h->got.offset = (bfd_vma) - 1;
6049 dyn = htab->root.dynamic_sections_created;
6051 /* Make sure this symbol is output as a dynamic symbol.
6052 Undefined weak syms won't yet be marked as dynamic. */
6053 if (dyn && h->dynindx == -1 && !h->forced_local)
6055 if (!bfd_elf_link_record_dynamic_symbol (info, h))
6059 if (got_type == GOT_UNKNOWN)
6062 else if (got_type == GOT_NORMAL)
6064 h->got.offset = htab->root.sgot->size;
6065 htab->root.sgot->size += GOT_ENTRY_SIZE;
6066 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6067 || h->root.type != bfd_link_hash_undefweak)
6069 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
6071 htab->root.srelgot->size += RELOC_SIZE (htab);
6077 if (got_type & GOT_TLSDESC_GD)
6079 eh->tlsdesc_got_jump_table_offset =
6080 (htab->root.sgotplt->size
6081 - aarch64_compute_jump_table_size (htab));
6082 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
6083 h->got.offset = (bfd_vma) - 2;
6086 if (got_type & GOT_TLS_GD)
6088 h->got.offset = htab->root.sgot->size;
6089 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
6092 if (got_type & GOT_TLS_IE)
6094 h->got.offset = htab->root.sgot->size;
6095 htab->root.sgot->size += GOT_ENTRY_SIZE;
6098 indx = h && h->dynindx != -1 ? h->dynindx : 0;
6099 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6100 || h->root.type != bfd_link_hash_undefweak)
6103 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
6105 if (got_type & GOT_TLSDESC_GD)
6107 htab->root.srelplt->size += RELOC_SIZE (htab);
6108 /* Note reloc_count not incremented here! We have
6109 already adjusted reloc_count for this relocation
6112 /* TLSDESC PLT is now needed, but not yet determined. */
6113 htab->tlsdesc_plt = (bfd_vma) - 1;
6116 if (got_type & GOT_TLS_GD)
6117 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
6119 if (got_type & GOT_TLS_IE)
6120 htab->root.srelgot->size += RELOC_SIZE (htab);
6126 h->got.offset = (bfd_vma) - 1;
6129 if (eh->dyn_relocs == NULL)
6132 /* In the shared -Bsymbolic case, discard space allocated for
6133 dynamic pc-relative relocs against symbols which turn out to be
6134 defined in regular objects. For the normal shared case, discard
6135 space for pc-relative relocs that have become local due to symbol
6136 visibility changes. */
6140 /* Relocs that use pc_count are those that appear on a call
6141 insn, or certain REL relocs that can be generated via assembly.
6142 We want calls to protected symbols to resolve directly to the
6143 function rather than going via the plt. If people want
6144 function pointer comparisons to work as expected then they
6145 should avoid writing weird assembly. */
6146 if (SYMBOL_CALLS_LOCAL (info, h))
6148 struct elf_dyn_relocs **pp;
6150 for (pp = &eh->dyn_relocs; (p = *pp) != NULL;)
6152 p->count -= p->pc_count;
6161 /* Also discard relocs on undefined weak syms with non-default
6163 if (eh->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
6165 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
6166 eh->dyn_relocs = NULL;
6168 /* Make sure undefined weak symbols are output as a dynamic
6170 else if (h->dynindx == -1
6172 && !bfd_elf_link_record_dynamic_symbol (info, h))
6177 else if (ELIMINATE_COPY_RELOCS)
6179 /* For the non-shared case, discard space for relocs against
6180 symbols which turn out to need copy relocs or are not
6186 || (htab->root.dynamic_sections_created
6187 && (h->root.type == bfd_link_hash_undefweak
6188 || h->root.type == bfd_link_hash_undefined))))
6190 /* Make sure this symbol is output as a dynamic symbol.
6191 Undefined weak syms won't yet be marked as dynamic. */
6192 if (h->dynindx == -1
6194 && !bfd_elf_link_record_dynamic_symbol (info, h))
6197 /* If that succeeded, we know we'll be keeping all the
6199 if (h->dynindx != -1)
6203 eh->dyn_relocs = NULL;
6208 /* Finally, allocate space. */
6209 for (p = eh->dyn_relocs; p != NULL; p = p->next)
6213 sreloc = elf_section_data (p->sec)->sreloc;
6215 BFD_ASSERT (sreloc != NULL);
6217 sreloc->size += p->count * RELOC_SIZE (htab);
6224 /* This is the most important function of all. Innocuously named
6227 elfNN_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
6228 struct bfd_link_info *info)
6230 struct elf_aarch64_link_hash_table *htab;
6236 htab = elf_aarch64_hash_table ((info));
6237 dynobj = htab->root.dynobj;
6239 BFD_ASSERT (dynobj != NULL);
6241 if (htab->root.dynamic_sections_created)
6243 if (info->executable)
6245 s = bfd_get_linker_section (dynobj, ".interp");
6248 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
6249 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
6253 /* Set up .got offsets for local syms, and space for local dynamic
6255 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
6257 struct elf_aarch64_local_symbol *locals = NULL;
6258 Elf_Internal_Shdr *symtab_hdr;
6262 if (!is_aarch64_elf (ibfd))
6265 for (s = ibfd->sections; s != NULL; s = s->next)
6267 struct elf_dyn_relocs *p;
6269 for (p = (struct elf_dyn_relocs *)
6270 (elf_section_data (s)->local_dynrel); p != NULL; p = p->next)
6272 if (!bfd_is_abs_section (p->sec)
6273 && bfd_is_abs_section (p->sec->output_section))
6275 /* Input section has been discarded, either because
6276 it is a copy of a linkonce section or due to
6277 linker script /DISCARD/, so we'll be discarding
6280 else if (p->count != 0)
6282 srel = elf_section_data (p->sec)->sreloc;
6283 srel->size += p->count * RELOC_SIZE (htab);
6284 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
6285 info->flags |= DF_TEXTREL;
6290 locals = elf_aarch64_locals (ibfd);
6294 symtab_hdr = &elf_symtab_hdr (ibfd);
6295 srel = htab->root.srelgot;
6296 for (i = 0; i < symtab_hdr->sh_info; i++)
6298 locals[i].got_offset = (bfd_vma) - 1;
6299 locals[i].tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
6300 if (locals[i].got_refcount > 0)
6302 unsigned got_type = locals[i].got_type;
6303 if (got_type & GOT_TLSDESC_GD)
6305 locals[i].tlsdesc_got_jump_table_offset =
6306 (htab->root.sgotplt->size
6307 - aarch64_compute_jump_table_size (htab));
6308 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
6309 locals[i].got_offset = (bfd_vma) - 2;
6312 if (got_type & GOT_TLS_GD)
6314 locals[i].got_offset = htab->root.sgot->size;
6315 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
6318 if (got_type & GOT_TLS_IE)
6320 locals[i].got_offset = htab->root.sgot->size;
6321 htab->root.sgot->size += GOT_ENTRY_SIZE;
6324 if (got_type == GOT_UNKNOWN)
6328 if (got_type == GOT_NORMAL)
6334 if (got_type & GOT_TLSDESC_GD)
6336 htab->root.srelplt->size += RELOC_SIZE (htab);
6337 /* Note RELOC_COUNT not incremented here! */
6338 htab->tlsdesc_plt = (bfd_vma) - 1;
6341 if (got_type & GOT_TLS_GD)
6342 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
6344 if (got_type & GOT_TLS_IE)
6345 htab->root.srelgot->size += RELOC_SIZE (htab);
6350 locals[i].got_refcount = (bfd_vma) - 1;
6356 /* Allocate global sym .plt and .got entries, and space for global
6357 sym dynamic relocs. */
6358 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_dynrelocs,
6362 /* For every jump slot reserved in the sgotplt, reloc_count is
6363 incremented. However, when we reserve space for TLS descriptors,
6364 it's not incremented, so in order to compute the space reserved
6365 for them, it suffices to multiply the reloc count by the jump
6368 if (htab->root.srelplt)
6369 htab->sgotplt_jump_table_size = aarch64_compute_jump_table_size (htab);
6371 if (htab->tlsdesc_plt)
6373 if (htab->root.splt->size == 0)
6374 htab->root.splt->size += PLT_ENTRY_SIZE;
6376 htab->tlsdesc_plt = htab->root.splt->size;
6377 htab->root.splt->size += PLT_TLSDESC_ENTRY_SIZE;
6379 /* If we're not using lazy TLS relocations, don't generate the
6380 GOT entry required. */
6381 if (!(info->flags & DF_BIND_NOW))
6383 htab->dt_tlsdesc_got = htab->root.sgot->size;
6384 htab->root.sgot->size += GOT_ENTRY_SIZE;
6388 /* We now have determined the sizes of the various dynamic sections.
6389 Allocate memory for them. */
6391 for (s = dynobj->sections; s != NULL; s = s->next)
6393 if ((s->flags & SEC_LINKER_CREATED) == 0)
6396 if (s == htab->root.splt
6397 || s == htab->root.sgot
6398 || s == htab->root.sgotplt
6399 || s == htab->root.iplt
6400 || s == htab->root.igotplt || s == htab->sdynbss)
6402 /* Strip this section if we don't need it; see the
6405 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
6407 if (s->size != 0 && s != htab->root.srelplt)
6410 /* We use the reloc_count field as a counter if we need
6411 to copy relocs into the output file. */
6412 if (s != htab->root.srelplt)
6417 /* It's not one of our sections, so don't allocate space. */
6423 /* If we don't need this section, strip it from the
6424 output file. This is mostly to handle .rela.bss and
6425 .rela.plt. We must create both sections in
6426 create_dynamic_sections, because they must be created
6427 before the linker maps input sections to output
6428 sections. The linker does that before
6429 adjust_dynamic_symbol is called, and it is that
6430 function which decides whether anything needs to go
6431 into these sections. */
6433 s->flags |= SEC_EXCLUDE;
6437 if ((s->flags & SEC_HAS_CONTENTS) == 0)
6440 /* Allocate memory for the section contents. We use bfd_zalloc
6441 here in case unused entries are not reclaimed before the
6442 section's contents are written out. This should not happen,
6443 but this way if it does, we get a R_AARCH64_NONE reloc instead
6445 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
6446 if (s->contents == NULL)
6450 if (htab->root.dynamic_sections_created)
6452 /* Add some entries to the .dynamic section. We fill in the
6453 values later, in elfNN_aarch64_finish_dynamic_sections, but we
6454 must add the entries now so that we get the correct size for
6455 the .dynamic section. The DT_DEBUG entry is filled in by the
6456 dynamic linker and used by the debugger. */
6457 #define add_dynamic_entry(TAG, VAL) \
6458 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
6460 if (info->executable)
6462 if (!add_dynamic_entry (DT_DEBUG, 0))
6466 if (htab->root.splt->size != 0)
6468 if (!add_dynamic_entry (DT_PLTGOT, 0)
6469 || !add_dynamic_entry (DT_PLTRELSZ, 0)
6470 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
6471 || !add_dynamic_entry (DT_JMPREL, 0))
6474 if (htab->tlsdesc_plt
6475 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
6476 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
6482 if (!add_dynamic_entry (DT_RELA, 0)
6483 || !add_dynamic_entry (DT_RELASZ, 0)
6484 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
6487 /* If any dynamic relocs apply to a read-only section,
6488 then we need a DT_TEXTREL entry. */
6489 if ((info->flags & DF_TEXTREL) != 0)
6491 if (!add_dynamic_entry (DT_TEXTREL, 0))
6496 #undef add_dynamic_entry
6504 elf64_aarch64_update_plt_entry (bfd *output_bfd,
6505 unsigned int r_type,
6506 bfd_byte *plt_entry, bfd_vma value)
6508 reloc_howto_type *howto;
6509 howto = elfNN_aarch64_howto_from_type (r_type);
6510 bfd_elf_aarch64_put_addend (output_bfd, plt_entry, howto, value);
6514 elfNN_aarch64_create_small_pltn_entry (struct elf_link_hash_entry *h,
6515 struct elf_aarch64_link_hash_table
6516 *htab, bfd *output_bfd)
6518 bfd_byte *plt_entry;
6521 bfd_vma gotplt_entry_address;
6522 bfd_vma plt_entry_address;
6523 Elf_Internal_Rela rela;
6526 plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
6528 /* Offset in the GOT is the PLT index plus the GOT headers (3),
6529 times GOT_ENTRY_SIZE.  */
6530 got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
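/* For example, assuming GOT_ENTRY_SIZE is 8 for ELF64, PLT index 0 maps to
   GOT offset 24 ((0 + 3) * 8), the first slot after the three reserved GOT
   header entries.  */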
6531 plt_entry = htab->root.splt->contents + h->plt.offset;
6532 plt_entry_address = htab->root.splt->output_section->vma
6533 + htab->root.splt->output_section->output_offset + h->plt.offset;
6534 gotplt_entry_address = htab->root.sgotplt->output_section->vma +
6535 htab->root.sgotplt->output_offset + got_offset;
6537 /* Copy in the boiler-plate for the PLTn entry. */
6538 memcpy (plt_entry, elfNN_aarch64_small_plt_entry, PLT_SMALL_ENTRY_SIZE);
6540 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
6541 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
6542 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADR_PREL_PG_HI21,
6544 PG (gotplt_entry_address) -
6545 PG (plt_entry_address));
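/* PG () masks off the low 12 bits, so the value computed here is the page
   delta between the GOTPLT slot and this PLT entry, which is exactly what
   the ADRP immediate encodes per the formula above.  */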
6547 /* Fill in the lo12 bits for the load from the pltgot. */
6548 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_LDST64_ABS_LO12_NC,
6550 PG_OFFSET (gotplt_entry_address));
6552 /* Fill in the lo12 bits for the add from the pltgot entry.  */
6553 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADD_ABS_LO12_NC,
6555 PG_OFFSET (gotplt_entry_address));
6557 /* All the GOTPLT entries are essentially initialized to PLT0.  */
6558 bfd_put_NN (output_bfd,
6559 (htab->root.splt->output_section->vma
6560 + htab->root.splt->output_offset),
6561 htab->root.sgotplt->contents + got_offset);
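/* Pointing every GOTPLT slot back at PLT0 is what makes lazy binding work:
   the first call through PLTn branches back to PLT0, which enters the
   dynamic linker's resolver, and the resolver then overwrites the slot with
   the real target address.  */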
6563 /* Fill in the entry in the .rela.plt section. */
6564 rela.r_offset = gotplt_entry_address;
6565 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (JUMP_SLOT));
6568 /* Compute the relocation entry to use based on the PLT index and do
6569 not adjust reloc_count; it has already been adjusted
6570 to account for this entry.  */
6571 loc = htab->root.srelplt->contents + plt_index * RELOC_SIZE (htab);
6572 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
6575 /* Size sections even though they're not dynamic.  We use this hook to
6576 set up _TLS_MODULE_BASE_, if needed.  */
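/* _TLS_MODULE_BASE_ is a hidden, linker-defined STT_TLS symbol at offset 0
   of the module's TLS segment; defining it here means any reference to it
   resolves locally without needing a dynamic relocation.  */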
6579 elfNN_aarch64_always_size_sections (bfd *output_bfd,
6580 struct bfd_link_info *info)
6584 if (info->relocatable)
6587 tls_sec = elf_hash_table (info)->tls_sec;
6591 struct elf_link_hash_entry *tlsbase;
6593 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
6594 "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
6598 struct bfd_link_hash_entry *h = NULL;
6599 const struct elf_backend_data *bed =
6600 get_elf_backend_data (output_bfd);
6602 if (!(_bfd_generic_link_add_one_symbol
6603 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
6604 tls_sec, 0, NULL, FALSE, bed->collect, &h)))
6607 tlsbase->type = STT_TLS;
6608 tlsbase = (struct elf_link_hash_entry *) h;
6609 tlsbase->def_regular = 1;
6610 tlsbase->other = STV_HIDDEN;
6611 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
6618 /* Finish up dynamic symbol handling. We set the contents of various
6619 dynamic sections here. */
6621 elfNN_aarch64_finish_dynamic_symbol (bfd *output_bfd,
6622 struct bfd_link_info *info,
6623 struct elf_link_hash_entry *h,
6624 Elf_Internal_Sym *sym)
6626 struct elf_aarch64_link_hash_table *htab;
6627 htab = elf_aarch64_hash_table (info);
6629 if (h->plt.offset != (bfd_vma) - 1)
6631 /* This symbol has an entry in the procedure linkage table.  Set it up.  */
6634 if (h->dynindx == -1
6635 || htab->root.splt == NULL
6636 || htab->root.sgotplt == NULL || htab->root.srelplt == NULL)
6639 elfNN_aarch64_create_small_pltn_entry (h, htab, output_bfd);
6640 if (!h->def_regular)
6642 /* Mark the symbol as undefined, rather than as defined in
6643 the .plt section. Leave the value alone. This is a clue
6644 for the dynamic linker, to make function pointer
6645 comparisons work between an application and a shared library.  */
6647 sym->st_shndx = SHN_UNDEF;
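/* Because st_value still holds the PLT address, the dynamic linker can hand
   that address out as the canonical address of the function, so address
   comparisons agree between the executable and the shared library.  */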
6651 if (h->got.offset != (bfd_vma) - 1
6652 && elf_aarch64_hash_entry (h)->got_type == GOT_NORMAL)
6654 Elf_Internal_Rela rela;
6657 /* This symbol has an entry in the global offset table.  Set it up.  */
6659 if (htab->root.sgot == NULL || htab->root.srelgot == NULL)
6662 rela.r_offset = (htab->root.sgot->output_section->vma
6663 + htab->root.sgot->output_offset
6664 + (h->got.offset & ~(bfd_vma) 1));
6666 if (info->shared && SYMBOL_REFERENCES_LOCAL (info, h))
6668 if (!h->def_regular)
6671 BFD_ASSERT ((h->got.offset & 1) != 0);
6672 rela.r_info = ELFNN_R_INFO (0, R_AARCH64_RELATIVE);
6673 rela.r_addend = (h->root.u.def.value
6674 + h->root.u.def.section->output_section->vma
6675 + h->root.u.def.section->output_offset);
6679 BFD_ASSERT ((h->got.offset & 1) == 0);
6680 bfd_put_NN (output_bfd, (bfd_vma) 0,
6681 htab->root.sgot->contents + h->got.offset);
6682 rela.r_info = ELFNN_R_INFO (h->dynindx, R_AARCH64_GLOB_DAT);
6686 loc = htab->root.srelgot->contents;
6687 loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
6688 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
6693 Elf_Internal_Rela rela;
6696 /* This symbol needs a copy reloc. Set it up. */
6698 if (h->dynindx == -1
6699 || (h->root.type != bfd_link_hash_defined
6700 && h->root.type != bfd_link_hash_defweak)
6701 || htab->srelbss == NULL)
6704 rela.r_offset = (h->root.u.def.value
6705 + h->root.u.def.section->output_section->vma
6706 + h->root.u.def.section->output_offset);
6707 rela.r_info = ELFNN_R_INFO (h->dynindx, R_AARCH64_COPY);
6709 loc = htab->srelbss->contents;
6710 loc += htab->srelbss->reloc_count++ * RELOC_SIZE (htab);
6711 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
6714 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. SYM may
6715 be NULL for local symbols. */
6717 && (h == elf_hash_table (info)->hdynamic
6718 || h == elf_hash_table (info)->hgot))
6719 sym->st_shndx = SHN_ABS;
6725 elfNN_aarch64_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
6726 struct elf_aarch64_link_hash_table
6729 /* Fill in PLT0.  FIXME (RR): this doesn't distinguish between
6730 small and large PLTs and at the moment just generates the small PLT.  */
6733 /* PLT0 of the small PLT looks like this in ELF64 -
6734 stp x16, x30, [sp, #-16]! // Save the reloc and lr on stack.
6735 adrp x16, PLT_GOT + 16 // Get the page base of the GOTPLT
6736 ldr x17, [x16, #:lo12:PLT_GOT+16] // Load the address of the lazy resolver.
6738 add x16, x16, #:lo12:PLT_GOT+16 // Load the lo12 bits of the
6739 // GOTPLT entry for this.
6741 PLT0 will be slightly different in ELF32 due to different GOT entry size.  */
6744 bfd_vma plt_got_base;
6748 memcpy (htab->root.splt->contents, elfNN_aarch64_small_plt0_entry,
6750 elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize =
6753 plt_got_base = (htab->root.sgotplt->output_section->vma
6754 + htab->root.sgotplt->output_offset);
6756 plt_base = htab->root.splt->output_section->vma +
6757 htab->root.splt->output_section->output_offset;
6759 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
6760 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
6761 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADR_PREL_PG_HI21,
6762 htab->root.splt->contents + 4,
6763 PG (plt_got_base + 16) - PG (plt_base + 4));
6765 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_LDST64_ABS_LO12_NC,
6766 htab->root.splt->contents + 8,
6767 PG_OFFSET (plt_got_base + 16));
6769 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADD_ABS_LO12_NC,
6770 htab->root.splt->contents + 12,
6771 PG_OFFSET (plt_got_base + 16));
6775 elfNN_aarch64_finish_dynamic_sections (bfd *output_bfd,
6776 struct bfd_link_info *info)
6778 struct elf_aarch64_link_hash_table *htab;
6782 htab = elf_aarch64_hash_table (info);
6783 dynobj = htab->root.dynobj;
6784 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
6786 if (htab->root.dynamic_sections_created)
6788 ElfNN_External_Dyn *dyncon, *dynconend;
6790 if (sdyn == NULL || htab->root.sgot == NULL)
6793 dyncon = (ElfNN_External_Dyn *) sdyn->contents;
6794 dynconend = (ElfNN_External_Dyn *) (sdyn->contents + sdyn->size);
6795 for (; dyncon < dynconend; dyncon++)
6797 Elf_Internal_Dyn dyn;
6800 bfd_elfNN_swap_dyn_in (dynobj, dyncon, &dyn);
6808 s = htab->root.sgotplt;
6809 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
6813 dyn.d_un.d_ptr = htab->root.srelplt->output_section->vma;
6817 s = htab->root.srelplt->output_section;
6818 dyn.d_un.d_val = s->size;
6822 /* The procedure linkage table relocs (DT_JMPREL) should
6823 not be included in the overall relocs (DT_RELA).
6824 Therefore, we override the DT_RELASZ entry here to
6825 make it not include the JMPREL relocs. Since the
6826 linker script arranges for .rela.plt to follow all
6827 other relocation sections, we don't have to worry
6828 about changing the DT_RELA entry. */
6829 if (htab->root.srelplt != NULL)
6831 s = htab->root.srelplt->output_section;
6832 dyn.d_un.d_val -= s->size;
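/* For illustration: if the RELA sections total 0x180 bytes and .rela.plt
   accounts for 0x60 of them, DT_RELASZ is written as 0x120 and the PLT
   relocations are then described only by DT_JMPREL/DT_PLTRELSZ.  */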
6836 case DT_TLSDESC_PLT:
6837 s = htab->root.splt;
6838 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
6839 + htab->tlsdesc_plt;
6842 case DT_TLSDESC_GOT:
6843 s = htab->root.sgot;
6844 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
6845 + htab->dt_tlsdesc_got;
6849 bfd_elfNN_swap_dyn_out (output_bfd, &dyn, dyncon);
6854 /* Fill in the special first entry in the procedure linkage table. */
6855 if (htab->root.splt && htab->root.splt->size > 0)
6857 elfNN_aarch64_init_small_plt0_entry (output_bfd, htab);
6859 elf_section_data (htab->root.splt->output_section)->
6860 this_hdr.sh_entsize = htab->plt_entry_size;
6863 if (htab->tlsdesc_plt)
6865 bfd_put_NN (output_bfd, (bfd_vma) 0,
6866 htab->root.sgot->contents + htab->dt_tlsdesc_got);
6868 memcpy (htab->root.splt->contents + htab->tlsdesc_plt,
6869 elfNN_aarch64_tlsdesc_small_plt_entry,
6870 sizeof (elfNN_aarch64_tlsdesc_small_plt_entry));
6873 bfd_vma adrp1_addr =
6874 htab->root.splt->output_section->vma
6875 + htab->root.splt->output_offset + htab->tlsdesc_plt + 4;
6877 bfd_vma adrp2_addr =
6878 htab->root.splt->output_section->vma
6879 + htab->root.splt->output_offset + htab->tlsdesc_plt + 8;
6882 htab->root.sgot->output_section->vma
6883 + htab->root.sgot->output_offset;
6885 bfd_vma pltgot_addr =
6886 htab->root.sgotplt->output_section->vma
6887 + htab->root.sgotplt->output_offset;
6889 bfd_vma dt_tlsdesc_got = got_addr + htab->dt_tlsdesc_got;
6892 /* adrp x2, DT_TLSDESC_GOT */
6893 opcode = bfd_get_32 (output_bfd,
6894 htab->root.splt->contents
6895 + htab->tlsdesc_plt + 4);
6896 opcode = reencode_adr_imm
6897 (opcode, (PG (dt_tlsdesc_got) - PG (adrp1_addr)) >> 12);
6898 bfd_put_32 (output_bfd, opcode,
6899 htab->root.splt->contents + htab->tlsdesc_plt + 4);
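/* adrp x3, 0 -- per the elfNN_aarch64_tlsdesc_small_plt_entry template, the
   instruction at tlsdesc_plt + 8 is patched with the page of the GOTPLT
   (pltgot_addr).  */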
6902 opcode = bfd_get_32 (output_bfd,
6903 htab->root.splt->contents
6904 + htab->tlsdesc_plt + 8);
6905 opcode = reencode_adr_imm
6906 (opcode, (PG (pltgot_addr) - PG (adrp2_addr)) >> 12);
6907 bfd_put_32 (output_bfd, opcode,
6908 htab->root.splt->contents + htab->tlsdesc_plt + 8);
6910 /* ldr x2, [x2, #0] */
6911 opcode = bfd_get_32 (output_bfd,
6912 htab->root.splt->contents
6913 + htab->tlsdesc_plt + 12);
6914 opcode = reencode_ldst_pos_imm (opcode,
6915 PG_OFFSET (dt_tlsdesc_got) >> 3);
6916 bfd_put_32 (output_bfd, opcode,
6917 htab->root.splt->contents + htab->tlsdesc_plt + 12);
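/* add x3, x3, 0 -- the ADD at tlsdesc_plt + 16 receives the low 12 bits of
   pltgot_addr, completing the GOTPLT address computation started by the
   ADRP above.  */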
6920 opcode = bfd_get_32 (output_bfd,
6921 htab->root.splt->contents
6922 + htab->tlsdesc_plt + 16);
6923 opcode = reencode_add_imm (opcode, PG_OFFSET (pltgot_addr));
6924 bfd_put_32 (output_bfd, opcode,
6925 htab->root.splt->contents + htab->tlsdesc_plt + 16);
6930 if (htab->root.sgotplt)
6932 if (bfd_is_abs_section (htab->root.sgotplt->output_section))
6934 (*_bfd_error_handler)
6935 (_("discarded output section: `%A'"), htab->root.sgotplt);
6939 /* Fill in the first three entries in the global offset table. */
6940 if (htab->root.sgotplt->size > 0)
6942 /* Set the first entry in the global offset table to the address of
6943 the dynamic section. */
6945 bfd_put_NN (output_bfd, (bfd_vma) 0,
6946 htab->root.sgotplt->contents);
6948 bfd_put_NN (output_bfd,
6949 sdyn->output_section->vma + sdyn->output_offset,
6950 htab->root.sgotplt->contents);
6951 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
6952 bfd_put_NN (output_bfd,
6954 htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
6955 bfd_put_NN (output_bfd,
6957 htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
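/* GOT[1] and GOT[2] are left zero in the file; at run time the dynamic
   linker conventionally stores its link-map pointer and the address of the
   lazy-resolution entry point in these two slots.  */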
6960 elf_section_data (htab->root.sgotplt->output_section)->
6961 this_hdr.sh_entsize = GOT_ENTRY_SIZE;
6964 if (htab->root.sgot && htab->root.sgot->size > 0)
6965 elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
6971 /* Return address for Ith PLT stub in section PLT, for relocation REL
6972 or (bfd_vma) -1 if it should not be included. */
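/* For example, assuming the 32-byte PLT header and 16-byte small PLT entries
   used by this backend, stub 0 is reported at plt->vma + 32 and stub 1 at
   plt->vma + 48.  */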
6975 elfNN_aarch64_plt_sym_val (bfd_vma i, const asection *plt,
6976 const arelent *rel ATTRIBUTE_UNUSED)
6978 return plt->vma + PLT_ENTRY_SIZE + i * PLT_SMALL_ENTRY_SIZE;
6982 /* We use this so we can override certain functions
6983 (though currently we don't). */
6985 const struct elf_size_info elfNN_aarch64_size_info =
6987 sizeof (ElfNN_External_Ehdr),
6988 sizeof (ElfNN_External_Phdr),
6989 sizeof (ElfNN_External_Shdr),
6990 sizeof (ElfNN_External_Rel),
6991 sizeof (ElfNN_External_Rela),
6992 sizeof (ElfNN_External_Sym),
6993 sizeof (ElfNN_External_Dyn),
6994 sizeof (Elf_External_Note),
6995 4, /* Hash table entry size. */
6996 1, /* Internal relocs per external relocs. */
6997 ARCH_SIZE, /* Arch size. */
6998 LOG_FILE_ALIGN, /* Log_file_align. */
6999 ELFCLASSNN, EV_CURRENT,
7000 bfd_elfNN_write_out_phdrs,
7001 bfd_elfNN_write_shdrs_and_ehdr,
7002 bfd_elfNN_checksum_contents,
7003 bfd_elfNN_write_relocs,
7004 bfd_elfNN_swap_symbol_in,
7005 bfd_elfNN_swap_symbol_out,
7006 bfd_elfNN_slurp_reloc_table,
7007 bfd_elfNN_slurp_symbol_table,
7008 bfd_elfNN_swap_dyn_in,
7009 bfd_elfNN_swap_dyn_out,
7010 bfd_elfNN_swap_reloc_in,
7011 bfd_elfNN_swap_reloc_out,
7012 bfd_elfNN_swap_reloca_in,
7013 bfd_elfNN_swap_reloca_out
7016 #define ELF_ARCH bfd_arch_aarch64
7017 #define ELF_MACHINE_CODE EM_AARCH64
7018 #define ELF_MAXPAGESIZE 0x10000
7019 #define ELF_MINPAGESIZE 0x1000
7020 #define ELF_COMMONPAGESIZE 0x1000
7022 #define bfd_elfNN_close_and_cleanup \
7023 elfNN_aarch64_close_and_cleanup
7025 #define bfd_elfNN_bfd_copy_private_bfd_data \
7026 elfNN_aarch64_copy_private_bfd_data
7028 #define bfd_elfNN_bfd_free_cached_info \
7029 elfNN_aarch64_bfd_free_cached_info
7031 #define bfd_elfNN_bfd_is_target_special_symbol \
7032 elfNN_aarch64_is_target_special_symbol
7034 #define bfd_elfNN_bfd_link_hash_table_create \
7035 elfNN_aarch64_link_hash_table_create
7037 #define bfd_elfNN_bfd_link_hash_table_free \
7038 elfNN_aarch64_hash_table_free
7040 #define bfd_elfNN_bfd_merge_private_bfd_data \
7041 elfNN_aarch64_merge_private_bfd_data
7043 #define bfd_elfNN_bfd_print_private_bfd_data \
7044 elfNN_aarch64_print_private_bfd_data
7046 #define bfd_elfNN_bfd_reloc_type_lookup \
7047 elfNN_aarch64_reloc_type_lookup
7049 #define bfd_elfNN_bfd_reloc_name_lookup \
7050 elfNN_aarch64_reloc_name_lookup
7052 #define bfd_elfNN_bfd_set_private_flags \
7053 elfNN_aarch64_set_private_flags
7055 #define bfd_elfNN_find_inliner_info \
7056 elfNN_aarch64_find_inliner_info
7058 #define bfd_elfNN_find_nearest_line \
7059 elfNN_aarch64_find_nearest_line
7061 #define bfd_elfNN_mkobject \
7062 elfNN_aarch64_mkobject
7064 #define bfd_elfNN_new_section_hook \
7065 elfNN_aarch64_new_section_hook
7067 #define elf_backend_adjust_dynamic_symbol \
7068 elfNN_aarch64_adjust_dynamic_symbol
7070 #define elf_backend_always_size_sections \
7071 elfNN_aarch64_always_size_sections
7073 #define elf_backend_check_relocs \
7074 elfNN_aarch64_check_relocs
7076 #define elf_backend_copy_indirect_symbol \
7077 elfNN_aarch64_copy_indirect_symbol
7079 /* Create the .dynbss and .rela.bss sections in DYNOBJ, and set up shortcuts
7080 to them in our hash table.  */
7081 #define elf_backend_create_dynamic_sections \
7082 elfNN_aarch64_create_dynamic_sections
7084 #define elf_backend_init_index_section \
7085 _bfd_elf_init_2_index_sections
7087 #define elf_backend_is_function_type \
7088 elfNN_aarch64_is_function_type
7090 #define elf_backend_finish_dynamic_sections \
7091 elfNN_aarch64_finish_dynamic_sections
7093 #define elf_backend_finish_dynamic_symbol \
7094 elfNN_aarch64_finish_dynamic_symbol
7096 #define elf_backend_gc_sweep_hook \
7097 elfNN_aarch64_gc_sweep_hook
7099 #define elf_backend_object_p \
7100 elfNN_aarch64_object_p
7102 #define elf_backend_output_arch_local_syms \
7103 elfNN_aarch64_output_arch_local_syms
7105 #define elf_backend_plt_sym_val \
7106 elfNN_aarch64_plt_sym_val
7108 #define elf_backend_post_process_headers \
7109 elfNN_aarch64_post_process_headers
7111 #define elf_backend_relocate_section \
7112 elfNN_aarch64_relocate_section
7114 #define elf_backend_reloc_type_class \
7115 elfNN_aarch64_reloc_type_class
7117 #define elf_backend_section_flags \
7118 elfNN_aarch64_section_flags
7120 #define elf_backend_section_from_shdr \
7121 elfNN_aarch64_section_from_shdr
7123 #define elf_backend_size_dynamic_sections \
7124 elfNN_aarch64_size_dynamic_sections
7126 #define elf_backend_size_info \
7127 elfNN_aarch64_size_info
7129 #define elf_backend_can_refcount 1
7130 #define elf_backend_can_gc_sections 1
7131 #define elf_backend_plt_readonly 1
7132 #define elf_backend_want_got_plt 1
7133 #define elf_backend_want_plt_sym 0
7134 #define elf_backend_may_use_rel_p 0
7135 #define elf_backend_may_use_rela_p 1
7136 #define elf_backend_default_use_rela_p 1
7137 #define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)
7138 #define elf_backend_default_execstack 0
7140 #undef elf_backend_obj_attrs_section
7141 #define elf_backend_obj_attrs_section ".ARM.attributes"
7143 #include "elfNN-target.h"