1 /* AArch64-specific support for NN-bit ELF.
2 Copyright (C) 2009-2015 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
21 /* Notes on implementation:
23 Thread Local Storage (TLS)
27 The implementation currently supports both traditional TLS and TLS
28 descriptors, but only general dynamic (GD).
30 For traditional TLS the assembler will present us with code
31 fragments of the form:
33 adrp x0, :tlsgd:foo
34 R_AARCH64_TLSGD_ADR_PAGE21(foo)
35 add x0, :tlsgd_lo12:foo
36 R_AARCH64_TLSGD_ADD_LO12_NC(foo)
37 bl __tls_get_addr
40 For TLS descriptors the assembler will present us with code
41 fragments of the form:
43 adrp x0, :tlsdesc:foo R_AARCH64_TLSDESC_ADR_PAGE21(foo)
44 ldr x1, [x0, #:tlsdesc_lo12:foo] R_AARCH64_TLSDESC_LD64_LO12(foo)
45 add x0, x0, #:tlsdesc_lo12:foo R_AARCH64_TLSDESC_ADD_LO12(foo)
47 blr x1 R_AARCH64_TLSDESC_CALL(foo)
49 The relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} against foo
50 indicate that foo is thread local and should be accessed via the
51 traditional TLS mechanism.
53 The relocations R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC}
54 against foo indicate that 'foo' is thread local and should be accessed
55 via a TLS descriptor mechanism.
57 The precise instruction sequence is only relevant from the
58 perspective of linker relaxation which is currently not implemented.
60 The static linker must detect that 'foo' is a TLS object and
61 allocate a double GOT entry. The GOT entry must be created for both
62 global and local TLS symbols. Note that this is different from
63 non-TLS local objects, which do not need a GOT entry.
65 In the traditional TLS mechanism, the double GOT entry is used to
66 provide the tls_index structure, containing module and offset
67 entries. The static linker places the relocation R_AARCH64_TLS_DTPMOD
68 on the module entry. The loader will subsequently fixup this
69 relocation with the module identity.
71 For global traditional TLS symbols the static linker places an
72 R_AARCH64_TLS_DTPREL relocation on the offset entry. The loader
73 will subsequently fixup the offset. For local TLS symbols the static
74 linker fixes up the offset.
76 In the TLS descriptor mechanism the double GOT entry is used to
77 provide the descriptor. The static linker places the relocation
78 R_AARCH64_TLSDESC on the first GOT slot. The loader will
79 subsequently fix this up.
83 The handling of TLS symbols is implemented across a number of
84 different backend functions. The following is a top level view of
85 what processing is performed where.
87 The TLS implementation maintains state information for each TLS
88 symbol. The state information for local and global symbols is kept
89 in different places. Global symbols use generic BFD structures while
90 local symbols use backend specific structures that are allocated and
91 maintained entirely by the backend.
95 elfNN_aarch64_check_relocs()
97 This function is invoked for each relocation.
99 The TLS relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} and
100 R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC} are
101 spotted. The local symbol data structures are created once, when
102 the first local symbol is seen.
104 The reference count for a symbol is incremented. The GOT type for
105 each symbol is marked as general dynamic.
107 elfNN_aarch64_allocate_dynrelocs ()
109 For each global symbol with a positive reference count we allocate a
110 double GOT slot. For a traditional TLS symbol we allocate space for
111 two relocation entries on the GOT; for a TLS descriptor symbol we
112 allocate space for one relocation on the slot. Record the GOT offset for this symbol.
115 elfNN_aarch64_size_dynamic_sections ()
117 Iterate over all input BFDs, look in the local symbol data structures
118 constructed earlier for local TLS symbols, and allocate them double
119 GOT slots along with space for a single GOT relocation. Update the
120 local symbol structure to record the GOT offset allocated.
122 elfNN_aarch64_relocate_section ()
124 Calls elfNN_aarch64_final_link_relocate ()
126 Emit the relevant TLS relocations against the GOT for each TLS
127 symbol. For local TLS symbols emit the GOT offset directly. The GOT
128 relocations are emitted once the first time a TLS symbol is
129 encountered. The implementation uses the LSB of the GOT offset to
130 flag that the relevant GOT relocations for a symbol have been
131 emitted. All of the TLS code that uses the GOT offset needs to take
132 care to mask out this flag bit before using the offset.
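   As an illustrative sketch only (this is not the backend's exact code;
   "got_offset" here is a hypothetical bfd_vma holding the recorded GOT
   offset for a symbol):

	if ((got_offset & 1) == 0)
	  {
	    ... emit the GOT relocation(s) for this symbol ...
	    got_offset |= 1;
	  }
	got_offset &= ~(bfd_vma) 1;	... now safe to use as an offset ...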
134 elfNN_aarch64_final_link_relocate ()
136 Fixup the R_AARCH64_TLSGD_{ADR_PREL21, ADD_LO12_NC} relocations. */
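/* A rough picture of the double GOT slot described above (illustrative
   only; the field labels are descriptive and not taken from this file):

     traditional TLS (GD):                 TLS descriptor:
       GOT[n]   tls_index.module            GOT[n]   resolver function
                (R_AARCH64_TLS_DTPMOD)               (R_AARCH64_TLSDESC)
       GOT[n+1] tls_index.offset            GOT[n+1] resolver argument
                (R_AARCH64_TLS_DTPREL)

   with the loader filling the entries in at run time as described
   above.  */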
140 #include "libiberty.h"
142 #include "bfd_stdint.h"
145 #include "objalloc.h"
146 #include "elf/aarch64.h"
147 #include "elfxx-aarch64.h"
152 #define AARCH64_R(NAME) R_AARCH64_ ## NAME
153 #define AARCH64_R_STR(NAME) "R_AARCH64_" #NAME
154 #define HOWTO64(...) HOWTO (__VA_ARGS__)
155 #define HOWTO32(...) EMPTY_HOWTO (0)
156 #define LOG_FILE_ALIGN 3
160 #define AARCH64_R(NAME) R_AARCH64_P32_ ## NAME
161 #define AARCH64_R_STR(NAME) "R_AARCH64_P32_" #NAME
162 #define HOWTO64(...) EMPTY_HOWTO (0)
163 #define HOWTO32(...) HOWTO (__VA_ARGS__)
164 #define LOG_FILE_ALIGN 2
167 #define IS_AARCH64_TLS_RELOC(R_TYPE) \
168 ((R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC \
169 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 \
170 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PREL21 \
171 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
172 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC \
173 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC \
174 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
175 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC \
176 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1 \
177 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21 \
178 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12 \
179 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12 \
180 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC \
181 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0 \
182 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC \
183 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 \
184 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC \
185 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2 \
186 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPMOD \
187 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPREL \
188 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_TPREL \
189 || IS_AARCH64_TLSDESC_RELOC ((R_TYPE)))
191 #define IS_AARCH64_TLSDESC_RELOC(R_TYPE) \
192 ((R_TYPE) == BFD_RELOC_AARCH64_TLSDESC \
193 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD \
194 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC \
195 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21 \
196 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21 \
197 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_CALL \
198 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC \
199 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC \
200 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDR \
201 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD_PREL19 \
202 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC \
203 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G1)
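/* Illustrative sketch only (not lifted from this file): predicates like
   these are typically consulted when classifying a relocation's GOT
   requirements, along the lines of:

     if (IS_AARCH64_TLSDESC_RELOC (r_type))
       got_type = GOT_TLSDESC_GD;
     else if (IS_AARCH64_TLS_RELOC (r_type))
       got_type = GOT_TLS_GD;

   where GOT_TLSDESC_GD and GOT_TLS_GD are the GOT type flags defined
   later in this file.  */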
205 #define ELIMINATE_COPY_RELOCS 0
207 /* Return size of a relocation entry. HTAB is the bfd's
208 elf_aarch64_link_hash_table. */
209 #define RELOC_SIZE(HTAB) (sizeof (ElfNN_External_Rela))
211 /* GOT Entry size - 8 bytes in ELF64 and 4 bytes in ELF32. */
212 #define GOT_ENTRY_SIZE (ARCH_SIZE / 8)
213 #define PLT_ENTRY_SIZE (32)
214 #define PLT_SMALL_ENTRY_SIZE (16)
215 #define PLT_TLSDESC_ENTRY_SIZE (32)
217 /* Encoding of the nop instruction */
218 #define INSN_NOP 0xd503201f
220 #define aarch64_compute_jump_table_size(htab) \
221 (((htab)->root.srelplt == NULL) ? 0 \
222 : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
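/* Each PLT jump slot has one relocation in .rela.plt, so the expression
   above works out to the size in bytes of the jump-slot portion of the
   .got.plt section.  */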
224 /* The first entry in a procedure linkage table looks like this.
225 These PLT entries are used if the distance between the PLTGOT and
226 the PLT is < 4GB. Note that the dynamic linker gets &PLTGOT[2]
227 in x16 and needs to work out PLTGOT[1] by using an address of
228 [x16,#-GOT_ENTRY_SIZE]. */
229 static const bfd_byte elfNN_aarch64_small_plt0_entry[PLT_ENTRY_SIZE] =
231 0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! */
232 0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */
234 0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */
235 0x10, 0x42, 0x00, 0x91, /* add x16, x16,#PLT_GOT+0x10 */
237 0x11, 0x0A, 0x40, 0xb9, /* ldr w17, [x16, #PLT_GOT+0x8] */
238 0x10, 0x22, 0x00, 0x11, /* add w16, w16,#PLT_GOT+0x8 */
240 0x20, 0x02, 0x1f, 0xd6, /* br x17 */
241 0x1f, 0x20, 0x03, 0xd5, /* nop */
242 0x1f, 0x20, 0x03, 0xd5, /* nop */
243 0x1f, 0x20, 0x03, 0xd5, /* nop */
246 /* A per-function entry in a procedure linkage table looks like this.
247 These PLT entries are used if the distance between the PLTGOT and
248 the PLT is < 4GB. */
249 static const bfd_byte elfNN_aarch64_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
251 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
253 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
254 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
256 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */
257 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */
259 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
262 static const bfd_byte
263 elfNN_aarch64_tlsdesc_small_plt_entry[PLT_TLSDESC_ENTRY_SIZE] =
265 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */
266 0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */
267 0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */
269 0x42, 0x00, 0x40, 0xf9, /* ldr x2, [x2, #0] */
270 0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */
272 0x42, 0x00, 0x40, 0xb9, /* ldr w2, [x2, #0] */
273 0x63, 0x00, 0x00, 0x11, /* add w3, w3, 0 */
275 0x40, 0x00, 0x1f, 0xd6, /* br x2 */
276 0x1f, 0x20, 0x03, 0xd5, /* nop */
277 0x1f, 0x20, 0x03, 0xd5, /* nop */
280 #define elf_info_to_howto elfNN_aarch64_info_to_howto
281 #define elf_info_to_howto_rel elfNN_aarch64_info_to_howto
283 #define AARCH64_ELF_ABI_VERSION 0
285 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
286 #define ALL_ONES (~ (bfd_vma) 0)
288 /* Indexed by the bfd internal reloc enumerators.
289 Therefore, the table needs to be synced with BFD_RELOC_AARCH64_*
292 static reloc_howto_type elfNN_aarch64_howto_table[] =
296 /* Basic data relocations. */
299 HOWTO (R_AARCH64_NULL, /* type */
301 3, /* size (0 = byte, 1 = short, 2 = long) */
303 FALSE, /* pc_relative */
305 complain_overflow_dont, /* complain_on_overflow */
306 bfd_elf_generic_reloc, /* special_function */
307 "R_AARCH64_NULL", /* name */
308 FALSE, /* partial_inplace */
311 FALSE), /* pcrel_offset */
313 HOWTO (R_AARCH64_NONE, /* type */
315 3, /* size (0 = byte, 1 = short, 2 = long) */
317 FALSE, /* pc_relative */
319 complain_overflow_dont, /* complain_on_overflow */
320 bfd_elf_generic_reloc, /* special_function */
321 "R_AARCH64_NONE", /* name */
322 FALSE, /* partial_inplace */
325 FALSE), /* pcrel_offset */
329 HOWTO64 (AARCH64_R (ABS64), /* type */
331 4, /* size (4 = long long) */
333 FALSE, /* pc_relative */
335 complain_overflow_unsigned, /* complain_on_overflow */
336 bfd_elf_generic_reloc, /* special_function */
337 AARCH64_R_STR (ABS64), /* name */
338 FALSE, /* partial_inplace */
339 ALL_ONES, /* src_mask */
340 ALL_ONES, /* dst_mask */
341 FALSE), /* pcrel_offset */
344 HOWTO (AARCH64_R (ABS32), /* type */
346 2, /* size (0 = byte, 1 = short, 2 = long) */
348 FALSE, /* pc_relative */
350 complain_overflow_unsigned, /* complain_on_overflow */
351 bfd_elf_generic_reloc, /* special_function */
352 AARCH64_R_STR (ABS32), /* name */
353 FALSE, /* partial_inplace */
354 0xffffffff, /* src_mask */
355 0xffffffff, /* dst_mask */
356 FALSE), /* pcrel_offset */
359 HOWTO (AARCH64_R (ABS16), /* type */
361 1, /* size (0 = byte, 1 = short, 2 = long) */
363 FALSE, /* pc_relative */
365 complain_overflow_unsigned, /* complain_on_overflow */
366 bfd_elf_generic_reloc, /* special_function */
367 AARCH64_R_STR (ABS16), /* name */
368 FALSE, /* partial_inplace */
369 0xffff, /* src_mask */
370 0xffff, /* dst_mask */
371 FALSE), /* pcrel_offset */
373 /* .xword: (S+A-P) */
374 HOWTO64 (AARCH64_R (PREL64), /* type */
376 4, /* size (4 = long long) */
378 TRUE, /* pc_relative */
380 complain_overflow_signed, /* complain_on_overflow */
381 bfd_elf_generic_reloc, /* special_function */
382 AARCH64_R_STR (PREL64), /* name */
383 FALSE, /* partial_inplace */
384 ALL_ONES, /* src_mask */
385 ALL_ONES, /* dst_mask */
386 TRUE), /* pcrel_offset */
389 HOWTO (AARCH64_R (PREL32), /* type */
391 2, /* size (0 = byte, 1 = short, 2 = long) */
393 TRUE, /* pc_relative */
395 complain_overflow_signed, /* complain_on_overflow */
396 bfd_elf_generic_reloc, /* special_function */
397 AARCH64_R_STR (PREL32), /* name */
398 FALSE, /* partial_inplace */
399 0xffffffff, /* src_mask */
400 0xffffffff, /* dst_mask */
401 TRUE), /* pcrel_offset */
404 HOWTO (AARCH64_R (PREL16), /* type */
406 1, /* size (0 = byte, 1 = short, 2 = long) */
408 TRUE, /* pc_relative */
410 complain_overflow_signed, /* complain_on_overflow */
411 bfd_elf_generic_reloc, /* special_function */
412 AARCH64_R_STR (PREL16), /* name */
413 FALSE, /* partial_inplace */
414 0xffff, /* src_mask */
415 0xffff, /* dst_mask */
416 TRUE), /* pcrel_offset */
418 /* Group relocations to create a 16, 32, 48 or 64 bit
419 unsigned data or abs address inline. */
421 /* MOVZ: ((S+A) >> 0) & 0xffff */
422 HOWTO (AARCH64_R (MOVW_UABS_G0), /* type */
424 2, /* size (0 = byte, 1 = short, 2 = long) */
426 FALSE, /* pc_relative */
428 complain_overflow_unsigned, /* complain_on_overflow */
429 bfd_elf_generic_reloc, /* special_function */
430 AARCH64_R_STR (MOVW_UABS_G0), /* name */
431 FALSE, /* partial_inplace */
432 0xffff, /* src_mask */
433 0xffff, /* dst_mask */
434 FALSE), /* pcrel_offset */
436 /* MOVK: ((S+A) >> 0) & 0xffff [no overflow check] */
437 HOWTO (AARCH64_R (MOVW_UABS_G0_NC), /* type */
439 2, /* size (0 = byte, 1 = short, 2 = long) */
441 FALSE, /* pc_relative */
443 complain_overflow_dont, /* complain_on_overflow */
444 bfd_elf_generic_reloc, /* special_function */
445 AARCH64_R_STR (MOVW_UABS_G0_NC), /* name */
446 FALSE, /* partial_inplace */
447 0xffff, /* src_mask */
448 0xffff, /* dst_mask */
449 FALSE), /* pcrel_offset */
451 /* MOVZ: ((S+A) >> 16) & 0xffff */
452 HOWTO (AARCH64_R (MOVW_UABS_G1), /* type */
454 2, /* size (0 = byte, 1 = short, 2 = long) */
456 FALSE, /* pc_relative */
458 complain_overflow_unsigned, /* complain_on_overflow */
459 bfd_elf_generic_reloc, /* special_function */
460 AARCH64_R_STR (MOVW_UABS_G1), /* name */
461 FALSE, /* partial_inplace */
462 0xffff, /* src_mask */
463 0xffff, /* dst_mask */
464 FALSE), /* pcrel_offset */
466 /* MOVK: ((S+A) >> 16) & 0xffff [no overflow check] */
467 HOWTO64 (AARCH64_R (MOVW_UABS_G1_NC), /* type */
469 2, /* size (0 = byte, 1 = short, 2 = long) */
471 FALSE, /* pc_relative */
473 complain_overflow_dont, /* complain_on_overflow */
474 bfd_elf_generic_reloc, /* special_function */
475 AARCH64_R_STR (MOVW_UABS_G1_NC), /* name */
476 FALSE, /* partial_inplace */
477 0xffff, /* src_mask */
478 0xffff, /* dst_mask */
479 FALSE), /* pcrel_offset */
481 /* MOVZ: ((S+A) >> 32) & 0xffff */
482 HOWTO64 (AARCH64_R (MOVW_UABS_G2), /* type */
484 2, /* size (0 = byte, 1 = short, 2 = long) */
486 FALSE, /* pc_relative */
488 complain_overflow_unsigned, /* complain_on_overflow */
489 bfd_elf_generic_reloc, /* special_function */
490 AARCH64_R_STR (MOVW_UABS_G2), /* name */
491 FALSE, /* partial_inplace */
492 0xffff, /* src_mask */
493 0xffff, /* dst_mask */
494 FALSE), /* pcrel_offset */
496 /* MOVK: ((S+A) >> 32) & 0xffff [no overflow check] */
497 HOWTO64 (AARCH64_R (MOVW_UABS_G2_NC), /* type */
499 2, /* size (0 = byte, 1 = short, 2 = long) */
501 FALSE, /* pc_relative */
503 complain_overflow_dont, /* complain_on_overflow */
504 bfd_elf_generic_reloc, /* special_function */
505 AARCH64_R_STR (MOVW_UABS_G2_NC), /* name */
506 FALSE, /* partial_inplace */
507 0xffff, /* src_mask */
508 0xffff, /* dst_mask */
509 FALSE), /* pcrel_offset */
511 /* MOVZ: ((S+A) >> 48) & 0xffff */
512 HOWTO64 (AARCH64_R (MOVW_UABS_G3), /* type */
514 2, /* size (0 = byte, 1 = short, 2 = long) */
516 FALSE, /* pc_relative */
518 complain_overflow_unsigned, /* complain_on_overflow */
519 bfd_elf_generic_reloc, /* special_function */
520 AARCH64_R_STR (MOVW_UABS_G3), /* name */
521 FALSE, /* partial_inplace */
522 0xffff, /* src_mask */
523 0xffff, /* dst_mask */
524 FALSE), /* pcrel_offset */
526 /* Group relocations to create high part of a 16, 32, 48 or 64 bit
527 signed data or abs address inline. Will change instruction
528 to MOVN or MOVZ depending on sign of calculated value. */
530 /* MOV[ZN]: ((S+A) >> 0) & 0xffff */
531 HOWTO (AARCH64_R (MOVW_SABS_G0), /* type */
533 2, /* size (0 = byte, 1 = short, 2 = long) */
535 FALSE, /* pc_relative */
537 complain_overflow_signed, /* complain_on_overflow */
538 bfd_elf_generic_reloc, /* special_function */
539 AARCH64_R_STR (MOVW_SABS_G0), /* name */
540 FALSE, /* partial_inplace */
541 0xffff, /* src_mask */
542 0xffff, /* dst_mask */
543 FALSE), /* pcrel_offset */
545 /* MOV[ZN]: ((S+A) >> 16) & 0xffff */
546 HOWTO64 (AARCH64_R (MOVW_SABS_G1), /* type */
548 2, /* size (0 = byte, 1 = short, 2 = long) */
550 FALSE, /* pc_relative */
552 complain_overflow_signed, /* complain_on_overflow */
553 bfd_elf_generic_reloc, /* special_function */
554 AARCH64_R_STR (MOVW_SABS_G1), /* name */
555 FALSE, /* partial_inplace */
556 0xffff, /* src_mask */
557 0xffff, /* dst_mask */
558 FALSE), /* pcrel_offset */
560 /* MOV[ZN]: ((S+A) >> 32) & 0xffff */
561 HOWTO64 (AARCH64_R (MOVW_SABS_G2), /* type */
563 2, /* size (0 = byte, 1 = short, 2 = long) */
565 FALSE, /* pc_relative */
567 complain_overflow_signed, /* complain_on_overflow */
568 bfd_elf_generic_reloc, /* special_function */
569 AARCH64_R_STR (MOVW_SABS_G2), /* name */
570 FALSE, /* partial_inplace */
571 0xffff, /* src_mask */
572 0xffff, /* dst_mask */
573 FALSE), /* pcrel_offset */
575 /* Relocations to generate 19, 21 and 33 bit PC-relative load/store
576 addresses: PG(x) is (x & ~0xfff). */
578 /* LD-lit: ((S+A-P) >> 2) & 0x7ffff */
579 HOWTO (AARCH64_R (LD_PREL_LO19), /* type */
581 2, /* size (0 = byte, 1 = short, 2 = long) */
583 TRUE, /* pc_relative */
585 complain_overflow_signed, /* complain_on_overflow */
586 bfd_elf_generic_reloc, /* special_function */
587 AARCH64_R_STR (LD_PREL_LO19), /* name */
588 FALSE, /* partial_inplace */
589 0x7ffff, /* src_mask */
590 0x7ffff, /* dst_mask */
591 TRUE), /* pcrel_offset */
593 /* ADR: (S+A-P) & 0x1fffff */
594 HOWTO (AARCH64_R (ADR_PREL_LO21), /* type */
596 2, /* size (0 = byte, 1 = short, 2 = long) */
598 TRUE, /* pc_relative */
600 complain_overflow_signed, /* complain_on_overflow */
601 bfd_elf_generic_reloc, /* special_function */
602 AARCH64_R_STR (ADR_PREL_LO21), /* name */
603 FALSE, /* partial_inplace */
604 0x1fffff, /* src_mask */
605 0x1fffff, /* dst_mask */
606 TRUE), /* pcrel_offset */
608 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
609 HOWTO (AARCH64_R (ADR_PREL_PG_HI21), /* type */
611 2, /* size (0 = byte, 1 = short, 2 = long) */
613 TRUE, /* pc_relative */
615 complain_overflow_signed, /* complain_on_overflow */
616 bfd_elf_generic_reloc, /* special_function */
617 AARCH64_R_STR (ADR_PREL_PG_HI21), /* name */
618 FALSE, /* partial_inplace */
619 0x1fffff, /* src_mask */
620 0x1fffff, /* dst_mask */
621 TRUE), /* pcrel_offset */
623 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff [no overflow check] */
624 HOWTO64 (AARCH64_R (ADR_PREL_PG_HI21_NC), /* type */
626 2, /* size (0 = byte, 1 = short, 2 = long) */
628 TRUE, /* pc_relative */
630 complain_overflow_dont, /* complain_on_overflow */
631 bfd_elf_generic_reloc, /* special_function */
632 AARCH64_R_STR (ADR_PREL_PG_HI21_NC), /* name */
633 FALSE, /* partial_inplace */
634 0x1fffff, /* src_mask */
635 0x1fffff, /* dst_mask */
636 TRUE), /* pcrel_offset */
638 /* ADD: (S+A) & 0xfff [no overflow check] */
639 HOWTO (AARCH64_R (ADD_ABS_LO12_NC), /* type */
641 2, /* size (0 = byte, 1 = short, 2 = long) */
643 FALSE, /* pc_relative */
645 complain_overflow_dont, /* complain_on_overflow */
646 bfd_elf_generic_reloc, /* special_function */
647 AARCH64_R_STR (ADD_ABS_LO12_NC), /* name */
648 FALSE, /* partial_inplace */
649 0x3ffc00, /* src_mask */
650 0x3ffc00, /* dst_mask */
651 FALSE), /* pcrel_offset */
653 /* LD/ST8: (S+A) & 0xfff */
654 HOWTO (AARCH64_R (LDST8_ABS_LO12_NC), /* type */
656 2, /* size (0 = byte, 1 = short, 2 = long) */
658 FALSE, /* pc_relative */
660 complain_overflow_dont, /* complain_on_overflow */
661 bfd_elf_generic_reloc, /* special_function */
662 AARCH64_R_STR (LDST8_ABS_LO12_NC), /* name */
663 FALSE, /* partial_inplace */
664 0xfff, /* src_mask */
665 0xfff, /* dst_mask */
666 FALSE), /* pcrel_offset */
668 /* Relocations for control-flow instructions. */
670 /* TBZ/NZ: ((S+A-P) >> 2) & 0x3fff */
671 HOWTO (AARCH64_R (TSTBR14), /* type */
673 2, /* size (0 = byte, 1 = short, 2 = long) */
675 TRUE, /* pc_relative */
677 complain_overflow_signed, /* complain_on_overflow */
678 bfd_elf_generic_reloc, /* special_function */
679 AARCH64_R_STR (TSTBR14), /* name */
680 FALSE, /* partial_inplace */
681 0x3fff, /* src_mask */
682 0x3fff, /* dst_mask */
683 TRUE), /* pcrel_offset */
685 /* B.cond: ((S+A-P) >> 2) & 0x7ffff */
686 HOWTO (AARCH64_R (CONDBR19), /* type */
688 2, /* size (0 = byte, 1 = short, 2 = long) */
690 TRUE, /* pc_relative */
692 complain_overflow_signed, /* complain_on_overflow */
693 bfd_elf_generic_reloc, /* special_function */
694 AARCH64_R_STR (CONDBR19), /* name */
695 FALSE, /* partial_inplace */
696 0x7ffff, /* src_mask */
697 0x7ffff, /* dst_mask */
698 TRUE), /* pcrel_offset */
700 /* B: ((S+A-P) >> 2) & 0x3ffffff */
701 HOWTO (AARCH64_R (JUMP26), /* type */
703 2, /* size (0 = byte, 1 = short, 2 = long) */
705 TRUE, /* pc_relative */
707 complain_overflow_signed, /* complain_on_overflow */
708 bfd_elf_generic_reloc, /* special_function */
709 AARCH64_R_STR (JUMP26), /* name */
710 FALSE, /* partial_inplace */
711 0x3ffffff, /* src_mask */
712 0x3ffffff, /* dst_mask */
713 TRUE), /* pcrel_offset */
715 /* BL: ((S+A-P) >> 2) & 0x3ffffff */
716 HOWTO (AARCH64_R (CALL26), /* type */
718 2, /* size (0 = byte, 1 = short, 2 = long) */
720 TRUE, /* pc_relative */
722 complain_overflow_signed, /* complain_on_overflow */
723 bfd_elf_generic_reloc, /* special_function */
724 AARCH64_R_STR (CALL26), /* name */
725 FALSE, /* partial_inplace */
726 0x3ffffff, /* src_mask */
727 0x3ffffff, /* dst_mask */
728 TRUE), /* pcrel_offset */
730 /* LD/ST16: (S+A) & 0xffe */
731 HOWTO (AARCH64_R (LDST16_ABS_LO12_NC), /* type */
733 2, /* size (0 = byte, 1 = short, 2 = long) */
735 FALSE, /* pc_relative */
737 complain_overflow_dont, /* complain_on_overflow */
738 bfd_elf_generic_reloc, /* special_function */
739 AARCH64_R_STR (LDST16_ABS_LO12_NC), /* name */
740 FALSE, /* partial_inplace */
741 0xffe, /* src_mask */
742 0xffe, /* dst_mask */
743 FALSE), /* pcrel_offset */
745 /* LD/ST32: (S+A) & 0xffc */
746 HOWTO (AARCH64_R (LDST32_ABS_LO12_NC), /* type */
748 2, /* size (0 = byte, 1 = short, 2 = long) */
750 FALSE, /* pc_relative */
752 complain_overflow_dont, /* complain_on_overflow */
753 bfd_elf_generic_reloc, /* special_function */
754 AARCH64_R_STR (LDST32_ABS_LO12_NC), /* name */
755 FALSE, /* partial_inplace */
756 0xffc, /* src_mask */
757 0xffc, /* dst_mask */
758 FALSE), /* pcrel_offset */
760 /* LD/ST64: (S+A) & 0xff8 */
761 HOWTO (AARCH64_R (LDST64_ABS_LO12_NC), /* type */
763 2, /* size (0 = byte, 1 = short, 2 = long) */
765 FALSE, /* pc_relative */
767 complain_overflow_dont, /* complain_on_overflow */
768 bfd_elf_generic_reloc, /* special_function */
769 AARCH64_R_STR (LDST64_ABS_LO12_NC), /* name */
770 FALSE, /* partial_inplace */
771 0xff8, /* src_mask */
772 0xff8, /* dst_mask */
773 FALSE), /* pcrel_offset */
775 /* LD/ST128: (S+A) & 0xff0 */
776 HOWTO (AARCH64_R (LDST128_ABS_LO12_NC), /* type */
778 2, /* size (0 = byte, 1 = short, 2 = long) */
780 FALSE, /* pc_relative */
782 complain_overflow_dont, /* complain_on_overflow */
783 bfd_elf_generic_reloc, /* special_function */
784 AARCH64_R_STR (LDST128_ABS_LO12_NC), /* name */
785 FALSE, /* partial_inplace */
786 0xff0, /* src_mask */
787 0xff0, /* dst_mask */
788 FALSE), /* pcrel_offset */
790 /* Set a load-literal immediate field to bits
791 0x1FFFFC of G(S)-P */
792 HOWTO (AARCH64_R (GOT_LD_PREL19), /* type */
794 2, /* size (0 = byte,1 = short,2 = long) */
796 TRUE, /* pc_relative */
798 complain_overflow_signed, /* complain_on_overflow */
799 bfd_elf_generic_reloc, /* special_function */
800 AARCH64_R_STR (GOT_LD_PREL19), /* name */
801 FALSE, /* partial_inplace */
802 0xffffe0, /* src_mask */
803 0xffffe0, /* dst_mask */
804 TRUE), /* pcrel_offset */
806 /* Get to the page for the GOT entry for the symbol
807 (G(S) - P) using an ADRP instruction. */
808 HOWTO (AARCH64_R (ADR_GOT_PAGE), /* type */
810 2, /* size (0 = byte, 1 = short, 2 = long) */
812 TRUE, /* pc_relative */
814 complain_overflow_dont, /* complain_on_overflow */
815 bfd_elf_generic_reloc, /* special_function */
816 AARCH64_R_STR (ADR_GOT_PAGE), /* name */
817 FALSE, /* partial_inplace */
818 0x1fffff, /* src_mask */
819 0x1fffff, /* dst_mask */
820 TRUE), /* pcrel_offset */
822 /* LD64: GOT offset G(S) & 0xff8 */
823 HOWTO64 (AARCH64_R (LD64_GOT_LO12_NC), /* type */
825 2, /* size (0 = byte, 1 = short, 2 = long) */
827 FALSE, /* pc_relative */
829 complain_overflow_dont, /* complain_on_overflow */
830 bfd_elf_generic_reloc, /* special_function */
831 AARCH64_R_STR (LD64_GOT_LO12_NC), /* name */
832 FALSE, /* partial_inplace */
833 0xff8, /* src_mask */
834 0xff8, /* dst_mask */
835 FALSE), /* pcrel_offset */
837 /* LD32: GOT offset G(S) & 0xffc */
838 HOWTO32 (AARCH64_R (LD32_GOT_LO12_NC), /* type */
840 2, /* size (0 = byte, 1 = short, 2 = long) */
842 FALSE, /* pc_relative */
844 complain_overflow_dont, /* complain_on_overflow */
845 bfd_elf_generic_reloc, /* special_function */
846 AARCH64_R_STR (LD32_GOT_LO12_NC), /* name */
847 FALSE, /* partial_inplace */
848 0xffc, /* src_mask */
849 0xffc, /* dst_mask */
850 FALSE), /* pcrel_offset */
852 /* LD64: GOT offset for the symbol. */
853 HOWTO64 (AARCH64_R (LD64_GOTOFF_LO15), /* type */
855 2, /* size (0 = byte, 1 = short, 2 = long) */
857 FALSE, /* pc_relative */
859 complain_overflow_unsigned, /* complain_on_overflow */
860 bfd_elf_generic_reloc, /* special_function */
861 AARCH64_R_STR (LD64_GOTOFF_LO15), /* name */
862 FALSE, /* partial_inplace */
863 0x7ff8, /* src_mask */
864 0x7ff8, /* dst_mask */
865 FALSE), /* pcrel_offset */
867 /* LD32: GOT offset to the page address of GOT table.
868 (G(S) - PAGE (_GLOBAL_OFFSET_TABLE_)) & 0x5ffc. */
869 HOWTO32 (AARCH64_R (LD32_GOTPAGE_LO14), /* type */
871 2, /* size (0 = byte, 1 = short, 2 = long) */
873 FALSE, /* pc_relative */
875 complain_overflow_unsigned, /* complain_on_overflow */
876 bfd_elf_generic_reloc, /* special_function */
877 AARCH64_R_STR (LD32_GOTPAGE_LO14), /* name */
878 FALSE, /* partial_inplace */
879 0x5ffc, /* src_mask */
880 0x5ffc, /* dst_mask */
881 FALSE), /* pcrel_offset */
883 /* LD64: GOT offset to the page address of GOT table.
884 (G(S) - PAGE (_GLOBAL_OFFSET_TABLE_)) & 0x7ff8. */
885 HOWTO64 (AARCH64_R (LD64_GOTPAGE_LO15), /* type */
887 2, /* size (0 = byte, 1 = short, 2 = long) */
889 FALSE, /* pc_relative */
891 complain_overflow_unsigned, /* complain_on_overflow */
892 bfd_elf_generic_reloc, /* special_function */
893 AARCH64_R_STR (LD64_GOTPAGE_LO15), /* name */
894 FALSE, /* partial_inplace */
895 0x7ff8, /* src_mask */
896 0x7ff8, /* dst_mask */
897 FALSE), /* pcrel_offset */
899 /* Get to the page for the GOT entry for the symbol
900 (G(S) - P) using an ADRP instruction. */
901 HOWTO (AARCH64_R (TLSGD_ADR_PAGE21), /* type */
903 2, /* size (0 = byte, 1 = short, 2 = long) */
905 TRUE, /* pc_relative */
907 complain_overflow_dont, /* complain_on_overflow */
908 bfd_elf_generic_reloc, /* special_function */
909 AARCH64_R_STR (TLSGD_ADR_PAGE21), /* name */
910 FALSE, /* partial_inplace */
911 0x1fffff, /* src_mask */
912 0x1fffff, /* dst_mask */
913 TRUE), /* pcrel_offset */
915 HOWTO (AARCH64_R (TLSGD_ADR_PREL21), /* type */
917 2, /* size (0 = byte, 1 = short, 2 = long) */
919 TRUE, /* pc_relative */
921 complain_overflow_dont, /* complain_on_overflow */
922 bfd_elf_generic_reloc, /* special_function */
923 AARCH64_R_STR (TLSGD_ADR_PREL21), /* name */
924 FALSE, /* partial_inplace */
925 0x1fffff, /* src_mask */
926 0x1fffff, /* dst_mask */
927 TRUE), /* pcrel_offset */
929 /* ADD: GOT offset G(S) & 0xff8 [no overflow check] */
930 HOWTO (AARCH64_R (TLSGD_ADD_LO12_NC), /* type */
932 2, /* size (0 = byte, 1 = short, 2 = long) */
934 FALSE, /* pc_relative */
936 complain_overflow_dont, /* complain_on_overflow */
937 bfd_elf_generic_reloc, /* special_function */
938 AARCH64_R_STR (TLSGD_ADD_LO12_NC), /* name */
939 FALSE, /* partial_inplace */
940 0xfff, /* src_mask */
941 0xfff, /* dst_mask */
942 FALSE), /* pcrel_offset */
944 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G1), /* type */
946 2, /* size (0 = byte, 1 = short, 2 = long) */
948 FALSE, /* pc_relative */
950 complain_overflow_dont, /* complain_on_overflow */
951 bfd_elf_generic_reloc, /* special_function */
952 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G1), /* name */
953 FALSE, /* partial_inplace */
954 0xffff, /* src_mask */
955 0xffff, /* dst_mask */
956 FALSE), /* pcrel_offset */
958 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G0_NC), /* type */
960 2, /* size (0 = byte, 1 = short, 2 = long) */
962 FALSE, /* pc_relative */
964 complain_overflow_dont, /* complain_on_overflow */
965 bfd_elf_generic_reloc, /* special_function */
966 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G0_NC), /* name */
967 FALSE, /* partial_inplace */
968 0xffff, /* src_mask */
969 0xffff, /* dst_mask */
970 FALSE), /* pcrel_offset */
972 HOWTO (AARCH64_R (TLSIE_ADR_GOTTPREL_PAGE21), /* type */
974 2, /* size (0 = byte, 1 = short, 2 = long) */
976 FALSE, /* pc_relative */
978 complain_overflow_dont, /* complain_on_overflow */
979 bfd_elf_generic_reloc, /* special_function */
980 AARCH64_R_STR (TLSIE_ADR_GOTTPREL_PAGE21), /* name */
981 FALSE, /* partial_inplace */
982 0x1fffff, /* src_mask */
983 0x1fffff, /* dst_mask */
984 FALSE), /* pcrel_offset */
986 HOWTO64 (AARCH64_R (TLSIE_LD64_GOTTPREL_LO12_NC), /* type */
988 2, /* size (0 = byte, 1 = short, 2 = long) */
990 FALSE, /* pc_relative */
992 complain_overflow_dont, /* complain_on_overflow */
993 bfd_elf_generic_reloc, /* special_function */
994 AARCH64_R_STR (TLSIE_LD64_GOTTPREL_LO12_NC), /* name */
995 FALSE, /* partial_inplace */
996 0xff8, /* src_mask */
997 0xff8, /* dst_mask */
998 FALSE), /* pcrel_offset */
1000 HOWTO32 (AARCH64_R (TLSIE_LD32_GOTTPREL_LO12_NC), /* type */
1002 2, /* size (0 = byte, 1 = short, 2 = long) */
1004 FALSE, /* pc_relative */
1006 complain_overflow_dont, /* complain_on_overflow */
1007 bfd_elf_generic_reloc, /* special_function */
1008 AARCH64_R_STR (TLSIE_LD32_GOTTPREL_LO12_NC), /* name */
1009 FALSE, /* partial_inplace */
1010 0xffc, /* src_mask */
1011 0xffc, /* dst_mask */
1012 FALSE), /* pcrel_offset */
1014 HOWTO (AARCH64_R (TLSIE_LD_GOTTPREL_PREL19), /* type */
1016 2, /* size (0 = byte, 1 = short, 2 = long) */
1018 FALSE, /* pc_relative */
1020 complain_overflow_dont, /* complain_on_overflow */
1021 bfd_elf_generic_reloc, /* special_function */
1022 AARCH64_R_STR (TLSIE_LD_GOTTPREL_PREL19), /* name */
1023 FALSE, /* partial_inplace */
1024 0x1ffffc, /* src_mask */
1025 0x1ffffc, /* dst_mask */
1026 FALSE), /* pcrel_offset */
1028 /* Get to the page for the GOT entry for the symbol
1029 (G(S) - P) using an ADRP instruction. */
1030 HOWTO (AARCH64_R (TLSLD_ADR_PAGE21), /* type */
1031 12, /* rightshift */
1032 2, /* size (0 = byte, 1 = short, 2 = long) */
1034 TRUE, /* pc_relative */
1036 complain_overflow_signed, /* complain_on_overflow */
1037 bfd_elf_generic_reloc, /* special_function */
1038 AARCH64_R_STR (TLSLD_ADR_PAGE21), /* name */
1039 FALSE, /* partial_inplace */
1040 0x1fffff, /* src_mask */
1041 0x1fffff, /* dst_mask */
1042 TRUE), /* pcrel_offset */
1044 HOWTO (AARCH64_R (TLSLD_ADR_PREL21), /* type */
1046 2, /* size (0 = byte, 1 = short, 2 = long) */
1048 TRUE, /* pc_relative */
1050 complain_overflow_signed, /* complain_on_overflow */
1051 bfd_elf_generic_reloc, /* special_function */
1052 AARCH64_R_STR (TLSLD_ADR_PREL21), /* name */
1053 FALSE, /* partial_inplace */
1054 0x1fffff, /* src_mask */
1055 0x1fffff, /* dst_mask */
1056 TRUE), /* pcrel_offset */
1058 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G2), /* type */
1059 32, /* rightshift */
1060 2, /* size (0 = byte, 1 = short, 2 = long) */
1062 FALSE, /* pc_relative */
1064 complain_overflow_unsigned, /* complain_on_overflow */
1065 bfd_elf_generic_reloc, /* special_function */
1066 AARCH64_R_STR (TLSLE_MOVW_TPREL_G2), /* name */
1067 FALSE, /* partial_inplace */
1068 0xffff, /* src_mask */
1069 0xffff, /* dst_mask */
1070 FALSE), /* pcrel_offset */
1072 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G1), /* type */
1073 16, /* rightshift */
1074 2, /* size (0 = byte, 1 = short, 2 = long) */
1076 FALSE, /* pc_relative */
1078 complain_overflow_dont, /* complain_on_overflow */
1079 bfd_elf_generic_reloc, /* special_function */
1080 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1), /* name */
1081 FALSE, /* partial_inplace */
1082 0xffff, /* src_mask */
1083 0xffff, /* dst_mask */
1084 FALSE), /* pcrel_offset */
1086 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G1_NC), /* type */
1087 16, /* rightshift */
1088 2, /* size (0 = byte, 1 = short, 2 = long) */
1090 FALSE, /* pc_relative */
1092 complain_overflow_dont, /* complain_on_overflow */
1093 bfd_elf_generic_reloc, /* special_function */
1094 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1_NC), /* name */
1095 FALSE, /* partial_inplace */
1096 0xffff, /* src_mask */
1097 0xffff, /* dst_mask */
1098 FALSE), /* pcrel_offset */
1100 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0), /* type */
1102 2, /* size (0 = byte, 1 = short, 2 = long) */
1104 FALSE, /* pc_relative */
1106 complain_overflow_dont, /* complain_on_overflow */
1107 bfd_elf_generic_reloc, /* special_function */
1108 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0), /* name */
1109 FALSE, /* partial_inplace */
1110 0xffff, /* src_mask */
1111 0xffff, /* dst_mask */
1112 FALSE), /* pcrel_offset */
1114 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0_NC), /* type */
1116 2, /* size (0 = byte, 1 = short, 2 = long) */
1118 FALSE, /* pc_relative */
1120 complain_overflow_dont, /* complain_on_overflow */
1121 bfd_elf_generic_reloc, /* special_function */
1122 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0_NC), /* name */
1123 FALSE, /* partial_inplace */
1124 0xffff, /* src_mask */
1125 0xffff, /* dst_mask */
1126 FALSE), /* pcrel_offset */
1128 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_HI12), /* type */
1129 12, /* rightshift */
1130 2, /* size (0 = byte, 1 = short, 2 = long) */
1132 FALSE, /* pc_relative */
1134 complain_overflow_unsigned, /* complain_on_overflow */
1135 bfd_elf_generic_reloc, /* special_function */
1136 AARCH64_R_STR (TLSLE_ADD_TPREL_HI12), /* name */
1137 FALSE, /* partial_inplace */
1138 0xfff, /* src_mask */
1139 0xfff, /* dst_mask */
1140 FALSE), /* pcrel_offset */
1142 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12), /* type */
1144 2, /* size (0 = byte, 1 = short, 2 = long) */
1146 FALSE, /* pc_relative */
1148 complain_overflow_unsigned, /* complain_on_overflow */
1149 bfd_elf_generic_reloc, /* special_function */
1150 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12), /* name */
1151 FALSE, /* partial_inplace */
1152 0xfff, /* src_mask */
1153 0xfff, /* dst_mask */
1154 FALSE), /* pcrel_offset */
1156 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12_NC), /* type */
1158 2, /* size (0 = byte, 1 = short, 2 = long) */
1160 FALSE, /* pc_relative */
1162 complain_overflow_dont, /* complain_on_overflow */
1163 bfd_elf_generic_reloc, /* special_function */
1164 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12_NC), /* name */
1165 FALSE, /* partial_inplace */
1166 0xfff, /* src_mask */
1167 0xfff, /* dst_mask */
1168 FALSE), /* pcrel_offset */
1170 HOWTO (AARCH64_R (TLSDESC_LD_PREL19), /* type */
1172 2, /* size (0 = byte, 1 = short, 2 = long) */
1174 TRUE, /* pc_relative */
1176 complain_overflow_dont, /* complain_on_overflow */
1177 bfd_elf_generic_reloc, /* special_function */
1178 AARCH64_R_STR (TLSDESC_LD_PREL19), /* name */
1179 FALSE, /* partial_inplace */
1180 0x0ffffe0, /* src_mask */
1181 0x0ffffe0, /* dst_mask */
1182 TRUE), /* pcrel_offset */
1184 HOWTO (AARCH64_R (TLSDESC_ADR_PREL21), /* type */
1186 2, /* size (0 = byte, 1 = short, 2 = long) */
1188 TRUE, /* pc_relative */
1190 complain_overflow_dont, /* complain_on_overflow */
1191 bfd_elf_generic_reloc, /* special_function */
1192 AARCH64_R_STR (TLSDESC_ADR_PREL21), /* name */
1193 FALSE, /* partial_inplace */
1194 0x1fffff, /* src_mask */
1195 0x1fffff, /* dst_mask */
1196 TRUE), /* pcrel_offset */
1198 /* Get to the page for the GOT entry for the symbol
1199 (G(S) - P) using an ADRP instruction. */
1200 HOWTO (AARCH64_R (TLSDESC_ADR_PAGE21), /* type */
1201 12, /* rightshift */
1202 2, /* size (0 = byte, 1 = short, 2 = long) */
1204 TRUE, /* pc_relative */
1206 complain_overflow_dont, /* complain_on_overflow */
1207 bfd_elf_generic_reloc, /* special_function */
1208 AARCH64_R_STR (TLSDESC_ADR_PAGE21), /* name */
1209 FALSE, /* partial_inplace */
1210 0x1fffff, /* src_mask */
1211 0x1fffff, /* dst_mask */
1212 TRUE), /* pcrel_offset */
1214 /* LD64: GOT offset G(S) & 0xff8. */
1215 HOWTO64 (AARCH64_R (TLSDESC_LD64_LO12_NC), /* type */
1217 2, /* size (0 = byte, 1 = short, 2 = long) */
1219 FALSE, /* pc_relative */
1221 complain_overflow_dont, /* complain_on_overflow */
1222 bfd_elf_generic_reloc, /* special_function */
1223 AARCH64_R_STR (TLSDESC_LD64_LO12_NC), /* name */
1224 FALSE, /* partial_inplace */
1225 0xff8, /* src_mask */
1226 0xff8, /* dst_mask */
1227 FALSE), /* pcrel_offset */
1229 /* LD32: GOT offset G(S) & 0xffc. */
1230 HOWTO32 (AARCH64_R (TLSDESC_LD32_LO12_NC), /* type */
1232 2, /* size (0 = byte, 1 = short, 2 = long) */
1234 FALSE, /* pc_relative */
1236 complain_overflow_dont, /* complain_on_overflow */
1237 bfd_elf_generic_reloc, /* special_function */
1238 AARCH64_R_STR (TLSDESC_LD32_LO12_NC), /* name */
1239 FALSE, /* partial_inplace */
1240 0xffc, /* src_mask */
1241 0xffc, /* dst_mask */
1242 FALSE), /* pcrel_offset */
1244 /* ADD: GOT offset G(S) & 0xfff. */
1245 HOWTO (AARCH64_R (TLSDESC_ADD_LO12_NC), /* type */
1247 2, /* size (0 = byte, 1 = short, 2 = long) */
1249 FALSE, /* pc_relative */
1251 complain_overflow_dont, /* complain_on_overflow */
1252 bfd_elf_generic_reloc, /* special_function */
1253 AARCH64_R_STR (TLSDESC_ADD_LO12_NC), /* name */
1254 FALSE, /* partial_inplace */
1255 0xfff, /* src_mask */
1256 0xfff, /* dst_mask */
1257 FALSE), /* pcrel_offset */
1259 HOWTO64 (AARCH64_R (TLSDESC_OFF_G1), /* type */
1260 16, /* rightshift */
1261 2, /* size (0 = byte, 1 = short, 2 = long) */
1263 FALSE, /* pc_relative */
1265 complain_overflow_dont, /* complain_on_overflow */
1266 bfd_elf_generic_reloc, /* special_function */
1267 AARCH64_R_STR (TLSDESC_OFF_G1), /* name */
1268 FALSE, /* partial_inplace */
1269 0xffff, /* src_mask */
1270 0xffff, /* dst_mask */
1271 FALSE), /* pcrel_offset */
1273 HOWTO64 (AARCH64_R (TLSDESC_OFF_G0_NC), /* type */
1275 2, /* size (0 = byte, 1 = short, 2 = long) */
1277 FALSE, /* pc_relative */
1279 complain_overflow_dont, /* complain_on_overflow */
1280 bfd_elf_generic_reloc, /* special_function */
1281 AARCH64_R_STR (TLSDESC_OFF_G0_NC), /* name */
1282 FALSE, /* partial_inplace */
1283 0xffff, /* src_mask */
1284 0xffff, /* dst_mask */
1285 FALSE), /* pcrel_offset */
1287 HOWTO64 (AARCH64_R (TLSDESC_LDR), /* type */
1289 2, /* size (0 = byte, 1 = short, 2 = long) */
1291 FALSE, /* pc_relative */
1293 complain_overflow_dont, /* complain_on_overflow */
1294 bfd_elf_generic_reloc, /* special_function */
1295 AARCH64_R_STR (TLSDESC_LDR), /* name */
1296 FALSE, /* partial_inplace */
1299 FALSE), /* pcrel_offset */
1301 HOWTO64 (AARCH64_R (TLSDESC_ADD), /* type */
1303 2, /* size (0 = byte, 1 = short, 2 = long) */
1305 FALSE, /* pc_relative */
1307 complain_overflow_dont, /* complain_on_overflow */
1308 bfd_elf_generic_reloc, /* special_function */
1309 AARCH64_R_STR (TLSDESC_ADD), /* name */
1310 FALSE, /* partial_inplace */
1313 FALSE), /* pcrel_offset */
1315 HOWTO (AARCH64_R (TLSDESC_CALL), /* type */
1317 2, /* size (0 = byte, 1 = short, 2 = long) */
1319 FALSE, /* pc_relative */
1321 complain_overflow_dont, /* complain_on_overflow */
1322 bfd_elf_generic_reloc, /* special_function */
1323 AARCH64_R_STR (TLSDESC_CALL), /* name */
1324 FALSE, /* partial_inplace */
1327 FALSE), /* pcrel_offset */
1329 HOWTO (AARCH64_R (COPY), /* type */
1331 2, /* size (0 = byte, 1 = short, 2 = long) */
1333 FALSE, /* pc_relative */
1335 complain_overflow_bitfield, /* complain_on_overflow */
1336 bfd_elf_generic_reloc, /* special_function */
1337 AARCH64_R_STR (COPY), /* name */
1338 TRUE, /* partial_inplace */
1339 0xffffffff, /* src_mask */
1340 0xffffffff, /* dst_mask */
1341 FALSE), /* pcrel_offset */
1343 HOWTO (AARCH64_R (GLOB_DAT), /* type */
1345 2, /* size (0 = byte, 1 = short, 2 = long) */
1347 FALSE, /* pc_relative */
1349 complain_overflow_bitfield, /* complain_on_overflow */
1350 bfd_elf_generic_reloc, /* special_function */
1351 AARCH64_R_STR (GLOB_DAT), /* name */
1352 TRUE, /* partial_inplace */
1353 0xffffffff, /* src_mask */
1354 0xffffffff, /* dst_mask */
1355 FALSE), /* pcrel_offset */
1357 HOWTO (AARCH64_R (JUMP_SLOT), /* type */
1359 2, /* size (0 = byte, 1 = short, 2 = long) */
1361 FALSE, /* pc_relative */
1363 complain_overflow_bitfield, /* complain_on_overflow */
1364 bfd_elf_generic_reloc, /* special_function */
1365 AARCH64_R_STR (JUMP_SLOT), /* name */
1366 TRUE, /* partial_inplace */
1367 0xffffffff, /* src_mask */
1368 0xffffffff, /* dst_mask */
1369 FALSE), /* pcrel_offset */
1371 HOWTO (AARCH64_R (RELATIVE), /* type */
1373 2, /* size (0 = byte, 1 = short, 2 = long) */
1375 FALSE, /* pc_relative */
1377 complain_overflow_bitfield, /* complain_on_overflow */
1378 bfd_elf_generic_reloc, /* special_function */
1379 AARCH64_R_STR (RELATIVE), /* name */
1380 TRUE, /* partial_inplace */
1381 ALL_ONES, /* src_mask */
1382 ALL_ONES, /* dst_mask */
1383 FALSE), /* pcrel_offset */
1385 HOWTO (AARCH64_R (TLS_DTPMOD), /* type */
1387 2, /* size (0 = byte, 1 = short, 2 = long) */
1389 FALSE, /* pc_relative */
1391 complain_overflow_dont, /* complain_on_overflow */
1392 bfd_elf_generic_reloc, /* special_function */
1393 #if ARCH_SIZE == 64
1394 AARCH64_R_STR (TLS_DTPMOD64), /* name */
1395 #else
1396 AARCH64_R_STR (TLS_DTPMOD), /* name */
1397 #endif
1398 FALSE, /* partial_inplace */
1400 ALL_ONES, /* dst_mask */
1401 FALSE), /* pcrel_offset */
1403 HOWTO (AARCH64_R (TLS_DTPREL), /* type */
1405 2, /* size (0 = byte, 1 = short, 2 = long) */
1407 FALSE, /* pc_relative */
1409 complain_overflow_dont, /* complain_on_overflow */
1410 bfd_elf_generic_reloc, /* special_function */
1411 #if ARCH_SIZE == 64
1412 AARCH64_R_STR (TLS_DTPREL64), /* name */
1413 #else
1414 AARCH64_R_STR (TLS_DTPREL), /* name */
1415 #endif
1416 FALSE, /* partial_inplace */
1418 ALL_ONES, /* dst_mask */
1419 FALSE), /* pcrel_offset */
1421 HOWTO (AARCH64_R (TLS_TPREL), /* type */
1423 2, /* size (0 = byte, 1 = short, 2 = long) */
1425 FALSE, /* pc_relative */
1427 complain_overflow_dont, /* complain_on_overflow */
1428 bfd_elf_generic_reloc, /* special_function */
1429 #if ARCH_SIZE == 64
1430 AARCH64_R_STR (TLS_TPREL64), /* name */
1431 #else
1432 AARCH64_R_STR (TLS_TPREL), /* name */
1433 #endif
1434 FALSE, /* partial_inplace */
1436 ALL_ONES, /* dst_mask */
1437 FALSE), /* pcrel_offset */
1439 HOWTO (AARCH64_R (TLSDESC), /* type */
1441 2, /* size (0 = byte, 1 = short, 2 = long) */
1443 FALSE, /* pc_relative */
1445 complain_overflow_dont, /* complain_on_overflow */
1446 bfd_elf_generic_reloc, /* special_function */
1447 AARCH64_R_STR (TLSDESC), /* name */
1448 FALSE, /* partial_inplace */
1450 ALL_ONES, /* dst_mask */
1451 FALSE), /* pcrel_offset */
1453 HOWTO (AARCH64_R (IRELATIVE), /* type */
1455 2, /* size (0 = byte, 1 = short, 2 = long) */
1457 FALSE, /* pc_relative */
1459 complain_overflow_bitfield, /* complain_on_overflow */
1460 bfd_elf_generic_reloc, /* special_function */
1461 AARCH64_R_STR (IRELATIVE), /* name */
1462 FALSE, /* partial_inplace */
1464 ALL_ONES, /* dst_mask */
1465 FALSE), /* pcrel_offset */
1470 static reloc_howto_type elfNN_aarch64_howto_none =
1471 HOWTO (R_AARCH64_NONE, /* type */
1473 3, /* size (0 = byte, 1 = short, 2 = long) */
1475 FALSE, /* pc_relative */
1477 complain_overflow_dont,/* complain_on_overflow */
1478 bfd_elf_generic_reloc, /* special_function */
1479 "R_AARCH64_NONE", /* name */
1480 FALSE, /* partial_inplace */
1483 FALSE); /* pcrel_offset */
1485 /* Given HOWTO, return the bfd internal relocation enumerator. */
1487 static bfd_reloc_code_real_type
1488 elfNN_aarch64_bfd_reloc_from_howto (reloc_howto_type *howto)
1491 = (int) ARRAY_SIZE (elfNN_aarch64_howto_table);
1492 const ptrdiff_t offset
1493 = howto - elfNN_aarch64_howto_table;
1495 if (offset > 0 && offset < size - 1)
1496 return BFD_RELOC_AARCH64_RELOC_START + offset;
1498 if (howto == &elfNN_aarch64_howto_none)
1499 return BFD_RELOC_AARCH64_NONE;
1501 return BFD_RELOC_AARCH64_RELOC_START;
1504 /* Given R_TYPE, return the bfd internal relocation enumerator. */
1506 static bfd_reloc_code_real_type
1507 elfNN_aarch64_bfd_reloc_from_type (unsigned int r_type)
1509 static bfd_boolean initialized_p = FALSE;
1510 /* Indexed by R_TYPE, values are offsets in the howto_table. */
1511 static unsigned int offsets[R_AARCH64_end];
1513 if (initialized_p == FALSE)
1517 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
1518 if (elfNN_aarch64_howto_table[i].type != 0)
1519 offsets[elfNN_aarch64_howto_table[i].type] = i;
1521 initialized_p = TRUE;
1524 if (r_type == R_AARCH64_NONE || r_type == R_AARCH64_NULL)
1525 return BFD_RELOC_AARCH64_NONE;
1527 /* PR 17512: file: b371e70a. */
1528 if (r_type >= R_AARCH64_end)
1530 _bfd_error_handler (_("Invalid AArch64 reloc number: %d"), r_type);
1531 bfd_set_error (bfd_error_bad_value);
1532 return BFD_RELOC_AARCH64_NONE;
1535 return BFD_RELOC_AARCH64_RELOC_START + offsets[r_type];
1538 struct elf_aarch64_reloc_map
1540 bfd_reloc_code_real_type from;
1541 bfd_reloc_code_real_type to;
1544 /* Map bfd generic reloc to AArch64-specific reloc. */
1545 static const struct elf_aarch64_reloc_map elf_aarch64_reloc_map[] =
1547 {BFD_RELOC_NONE, BFD_RELOC_AARCH64_NONE},
1549 /* Basic data relocations. */
1550 {BFD_RELOC_CTOR, BFD_RELOC_AARCH64_NN},
1551 {BFD_RELOC_64, BFD_RELOC_AARCH64_64},
1552 {BFD_RELOC_32, BFD_RELOC_AARCH64_32},
1553 {BFD_RELOC_16, BFD_RELOC_AARCH64_16},
1554 {BFD_RELOC_64_PCREL, BFD_RELOC_AARCH64_64_PCREL},
1555 {BFD_RELOC_32_PCREL, BFD_RELOC_AARCH64_32_PCREL},
1556 {BFD_RELOC_16_PCREL, BFD_RELOC_AARCH64_16_PCREL},
1559 /* Given the bfd internal relocation enumerator in CODE, return the
1560 corresponding howto entry. */
1562 static reloc_howto_type *
1563 elfNN_aarch64_howto_from_bfd_reloc (bfd_reloc_code_real_type code)
1567 /* Convert bfd generic reloc to AArch64-specific reloc. */
1568 if (code < BFD_RELOC_AARCH64_RELOC_START
1569 || code > BFD_RELOC_AARCH64_RELOC_END)
1570 for (i = 0; i < ARRAY_SIZE (elf_aarch64_reloc_map); i++)
1571 if (elf_aarch64_reloc_map[i].from == code)
1573 code = elf_aarch64_reloc_map[i].to;
1577 if (code > BFD_RELOC_AARCH64_RELOC_START
1578 && code < BFD_RELOC_AARCH64_RELOC_END)
1579 if (elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START].type)
1580 return &elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START];
1582 if (code == BFD_RELOC_AARCH64_NONE)
1583 return &elfNN_aarch64_howto_none;
1588 static reloc_howto_type *
1589 elfNN_aarch64_howto_from_type (unsigned int r_type)
1591 bfd_reloc_code_real_type val;
1592 reloc_howto_type *howto;
1597 bfd_set_error (bfd_error_bad_value);
1602 if (r_type == R_AARCH64_NONE)
1603 return &elfNN_aarch64_howto_none;
1605 val = elfNN_aarch64_bfd_reloc_from_type (r_type);
1606 howto = elfNN_aarch64_howto_from_bfd_reloc (val);
1611 bfd_set_error (bfd_error_bad_value);
1616 elfNN_aarch64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *bfd_reloc,
1617 Elf_Internal_Rela *elf_reloc)
1619 unsigned int r_type;
1621 r_type = ELFNN_R_TYPE (elf_reloc->r_info);
1622 bfd_reloc->howto = elfNN_aarch64_howto_from_type (r_type);
1625 static reloc_howto_type *
1626 elfNN_aarch64_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1627 bfd_reloc_code_real_type code)
1629 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (code);
1634 bfd_set_error (bfd_error_bad_value);
1638 static reloc_howto_type *
1639 elfNN_aarch64_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1644 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
1645 if (elfNN_aarch64_howto_table[i].name != NULL
1646 && strcasecmp (elfNN_aarch64_howto_table[i].name, r_name) == 0)
1647 return &elfNN_aarch64_howto_table[i];
1652 #define TARGET_LITTLE_SYM aarch64_elfNN_le_vec
1653 #define TARGET_LITTLE_NAME "elfNN-littleaarch64"
1654 #define TARGET_BIG_SYM aarch64_elfNN_be_vec
1655 #define TARGET_BIG_NAME "elfNN-bigaarch64"
1657 /* The linker script knows the section names for placement.
1658 The entry_names are used to do simple name mangling on the stubs.
1659 Given a function name, and its type, the stub can be found. The
1660 name can be changed. The only requirement is that the %s be present. */
1661 #define STUB_ENTRY_NAME "__%s_veneer"
1663 /* The name of the dynamic interpreter. This is put in the .interp section. */
1665 #define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1"
1667 #define AARCH64_MAX_FWD_BRANCH_OFFSET \
1668 (((1 << 25) - 1) << 2)
1669 #define AARCH64_MAX_BWD_BRANCH_OFFSET \
1672 #define AARCH64_MAX_ADRP_IMM ((1 << 20) - 1)
1673 #define AARCH64_MIN_ADRP_IMM (-(1 << 20))
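/* For reference: AARCH64_MAX_ADRP_IMM is (1 << 20) - 1 = 0xfffff 4KiB
   pages, so an ADRP can reach roughly +/-4GiB from the page containing
   the place being relocated.  The branch limits above correspond to the
   signed, word-scaled 26-bit immediate of B/BL: the forward limit is
   ((1 << 25) - 1) << 2 = 0x7fffffc bytes, i.e. just under 128MiB.  */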
1676 aarch64_valid_for_adrp_p (bfd_vma value, bfd_vma place)
1678 bfd_signed_vma offset = (bfd_signed_vma) (PG (value) - PG (place)) >> 12;
1679 return offset <= AARCH64_MAX_ADRP_IMM && offset >= AARCH64_MIN_ADRP_IMM;
1683 aarch64_valid_branch_p (bfd_vma value, bfd_vma place)
1685 bfd_signed_vma offset = (bfd_signed_vma) (value - place);
1686 return (offset <= AARCH64_MAX_FWD_BRANCH_OFFSET
1687 && offset >= AARCH64_MAX_BWD_BRANCH_OFFSET);
1690 static const uint32_t aarch64_adrp_branch_stub [] =
1692 0x90000010, /* adrp ip0, X */
1693 /* R_AARCH64_ADR_HI21_PCREL(X) */
1694 0x91000210, /* add ip0, ip0, :lo12:X */
1695 /* R_AARCH64_ADD_ABS_LO12_NC(X) */
1696 0xd61f0200, /* br ip0 */
1699 static const uint32_t aarch64_long_branch_stub[] =
1702 0x58000090, /* ldr ip0, 1f */
1704 0x18000090, /* ldr wip0, 1f */
1706 0x10000011, /* adr ip1, #0 */
1707 0x8b110210, /* add ip0, ip0, ip1 */
1708 0xd61f0200, /* br ip0 */
1709 0x00000000, /* 1: .xword or .word
1710 R_AARCH64_PRELNN(X) + 12
1715 static const uint32_t aarch64_erratum_835769_stub[] =
1717 0x00000000, /* Placeholder for multiply accumulate. */
1718 0x14000000, /* b <label> */
1721 static const uint32_t aarch64_erratum_843419_stub[] =
1723 0x00000000, /* Placeholder for LDR instruction. */
1724 0x14000000, /* b <label> */
1727 /* Section name for stubs is the associated section name plus this suffix. */
1729 #define STUB_SUFFIX ".stub"
1731 enum elf_aarch64_stub_type
1734 aarch64_stub_adrp_branch,
1735 aarch64_stub_long_branch,
1736 aarch64_stub_erratum_835769_veneer,
1737 aarch64_stub_erratum_843419_veneer,
1740 struct elf_aarch64_stub_hash_entry
1742 /* Base hash table entry structure. */
1743 struct bfd_hash_entry root;
1745 /* The stub section. */
1748 /* Offset within stub_sec of the beginning of this stub. */
1749 bfd_vma stub_offset;
1751 /* Given the symbol's value and its section we can determine its final
1752 value when building the stubs (so the stub knows where to jump). */
1753 bfd_vma target_value;
1754 asection *target_section;
1756 enum elf_aarch64_stub_type stub_type;
1758 /* The symbol table entry, if any, that this was derived from. */
1759 struct elf_aarch64_link_hash_entry *h;
1761 /* Destination symbol type */
1762 unsigned char st_type;
1764 /* Where this stub is being called from, or, in the case of combined
1765 stub sections, the first input section in the group. */
1768 /* The name for the local symbol at the start of this stub. The
1769 stub name in the hash table has to be unique; this does not, so
1770 it can be friendlier. */
1773 /* The instruction which caused this stub to be generated (only valid for
1774 erratum 835769 workaround stubs at present). */
1775 uint32_t veneered_insn;
1777 /* In an erratum 843419 workaround stub, the ADRP instruction offset. */
1778 bfd_vma adrp_offset;
1781 /* Used to build a map of a section. This is required for mixed-endian code/data. */
1784 typedef struct elf_elf_section_map
1789 elf_aarch64_section_map;
1792 typedef struct _aarch64_elf_section_data
1794 struct bfd_elf_section_data elf;
1795 unsigned int mapcount;
1796 unsigned int mapsize;
1797 elf_aarch64_section_map *map;
1799 _aarch64_elf_section_data;
1801 #define elf_aarch64_section_data(sec) \
1802 ((_aarch64_elf_section_data *) elf_section_data (sec))
1804 /* The size of the thread control block which is defined to be two pointers. */
1805 #define TCB_SIZE (ARCH_SIZE/8)*2
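/* That is, 16 bytes for ELF64 and 8 bytes for ELF32.  */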
1807 struct elf_aarch64_local_symbol
1809 unsigned int got_type;
1810 bfd_signed_vma got_refcount;
1813 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The
1814 offset is from the end of the jump table and reserved entries within the PLTGOT.
1817 The magic value (bfd_vma) -1 indicates that an offset has not been allocated. */
1819 bfd_vma tlsdesc_got_jump_table_offset;
1822 struct elf_aarch64_obj_tdata
1824 struct elf_obj_tdata root;
1826 /* local symbol descriptors */
1827 struct elf_aarch64_local_symbol *locals;
1829 /* Zero to warn when linking objects with incompatible enum sizes. */
1830 int no_enum_size_warning;
1832 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
1833 int no_wchar_size_warning;
1836 #define elf_aarch64_tdata(bfd) \
1837 ((struct elf_aarch64_obj_tdata *) (bfd)->tdata.any)
1839 #define elf_aarch64_locals(bfd) (elf_aarch64_tdata (bfd)->locals)
1841 #define is_aarch64_elf(bfd) \
1842 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
1843 && elf_tdata (bfd) != NULL \
1844 && elf_object_id (bfd) == AARCH64_ELF_DATA)
1847 elfNN_aarch64_mkobject (bfd *abfd)
1849 return bfd_elf_allocate_object (abfd, sizeof (struct elf_aarch64_obj_tdata),
1853 #define elf_aarch64_hash_entry(ent) \
1854 ((struct elf_aarch64_link_hash_entry *)(ent))
1856 #define GOT_UNKNOWN 0
1857 #define GOT_NORMAL 1
1858 #define GOT_TLS_GD 2
1859 #define GOT_TLS_IE 4
1860 #define GOT_TLSDESC_GD 8
1862 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLSDESC_GD))
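/* For example, a symbol referenced through both a traditional GD code
   sequence and a TLS descriptor sequence could carry
   got_type == (GOT_TLS_GD | GOT_TLSDESC_GD), since the flags are
   independent bits; GOT_TLS_GD_ANY_P would then be true for it.  */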
1864 /* AArch64 ELF linker hash entry. */
1865 struct elf_aarch64_link_hash_entry
1867 struct elf_link_hash_entry root;
1869 /* Track dynamic relocs copied for this symbol. */
1870 struct elf_dyn_relocs *dyn_relocs;
1872 /* Since PLT entries have variable size, we need to record the
1873 index into .got.plt instead of recomputing it from the PLT offset. */
1875 bfd_signed_vma plt_got_offset;
1877 /* Bit mask representing the type of GOT entry(s) if any required by this symbol. */
1879 unsigned int got_type;
1881 /* A pointer to the most recently used stub hash entry against this symbol. */
1883 struct elf_aarch64_stub_hash_entry *stub_cache;
1885 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The offset
1886 is from the end of the jump table and reserved entries within the PLTGOT.
1888 The magic value (bfd_vma) -1 indicates that an offset has not been allocated. */
1890 bfd_vma tlsdesc_got_jump_table_offset;
1894 elfNN_aarch64_symbol_got_type (struct elf_link_hash_entry *h,
1896 unsigned long r_symndx)
1899 return elf_aarch64_hash_entry (h)->got_type;
1901 if (! elf_aarch64_locals (abfd))
1904 return elf_aarch64_locals (abfd)[r_symndx].got_type;
1907 /* Get the AArch64 elf linker hash table from a link_info structure. */
1908 #define elf_aarch64_hash_table(info) \
1909 ((struct elf_aarch64_link_hash_table *) ((info)->hash))
1911 #define aarch64_stub_hash_lookup(table, string, create, copy) \
1912 ((struct elf_aarch64_stub_hash_entry *) \
1913 bfd_hash_lookup ((table), (string), (create), (copy)))
1915 /* AArch64 ELF linker hash table. */
1916 struct elf_aarch64_link_hash_table
1918 /* The main hash table. */
1919 struct elf_link_hash_table root;
1921 /* Nonzero to force PIC branch veneers. */
1924 /* Fix erratum 835769. */
1925 int fix_erratum_835769;
1927 /* Fix erratum 843419. */
1928 int fix_erratum_843419;
1930 /* Enable ADRP->ADR rewrite for erratum 843419 workaround. */
1931 int fix_erratum_843419_adr;
1933 /* The number of bytes in the initial entry in the PLT. */
1934 bfd_size_type plt_header_size;
1936 /* The number of bytes in the subsequent PLT entries. */
1937 bfd_size_type plt_entry_size;
1939 /* Short-cuts to get to dynamic linker sections. */
1943 /* Small local sym cache. */
1944 struct sym_cache sym_cache;
1946 /* For convenience in allocate_dynrelocs. */
1949 /* The amount of space used by the reserved portion of the sgotplt
1950 section, plus whatever space is used by the jump slots. */
1951 bfd_vma sgotplt_jump_table_size;
1953 /* The stub hash table. */
1954 struct bfd_hash_table stub_hash_table;
1956 /* Linker stub bfd. */
1959 /* Linker call-backs. */
1960 asection *(*add_stub_section) (const char *, asection *);
1961 void (*layout_sections_again) (void);
1963 /* Array to keep track of which stub sections have been created, and
1964 information on stub grouping. */
1967 /* This is the section to which stubs in the group will be attached. */
1970 /* The stub section. */
1974 /* Assorted information used by elfNN_aarch64_size_stubs. */
1975 unsigned int bfd_count;
1977 asection **input_list;
1979 /* The offset into splt of the PLT entry for the TLS descriptor
1980 resolver. Special values are 0, if not necessary (or not found
1981 to be necessary yet), and -1 if needed but not determined yet. */
1983 bfd_vma tlsdesc_plt;
1985 /* The GOT offset for the lazy trampoline. Communicated to the
1986 loader via DT_TLSDESC_GOT. The magic value (bfd_vma) -1
1987 indicates an offset is not allocated. */
1988 bfd_vma dt_tlsdesc_got;
1990 /* Used by local STT_GNU_IFUNC symbols. */
1991 htab_t loc_hash_table;
1992 void * loc_hash_memory;
1995 /* Create an entry in an AArch64 ELF linker hash table. */
1997 static struct bfd_hash_entry *
1998 elfNN_aarch64_link_hash_newfunc (struct bfd_hash_entry *entry,
1999 struct bfd_hash_table *table,
2002 struct elf_aarch64_link_hash_entry *ret =
2003 (struct elf_aarch64_link_hash_entry *) entry;
2005 /* Allocate the structure if it has not already been allocated by a subclass. */
2008 ret = bfd_hash_allocate (table,
2009 sizeof (struct elf_aarch64_link_hash_entry));
2011 return (struct bfd_hash_entry *) ret;
2013 /* Call the allocation method of the superclass. */
2014 ret = ((struct elf_aarch64_link_hash_entry *)
2015 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
2019 ret->dyn_relocs = NULL;
2020 ret->got_type = GOT_UNKNOWN;
2021 ret->plt_got_offset = (bfd_vma) - 1;
2022 ret->stub_cache = NULL;
2023 ret->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
2026 return (struct bfd_hash_entry *) ret;
2029 /* Initialize an entry in the stub hash table. */
2031 static struct bfd_hash_entry *
2032 stub_hash_newfunc (struct bfd_hash_entry *entry,
2033 struct bfd_hash_table *table, const char *string)
2035 /* Allocate the structure if it has not already been allocated by a subclass. */
2039 entry = bfd_hash_allocate (table,
2041 elf_aarch64_stub_hash_entry));
2046 /* Call the allocation method of the superclass. */
2047 entry = bfd_hash_newfunc (entry, table, string);
2050 struct elf_aarch64_stub_hash_entry *eh;
2052 /* Initialize the local fields. */
2053 eh = (struct elf_aarch64_stub_hash_entry *) entry;
2054 eh->adrp_offset = 0;
2055 eh->stub_sec = NULL;
2056 eh->stub_offset = 0;
2057 eh->target_value = 0;
2058 eh->target_section = NULL;
2059 eh->stub_type = aarch64_stub_none;
2067 /* Compute a hash of a local hash entry. We use elf_link_hash_entry
2068 for local symbols so that we can handle local STT_GNU_IFUNC symbols
2069 as global symbols. We reuse indx and dynstr_index for the local symbol
2070 hash since they aren't used by global symbols in this backend. */
2073 elfNN_aarch64_local_htab_hash (const void *ptr)
2075 struct elf_link_hash_entry *h
2076 = (struct elf_link_hash_entry *) ptr;
2077 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
2080 /* Compare local hash entries. */
2083 elfNN_aarch64_local_htab_eq (const void *ptr1, const void *ptr2)
2085 struct elf_link_hash_entry *h1
2086 = (struct elf_link_hash_entry *) ptr1;
2087 struct elf_link_hash_entry *h2
2088 = (struct elf_link_hash_entry *) ptr2;
2090 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
2093 /* Find and/or create a hash entry for a local symbol. */
2095 static struct elf_link_hash_entry *
2096 elfNN_aarch64_get_local_sym_hash (struct elf_aarch64_link_hash_table *htab,
2097 bfd *abfd, const Elf_Internal_Rela *rel,
2100 struct elf_aarch64_link_hash_entry e, *ret;
2101 asection *sec = abfd->sections;
2102 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
2103 ELFNN_R_SYM (rel->r_info));
2106 e.root.indx = sec->id;
2107 e.root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2108 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
2109 create ? INSERT : NO_INSERT);
2116 ret = (struct elf_aarch64_link_hash_entry *) *slot;
2120 ret = (struct elf_aarch64_link_hash_entry *)
2121 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
2122 sizeof (struct elf_aarch64_link_hash_entry));
2125 memset (ret, 0, sizeof (*ret));
2126 ret->root.indx = sec->id;
2127 ret->root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2128 ret->root.dynindx = -1;
2134 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2137 elfNN_aarch64_copy_indirect_symbol (struct bfd_link_info *info,
2138 struct elf_link_hash_entry *dir,
2139 struct elf_link_hash_entry *ind)
2141 struct elf_aarch64_link_hash_entry *edir, *eind;
2143 edir = (struct elf_aarch64_link_hash_entry *) dir;
2144 eind = (struct elf_aarch64_link_hash_entry *) ind;
2146 if (eind->dyn_relocs != NULL)
2148 if (edir->dyn_relocs != NULL)
2150 struct elf_dyn_relocs **pp;
2151 struct elf_dyn_relocs *p;
2153 /* Add reloc counts against the indirect sym to the direct sym
2154 list. Merge any entries against the same section. */
2155 for (pp = &eind->dyn_relocs; (p = *pp) != NULL;)
2157 struct elf_dyn_relocs *q;
2159 for (q = edir->dyn_relocs; q != NULL; q = q->next)
2160 if (q->sec == p->sec)
2162 q->pc_count += p->pc_count;
2163 q->count += p->count;
2170 *pp = edir->dyn_relocs;
2173 edir->dyn_relocs = eind->dyn_relocs;
2174 eind->dyn_relocs = NULL;
2177 if (ind->root.type == bfd_link_hash_indirect)
2179 /* Copy over PLT info. */
2180 if (dir->got.refcount <= 0)
2182 edir->got_type = eind->got_type;
2183 eind->got_type = GOT_UNKNOWN;
2187 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2190 /* Destroy an AArch64 elf linker hash table. */
2193 elfNN_aarch64_link_hash_table_free (bfd *obfd)
2195 struct elf_aarch64_link_hash_table *ret
2196 = (struct elf_aarch64_link_hash_table *) obfd->link.hash;
2198 if (ret->loc_hash_table)
2199 htab_delete (ret->loc_hash_table);
2200 if (ret->loc_hash_memory)
2201 objalloc_free ((struct objalloc *) ret->loc_hash_memory);
2203 bfd_hash_table_free (&ret->stub_hash_table);
2204 _bfd_elf_link_hash_table_free (obfd);
2207 /* Create an AArch64 elf linker hash table. */
2209 static struct bfd_link_hash_table *
2210 elfNN_aarch64_link_hash_table_create (bfd *abfd)
2212 struct elf_aarch64_link_hash_table *ret;
2213 bfd_size_type amt = sizeof (struct elf_aarch64_link_hash_table);
2215 ret = bfd_zmalloc (amt);
2219 if (!_bfd_elf_link_hash_table_init
2220 (&ret->root, abfd, elfNN_aarch64_link_hash_newfunc,
2221 sizeof (struct elf_aarch64_link_hash_entry), AARCH64_ELF_DATA))
2227 ret->plt_header_size = PLT_ENTRY_SIZE;
2228 ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE;
2230 ret->dt_tlsdesc_got = (bfd_vma) - 1;
2232 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2233 sizeof (struct elf_aarch64_stub_hash_entry)))
2235 _bfd_elf_link_hash_table_free (abfd);
2239 ret->loc_hash_table = htab_try_create (1024,
2240 elfNN_aarch64_local_htab_hash,
2241 elfNN_aarch64_local_htab_eq,
2243 ret->loc_hash_memory = objalloc_create ();
2244 if (!ret->loc_hash_table || !ret->loc_hash_memory)
2246 elfNN_aarch64_link_hash_table_free (abfd);
2249 ret->root.root.hash_table_free = elfNN_aarch64_link_hash_table_free;
2251 return &ret->root.root;
2255 aarch64_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
2256 bfd_vma offset, bfd_vma value)
2258 reloc_howto_type *howto;
2261 howto = elfNN_aarch64_howto_from_type (r_type);
2262 place = (input_section->output_section->vma + input_section->output_offset
2265 r_type = elfNN_aarch64_bfd_reloc_from_type (r_type);
2266 value = _bfd_aarch64_elf_resolve_relocation (r_type, place, value, 0, FALSE);
2267 return _bfd_aarch64_elf_put_addend (input_bfd,
2268 input_section->contents + offset, r_type,
2272 static enum elf_aarch64_stub_type
2273 aarch64_select_branch_stub (bfd_vma value, bfd_vma place)
2275 if (aarch64_valid_for_adrp_p (value, place))
2276 return aarch64_stub_adrp_branch;
2277 return aarch64_stub_long_branch;
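/* As a rough sketch of the two flavours: the ADRP-based stub forms the
   destination with an adrp/add pair and branches via BR, so it can only be
   used when the destination page is reachable from the stub with a single
   ADRP; the long-branch stub instead keeps the full destination address as
   literal data inside the stub (see the PRELNN fixup applied at offset 16 in
   aarch64_build_one_stub) and therefore works for any destination.  */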
2280 /* Determine the type of stub needed, if any, for a call. */
2282 static enum elf_aarch64_stub_type
2283 aarch64_type_of_stub (struct bfd_link_info *info,
2284 asection *input_sec,
2285 const Elf_Internal_Rela *rel,
2286 unsigned char st_type,
2287 struct elf_aarch64_link_hash_entry *hash,
2288 bfd_vma destination)
2291 bfd_signed_vma branch_offset;
2292 unsigned int r_type;
2293 struct elf_aarch64_link_hash_table *globals;
2294 enum elf_aarch64_stub_type stub_type = aarch64_stub_none;
2295 bfd_boolean via_plt_p;
2297 if (st_type != STT_FUNC)
2300 globals = elf_aarch64_hash_table (info);
2301 via_plt_p = (globals->root.splt != NULL && hash != NULL
2302 && hash->root.plt.offset != (bfd_vma) - 1);
2307 /* Determine where the call point is. */
2308 location = (input_sec->output_offset
2309 + input_sec->output_section->vma + rel->r_offset);
2311 branch_offset = (bfd_signed_vma) (destination - location);
2313 r_type = ELFNN_R_TYPE (rel->r_info);
2315 /* We don't want to redirect any old unconditional jump in this way,
2316 only one which is being used for a sibcall, where it is
2317 acceptable for the IP0 and IP1 registers to be clobbered. */
2318 if ((r_type == AARCH64_R (CALL26) || r_type == AARCH64_R (JUMP26))
2319 && (branch_offset > AARCH64_MAX_FWD_BRANCH_OFFSET
2320 || branch_offset < AARCH64_MAX_BWD_BRANCH_OFFSET))
2322 stub_type = aarch64_stub_long_branch;
2328 /* Build a name for an entry in the stub hash table. */
2331 elfNN_aarch64_stub_name (const asection *input_section,
2332 const asection *sym_sec,
2333 const struct elf_aarch64_link_hash_entry *hash,
2334 const Elf_Internal_Rela *rel)
2341 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
2342 stub_name = bfd_malloc (len);
2343 if (stub_name != NULL)
2344 snprintf (stub_name, len, "%08x_%s+%" BFD_VMA_FMT "x",
2345 (unsigned int) input_section->id,
2346 hash->root.root.root.string,
2351 len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
2352 stub_name = bfd_malloc (len);
2353 if (stub_name != NULL)
2354 snprintf (stub_name, len, "%08x_%x:%x+%" BFD_VMA_FMT "x",
2355 (unsigned int) input_section->id,
2356 (unsigned int) sym_sec->id,
2357 (unsigned int) ELFNN_R_SYM (rel->r_info),
2364 /* Look up an entry in the stub hash. Stub entries are cached because
2365 creating the stub name takes a bit of time. */
2367 static struct elf_aarch64_stub_hash_entry *
2368 elfNN_aarch64_get_stub_entry (const asection *input_section,
2369 const asection *sym_sec,
2370 struct elf_link_hash_entry *hash,
2371 const Elf_Internal_Rela *rel,
2372 struct elf_aarch64_link_hash_table *htab)
2374 struct elf_aarch64_stub_hash_entry *stub_entry;
2375 struct elf_aarch64_link_hash_entry *h =
2376 (struct elf_aarch64_link_hash_entry *) hash;
2377 const asection *id_sec;
2379 if ((input_section->flags & SEC_CODE) == 0)
2382 /* If this input section is part of a group of sections sharing one
2383 stub section, then use the id of the first section in the group.
2384 Stub names need to include a section id, as there may well be
2385 more than one stub used to reach say, printf, and we need to
2386 distinguish between them. */
2387 id_sec = htab->stub_group[input_section->id].link_sec;
2389 if (h != NULL && h->stub_cache != NULL
2390 && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec)
2392 stub_entry = h->stub_cache;
2398 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, h, rel);
2399 if (stub_name == NULL)
2402 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table,
2403 stub_name, FALSE, FALSE);
2405 h->stub_cache = stub_entry;
2414 /* Create a stub section. */
2417 _bfd_aarch64_create_stub_section (asection *section,
2418 struct elf_aarch64_link_hash_table *htab)
2424 namelen = strlen (section->name);
2425 len = namelen + sizeof (STUB_SUFFIX);
2426 s_name = bfd_alloc (htab->stub_bfd, len);
2430 memcpy (s_name, section->name, namelen);
2431 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
2432 return (*htab->add_stub_section) (s_name, section);
2436 /* Find or create a stub section for a link section.
2438 Find or create the stub section used to collect stubs attached to
2439 the specified link section. */
2442 _bfd_aarch64_get_stub_for_link_section (asection *link_section,
2443 struct elf_aarch64_link_hash_table *htab)
2445 if (htab->stub_group[link_section->id].stub_sec == NULL)
2446 htab->stub_group[link_section->id].stub_sec
2447 = _bfd_aarch64_create_stub_section (link_section, htab);
2448 return htab->stub_group[link_section->id].stub_sec;
2452 /* Find or create a stub section in the stub group for an input section. */
2456 _bfd_aarch64_create_or_find_stub_sec (asection *section,
2457 struct elf_aarch64_link_hash_table *htab)
2459 asection *link_sec = htab->stub_group[section->id].link_sec;
2460 return _bfd_aarch64_get_stub_for_link_section (link_sec, htab);
2464 /* Add a new stub entry in the stub group associated with an input
2465 section to the stub hash. Not all fields of the new stub entry are initialised. */
2468 static struct elf_aarch64_stub_hash_entry *
2469 _bfd_aarch64_add_stub_entry_in_group (const char *stub_name,
2471 struct elf_aarch64_link_hash_table *htab)
2475 struct elf_aarch64_stub_hash_entry *stub_entry;
2477 link_sec = htab->stub_group[section->id].link_sec;
2478 stub_sec = _bfd_aarch64_create_or_find_stub_sec (section, htab);
2480 /* Enter this entry into the linker stub hash table. */
2481 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
2483 if (stub_entry == NULL)
2485 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
2486 section->owner, stub_name);
2490 stub_entry->stub_sec = stub_sec;
2491 stub_entry->stub_offset = 0;
2492 stub_entry->id_sec = link_sec;
2497 /* Add a new stub entry in the final stub section to the stub hash.
2498 Not all fields of the new stub entry are initialised. */
2500 static struct elf_aarch64_stub_hash_entry *
2501 _bfd_aarch64_add_stub_entry_after (const char *stub_name,
2502 asection *link_section,
2503 struct elf_aarch64_link_hash_table *htab)
2506 struct elf_aarch64_stub_hash_entry *stub_entry;
2508 stub_sec = _bfd_aarch64_get_stub_for_link_section (link_section, htab);
2509 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
2511 if (stub_entry == NULL)
2513 (*_bfd_error_handler) (_("cannot create stub entry %s"), stub_name);
2517 stub_entry->stub_sec = stub_sec;
2518 stub_entry->stub_offset = 0;
2519 stub_entry->id_sec = link_section;
2526 aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
2527 void *in_arg ATTRIBUTE_UNUSED)
2529 struct elf_aarch64_stub_hash_entry *stub_entry;
2534 bfd_vma veneered_insn_loc;
2535 bfd_vma veneer_entry_loc;
2536 bfd_signed_vma branch_offset = 0;
2537 unsigned int template_size;
2538 const uint32_t *template;
2541 /* Massage our args to the form they really have. */
2542 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
2544 stub_sec = stub_entry->stub_sec;
2546 /* Make a note of the offset within the stubs for this entry. */
2547 stub_entry->stub_offset = stub_sec->size;
2548 loc = stub_sec->contents + stub_entry->stub_offset;
2550 stub_bfd = stub_sec->owner;
2552 /* This is the address of the stub destination. */
2553 sym_value = (stub_entry->target_value
2554 + stub_entry->target_section->output_offset
2555 + stub_entry->target_section->output_section->vma);
2557 if (stub_entry->stub_type == aarch64_stub_long_branch)
2559 bfd_vma place = (stub_entry->stub_offset + stub_sec->output_section->vma
2560 + stub_sec->output_offset);
2562 /* See if we can relax the stub. */
2563 if (aarch64_valid_for_adrp_p (sym_value, place))
2564 stub_entry->stub_type = aarch64_select_branch_stub (sym_value, place);
2567 switch (stub_entry->stub_type)
2569 case aarch64_stub_adrp_branch:
2570 template = aarch64_adrp_branch_stub;
2571 template_size = sizeof (aarch64_adrp_branch_stub);
2573 case aarch64_stub_long_branch:
2574 template = aarch64_long_branch_stub;
2575 template_size = sizeof (aarch64_long_branch_stub);
2577 case aarch64_stub_erratum_835769_veneer:
2578 template = aarch64_erratum_835769_stub;
2579 template_size = sizeof (aarch64_erratum_835769_stub);
2581 case aarch64_stub_erratum_843419_veneer:
2582 template = aarch64_erratum_843419_stub;
2583 template_size = sizeof (aarch64_erratum_843419_stub);
2589 for (i = 0; i < (template_size / sizeof template[0]); i++)
2591 bfd_putl32 (template[i], loc);
2595 template_size = (template_size + 7) & ~7;
2596 stub_sec->size += template_size;
2598 switch (stub_entry->stub_type)
2600 case aarch64_stub_adrp_branch:
2601 if (aarch64_relocate (AARCH64_R (ADR_PREL_PG_HI21), stub_bfd, stub_sec,
2602 stub_entry->stub_offset, sym_value))
2603 /* The stub would not have been relaxed if the offset was out of range. */
2607 if (aarch64_relocate (AARCH64_R (ADD_ABS_LO12_NC), stub_bfd, stub_sec,
2608 stub_entry->stub_offset + 4, sym_value))
2612 case aarch64_stub_long_branch:
2613 /* We want the value relative to the address 12 bytes back from the
2615 if (aarch64_relocate (AARCH64_R (PRELNN), stub_bfd, stub_sec,
2616 stub_entry->stub_offset + 16, sym_value + 12))
2620 case aarch64_stub_erratum_835769_veneer:
2621 veneered_insn_loc = stub_entry->target_section->output_section->vma
2622 + stub_entry->target_section->output_offset
2623 + stub_entry->target_value;
2624 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
2625 + stub_entry->stub_sec->output_offset
2626 + stub_entry->stub_offset;
2627 branch_offset = veneered_insn_loc - veneer_entry_loc;
2628 branch_offset >>= 2;
2629 branch_offset &= 0x3ffffff;
2630 bfd_putl32 (stub_entry->veneered_insn,
2631 stub_sec->contents + stub_entry->stub_offset);
2632 bfd_putl32 (template[1] | branch_offset,
2633 stub_sec->contents + stub_entry->stub_offset + 4);
2636 case aarch64_stub_erratum_843419_veneer:
2637 if (aarch64_relocate (AARCH64_R (JUMP26), stub_bfd, stub_sec,
2638 stub_entry->stub_offset + 4, sym_value + 4))
2649 /* As above, but don't actually build the stub. Just bump offset so
2650 we know stub section sizes. */
2653 aarch64_size_one_stub (struct bfd_hash_entry *gen_entry,
2654 void *in_arg ATTRIBUTE_UNUSED)
2656 struct elf_aarch64_stub_hash_entry *stub_entry;
2659 /* Massage our args to the form they really have. */
2660 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
2662 switch (stub_entry->stub_type)
2664 case aarch64_stub_adrp_branch:
2665 size = sizeof (aarch64_adrp_branch_stub);
2667 case aarch64_stub_long_branch:
2668 size = sizeof (aarch64_long_branch_stub);
2670 case aarch64_stub_erratum_835769_veneer:
2671 size = sizeof (aarch64_erratum_835769_stub);
2673 case aarch64_stub_erratum_843419_veneer:
2674 size = sizeof (aarch64_erratum_843419_stub);
2680 size = (size + 7) & ~7;
2681 stub_entry->stub_sec->size += size;
2685 /* External entry points for sizing and building linker stubs. */
2687 /* Set up various things so that we can make a list of input sections
2688 for each output section included in the link. Returns -1 on error,
2689 0 when no stubs will be needed, and 1 on success. */
2692 elfNN_aarch64_setup_section_lists (bfd *output_bfd,
2693 struct bfd_link_info *info)
2696 unsigned int bfd_count;
2697 int top_id, top_index;
2699 asection **input_list, **list;
2701 struct elf_aarch64_link_hash_table *htab =
2702 elf_aarch64_hash_table (info);
2704 if (!is_elf_hash_table (htab))
2707 /* Count the number of input BFDs and find the top input section id. */
2708 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
2709 input_bfd != NULL; input_bfd = input_bfd->link.next)
2712 for (section = input_bfd->sections;
2713 section != NULL; section = section->next)
2715 if (top_id < section->id)
2716 top_id = section->id;
2719 htab->bfd_count = bfd_count;
2721 amt = sizeof (struct map_stub) * (top_id + 1);
2722 htab->stub_group = bfd_zmalloc (amt);
2723 if (htab->stub_group == NULL)
2726 /* We can't use output_bfd->section_count here to find the top output
2727 section index as some sections may have been removed, and
2728 _bfd_strip_section_from_output doesn't renumber the indices. */
2729 for (section = output_bfd->sections, top_index = 0;
2730 section != NULL; section = section->next)
2732 if (top_index < section->index)
2733 top_index = section->index;
2736 htab->top_index = top_index;
2737 amt = sizeof (asection *) * (top_index + 1);
2738 input_list = bfd_malloc (amt);
2739 htab->input_list = input_list;
2740 if (input_list == NULL)
2743 /* For sections we aren't interested in, mark their entries with a
2744 value we can check later. */
2745 list = input_list + top_index;
2747 *list = bfd_abs_section_ptr;
2748 while (list-- != input_list);
2750 for (section = output_bfd->sections;
2751 section != NULL; section = section->next)
2753 if ((section->flags & SEC_CODE) != 0)
2754 input_list[section->index] = NULL;
2760 /* Used by elfNN_aarch64_next_input_section and group_sections. */
2761 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
2763 /* The linker repeatedly calls this function for each input section,
2764 in the order that input sections are linked into output sections.
2765 Build lists of input sections to determine groupings between which
2766 we may insert linker stubs. */
2769 elfNN_aarch64_next_input_section (struct bfd_link_info *info, asection *isec)
2771 struct elf_aarch64_link_hash_table *htab =
2772 elf_aarch64_hash_table (info);
2774 if (isec->output_section->index <= htab->top_index)
2776 asection **list = htab->input_list + isec->output_section->index;
2778 if (*list != bfd_abs_section_ptr)
2780 /* Steal the link_sec pointer for our list. */
2781 /* This happens to make the list in reverse order,
2782 which is what we want. */
2783 PREV_SEC (isec) = *list;
2789 /* See whether we can group stub sections together. Grouping stub
2790 sections may result in fewer stubs. More importantly, we need to
2791 put all .init* and .fini* stubs at the beginning of the .init or
2792 .fini output sections respectively, because glibc splits the
2793 _init and _fini functions into multiple parts. Putting a stub in
2794 the middle of a function is not a good idea. */
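/* As an illustration of the grouping below: walking the (reverse ordered)
   per-output-section list built by elfNN_aarch64_next_input_section,
   consecutive input sections are merged into one group for as long as the
   accumulated output_offset span stays below stub_group_size; every member
   of the group has its stub_group[].link_sec set to the same section, so
   they all share one stub section.  A section that would push the span past
   the limit starts a new group.  */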
2797 group_sections (struct elf_aarch64_link_hash_table *htab,
2798 bfd_size_type stub_group_size,
2799 bfd_boolean stubs_always_before_branch)
2801 asection **list = htab->input_list + htab->top_index;
2805 asection *tail = *list;
2807 if (tail == bfd_abs_section_ptr)
2810 while (tail != NULL)
2814 bfd_size_type total;
2818 while ((prev = PREV_SEC (curr)) != NULL
2819 && ((total += curr->output_offset - prev->output_offset)
2823 /* OK, the size from the start of CURR to the end is less
2824 than stub_group_size and thus can be handled by one stub
2825 section. (Or the tail section is itself larger than
2826 stub_group_size, in which case we may be toast.)
2827 We should really be keeping track of the total size of
2828 stubs added here, as stubs contribute to the final output section size. */
2832 prev = PREV_SEC (tail);
2833 /* Set up this stub group. */
2834 htab->stub_group[tail->id].link_sec = curr;
2836 while (tail != curr && (tail = prev) != NULL);
2838 /* But wait, there's more! Input sections up to stub_group_size
2839 bytes before the stub section can be handled by it too. */
2840 if (!stubs_always_before_branch)
2844 && ((total += tail->output_offset - prev->output_offset)
2848 prev = PREV_SEC (tail);
2849 htab->stub_group[tail->id].link_sec = curr;
2855 while (list-- != htab->input_list);
2857 free (htab->input_list);
2862 #define AARCH64_BITS(x, pos, n) (((x) >> (pos)) & ((1 << (n)) - 1))
2864 #define AARCH64_RT(insn) AARCH64_BITS (insn, 0, 5)
2865 #define AARCH64_RT2(insn) AARCH64_BITS (insn, 10, 5)
2866 #define AARCH64_RA(insn) AARCH64_BITS (insn, 10, 5)
2867 #define AARCH64_RD(insn) AARCH64_BITS (insn, 0, 5)
2868 #define AARCH64_RN(insn) AARCH64_BITS (insn, 5, 5)
2869 #define AARCH64_RM(insn) AARCH64_BITS (insn, 16, 5)
2871 #define AARCH64_MAC(insn) (((insn) & 0xff000000) == 0x9b000000)
2872 #define AARCH64_BIT(insn, n) AARCH64_BITS (insn, n, 1)
2873 #define AARCH64_OP31(insn) AARCH64_BITS (insn, 21, 3)
2874 #define AARCH64_ZR 0x1f
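/* A worked example of the field extraction macros: the A64 instruction
   "madd x1, x2, x3, x4" encodes as 0x9b031041, for which
   AARCH64_MAC (insn) is true, AARCH64_OP31 (insn) == 0,
   AARCH64_RD (insn) == 1, AARCH64_RN (insn) == 2,
   AARCH64_RM (insn) == 3 and AARCH64_RA (insn) == 4.  */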
2876 /* All ld/st ops. See C4-182 of the ARM ARM. The encoding space for
2877 LD_PCREL, LDST_RO, LDST_UI and LDST_UIMM cover prefetch ops. */
2879 #define AARCH64_LD(insn) (AARCH64_BIT (insn, 22) == 1)
2880 #define AARCH64_LDST(insn) (((insn) & 0x0a000000) == 0x08000000)
2881 #define AARCH64_LDST_EX(insn) (((insn) & 0x3f000000) == 0x08000000)
2882 #define AARCH64_LDST_PCREL(insn) (((insn) & 0x3b000000) == 0x18000000)
2883 #define AARCH64_LDST_NAP(insn) (((insn) & 0x3b800000) == 0x28000000)
2884 #define AARCH64_LDSTP_PI(insn) (((insn) & 0x3b800000) == 0x28800000)
2885 #define AARCH64_LDSTP_O(insn) (((insn) & 0x3b800000) == 0x29000000)
2886 #define AARCH64_LDSTP_PRE(insn) (((insn) & 0x3b800000) == 0x29800000)
2887 #define AARCH64_LDST_UI(insn) (((insn) & 0x3b200c00) == 0x38000000)
2888 #define AARCH64_LDST_PIIMM(insn) (((insn) & 0x3b200c00) == 0x38000400)
2889 #define AARCH64_LDST_U(insn) (((insn) & 0x3b200c00) == 0x38000800)
2890 #define AARCH64_LDST_PREIMM(insn) (((insn) & 0x3b200c00) == 0x38000c00)
2891 #define AARCH64_LDST_RO(insn) (((insn) & 0x3b200c00) == 0x38200800)
2892 #define AARCH64_LDST_UIMM(insn) (((insn) & 0x3b000000) == 0x39000000)
2893 #define AARCH64_LDST_SIMD_M(insn) (((insn) & 0xbfbf0000) == 0x0c000000)
2894 #define AARCH64_LDST_SIMD_M_PI(insn) (((insn) & 0xbfa00000) == 0x0c800000)
2895 #define AARCH64_LDST_SIMD_S(insn) (((insn) & 0xbf9f0000) == 0x0d000000)
2896 #define AARCH64_LDST_SIMD_S_PI(insn) (((insn) & 0xbf800000) == 0x0d800000)
2898 /* Classify an INSN if it is indeed a load/store.
2900 Return TRUE if INSN is a LD/ST instruction otherwise return FALSE.
2902 For scalar LD/ST instructions PAIR is FALSE, RT is returned and RT2
2905 For LD/ST pair instructions PAIR is TRUE, RT and RT2 are returned.
2910 aarch64_mem_op_p (uint32_t insn, unsigned int *rt, unsigned int *rt2,
2911 bfd_boolean *pair, bfd_boolean *load)
2919 /* Bail out quickly if INSN doesn't fall into the load-store encoding space. */
2921 if (!AARCH64_LDST (insn))
2926 if (AARCH64_LDST_EX (insn))
2928 *rt = AARCH64_RT (insn);
2930 if (AARCH64_BIT (insn, 21) == 1)
2933 *rt2 = AARCH64_RT2 (insn);
2935 *load = AARCH64_LD (insn);
2938 else if (AARCH64_LDST_NAP (insn)
2939 || AARCH64_LDSTP_PI (insn)
2940 || AARCH64_LDSTP_O (insn)
2941 || AARCH64_LDSTP_PRE (insn))
2944 *rt = AARCH64_RT (insn);
2945 *rt2 = AARCH64_RT2 (insn);
2946 *load = AARCH64_LD (insn);
2949 else if (AARCH64_LDST_PCREL (insn)
2950 || AARCH64_LDST_UI (insn)
2951 || AARCH64_LDST_PIIMM (insn)
2952 || AARCH64_LDST_U (insn)
2953 || AARCH64_LDST_PREIMM (insn)
2954 || AARCH64_LDST_RO (insn)
2955 || AARCH64_LDST_UIMM (insn))
2957 *rt = AARCH64_RT (insn);
2959 if (AARCH64_LDST_PCREL (insn))
2961 opc = AARCH64_BITS (insn, 22, 2);
2962 v = AARCH64_BIT (insn, 26);
2963 opc_v = opc | (v << 2);
2964 *load = (opc_v == 1 || opc_v == 2 || opc_v == 3
2965 || opc_v == 5 || opc_v == 7);
2968 else if (AARCH64_LDST_SIMD_M (insn)
2969 || AARCH64_LDST_SIMD_M_PI (insn))
2971 *rt = AARCH64_RT (insn);
2972 *load = AARCH64_BIT (insn, 22);
2973 opcode = (insn >> 12) & 0xf;
3000 else if (AARCH64_LDST_SIMD_S (insn)
3001 || AARCH64_LDST_SIMD_S_PI (insn))
3003 *rt = AARCH64_RT (insn);
3004 r = (insn >> 21) & 1;
3005 *load = AARCH64_BIT (insn, 22);
3006 opcode = (insn >> 13) & 0x7;
3018 *rt2 = *rt + (r == 0 ? 2 : 3);
3026 *rt2 = *rt + (r == 0 ? 2 : 3);
3038 /* Return TRUE if INSN is multiply-accumulate. */
3041 aarch64_mlxl_p (uint32_t insn)
3043 uint32_t op31 = AARCH64_OP31 (insn);
3045 if (AARCH64_MAC (insn)
3046 && (op31 == 0 || op31 == 1 || op31 == 5)
3047 /* Exclude MUL instructions which are encoded as a multiply-accumulate with RA = XZR. */
3049 && AARCH64_RA (insn) != AARCH64_ZR)
3055 /* Some early revisions of the Cortex-A53 have an erratum (835769) whereby
3056 it is possible for a 64-bit multiply-accumulate instruction to generate an
3057 incorrect result. The details are quite complex and hard to
3058 determine statically, since branches in the code may exist in some
3059 circumstances, but all cases end with a memory (load, store, or
3060 prefetch) instruction followed immediately by the multiply-accumulate
3061 operation. We employ a linker patching technique, by moving the potentially
3062 affected multiply-accumulate instruction into a patch region and replacing
3063 the original instruction with a branch to the patch. This function checks
3064 if INSN_1 is the memory operation followed by a multiply-accumulate
3065 operation (INSN_2). Return TRUE if an erratum sequence is found, FALSE
3066 if INSN_1 and INSN_2 are safe. */
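/* For example, a sequence such as:

     ldr   x10, [x5]            <-- insn_1, memory operation
     madd  x0, x1, x2, x3       <-- insn_2, 64-bit multiply-accumulate

   is treated as potentially affected: the madd is copied into an erratum
   835769 veneer (the original instruction followed by a branch back) and the
   original madd site is redirected to that veneer.  The registers above are
   chosen purely for illustration.  */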
3069 aarch64_erratum_sequence (uint32_t insn_1, uint32_t insn_2)
3079 if (aarch64_mlxl_p (insn_2)
3080 && aarch64_mem_op_p (insn_1, &rt, &rt2, &pair, &load))
3082 /* Any SIMD memory op is independent of the subsequent MLA
3083 by definition of the erratum. */
3084 if (AARCH64_BIT (insn_1, 26))
3087 /* If not SIMD, check for integer memory ops and MLA relationship. */
3088 rn = AARCH64_RN (insn_2);
3089 ra = AARCH64_RA (insn_2);
3090 rm = AARCH64_RM (insn_2);
3092 /* If this is a load and there's a true(RAW) dependency, we are safe
3093 and this is not an erratum sequence. */
3095 (rt == rn || rt == rm || rt == ra
3096 || (pair && (rt2 == rn || rt2 == rm || rt2 == ra))))
3099 /* We conservatively put out stubs for all other cases (including
3107 /* Used to order a list of mapping symbols by address. */
3110 elf_aarch64_compare_mapping (const void *a, const void *b)
3112 const elf_aarch64_section_map *amap = (const elf_aarch64_section_map *) a;
3113 const elf_aarch64_section_map *bmap = (const elf_aarch64_section_map *) b;
3115 if (amap->vma > bmap->vma)
3117 else if (amap->vma < bmap->vma)
3119 else if (amap->type > bmap->type)
3120 /* Ensure results do not depend on the host qsort for objects with
3121 multiple mapping symbols at the same address by sorting on type after vma. */
3124 else if (amap->type < bmap->type)
3132 _bfd_aarch64_erratum_835769_stub_name (unsigned num_fixes)
3134 char *stub_name = (char *) bfd_malloc
3135 (strlen ("__erratum_835769_veneer_") + 16);
3136 sprintf (stub_name,"__erratum_835769_veneer_%d", num_fixes);
3140 /* Scan for Cortex-A53 erratum 835769 sequence.
3142 Return TRUE on success, FALSE on abnormal termination. */
3145 _bfd_aarch64_erratum_835769_scan (bfd *input_bfd,
3146 struct bfd_link_info *info,
3147 unsigned int *num_fixes_p)
3150 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3151 unsigned int num_fixes = *num_fixes_p;
3156 for (section = input_bfd->sections;
3158 section = section->next)
3160 bfd_byte *contents = NULL;
3161 struct _aarch64_elf_section_data *sec_data;
3164 if (elf_section_type (section) != SHT_PROGBITS
3165 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3166 || (section->flags & SEC_EXCLUDE) != 0
3167 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
3168 || (section->output_section == bfd_abs_section_ptr))
3171 if (elf_section_data (section)->this_hdr.contents != NULL)
3172 contents = elf_section_data (section)->this_hdr.contents;
3173 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3176 sec_data = elf_aarch64_section_data (section);
3178 qsort (sec_data->map, sec_data->mapcount,
3179 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
3181 for (span = 0; span < sec_data->mapcount; span++)
3183 unsigned int span_start = sec_data->map[span].vma;
3184 unsigned int span_end = ((span == sec_data->mapcount - 1)
3185 ? sec_data->map[0].vma + section->size
3186 : sec_data->map[span + 1].vma);
3188 char span_type = sec_data->map[span].type;
3190 if (span_type == 'd')
3193 for (i = span_start; i + 4 < span_end; i += 4)
3195 uint32_t insn_1 = bfd_getl32 (contents + i);
3196 uint32_t insn_2 = bfd_getl32 (contents + i + 4);
3198 if (aarch64_erratum_sequence (insn_1, insn_2))
3200 struct elf_aarch64_stub_hash_entry *stub_entry;
3201 char *stub_name = _bfd_aarch64_erratum_835769_stub_name (num_fixes);
3205 stub_entry = _bfd_aarch64_add_stub_entry_in_group (stub_name,
3211 stub_entry->stub_type = aarch64_stub_erratum_835769_veneer;
3212 stub_entry->target_section = section;
3213 stub_entry->target_value = i + 4;
3214 stub_entry->veneered_insn = insn_2;
3215 stub_entry->output_name = stub_name;
3220 if (elf_section_data (section)->this_hdr.contents == NULL)
3224 *num_fixes_p = num_fixes;
3230 /* Test if instruction INSN is ADRP. */
3233 _bfd_aarch64_adrp_p (uint32_t insn)
3235 return ((insn & 0x9f000000) == 0x90000000);
3239 /* Helper predicate to look for cortex-a53 erratum 843419 sequence 1. */
3242 _bfd_aarch64_erratum_843419_sequence_p (uint32_t insn_1, uint32_t insn_2,
3250 return (aarch64_mem_op_p (insn_2, &rt, &rt2, &pair, &load)
3253 && AARCH64_LDST_UIMM (insn_3)
3254 && AARCH64_RN (insn_3) == AARCH64_RD (insn_1));
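/* For example (registers and symbol purely illustrative), a sequence of the
   form:

     adrp  x0, sym                <-- at a page offset of 0xff8 or 0xffc
     ldr   x1, [x2, #8]           <-- any load/store
     ldr   x3, [x0, #:lo12:sym]   <-- load/store using the ADRP result as base

   matches the erratum 843419 pattern checked for here; the variant with one
   extra intervening instruction is handled by the insn_4 case in
   _bfd_aarch64_erratum_843419_p below.  */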
3258 /* Test for the presence of Cortex-A53 erratum 843419 instruction sequence.
3260 Return TRUE if section CONTENTS at offset I contains one of the
3261 erratum 843419 sequences, otherwise return FALSE. If a sequence is
3262 seen set P_VENEER_I to the offset of the final LOAD/STORE
3263 instruction in the sequence.
3267 _bfd_aarch64_erratum_843419_p (bfd_byte *contents, bfd_vma vma,
3268 bfd_vma i, bfd_vma span_end,
3269 bfd_vma *p_veneer_i)
3271 uint32_t insn_1 = bfd_getl32 (contents + i);
3273 if (!_bfd_aarch64_adrp_p (insn_1))
3276 if (span_end < i + 12)
3279 uint32_t insn_2 = bfd_getl32 (contents + i + 4);
3280 uint32_t insn_3 = bfd_getl32 (contents + i + 8);
3282 if ((vma & 0xfff) != 0xff8 && (vma & 0xfff) != 0xffc)
3285 if (_bfd_aarch64_erratum_843419_sequence_p (insn_1, insn_2, insn_3))
3287 *p_veneer_i = i + 8;
3291 if (span_end < i + 16)
3294 uint32_t insn_4 = bfd_getl32 (contents + i + 12);
3296 if (_bfd_aarch64_erratum_843419_sequence_p (insn_1, insn_2, insn_4))
3298 *p_veneer_i = i + 12;
3306 /* Resize all stub sections. */
3309 _bfd_aarch64_resize_stubs (struct elf_aarch64_link_hash_table *htab)
3313 /* OK, we've added some stubs. Find out the new size of the stub sections. */
3315 for (section = htab->stub_bfd->sections;
3316 section != NULL; section = section->next)
3318 /* Ignore non-stub sections. */
3319 if (!strstr (section->name, STUB_SUFFIX))
3324 bfd_hash_traverse (&htab->stub_hash_table, aarch64_size_one_stub, htab);
3326 for (section = htab->stub_bfd->sections;
3327 section != NULL; section = section->next)
3329 if (!strstr (section->name, STUB_SUFFIX))
3335 /* Ensure all stub sections have a size which is a multiple of
3336 4096. This is important in order to ensure that the insertion
3337 of stub sections does not in itself move existing code around
3338 in such a way that new errata sequences are created. */
3339 if (htab->fix_erratum_843419)
3341 section->size = BFD_ALIGN (section->size, 0x1000);
3346 /* Construct an erratum 843419 workaround stub name.
3350 _bfd_aarch64_erratum_843419_stub_name (asection *input_section,
3353 const bfd_size_type len = 8 + 4 + 1 + 8 + 1 + 16 + 1;
3354 char *stub_name = bfd_malloc (len);
3356 if (stub_name != NULL)
3357 snprintf (stub_name, len, "e843419@%04x_%08x_%" BFD_VMA_FMT "x",
3358 input_section->owner->id,
3364 /* Build a stub_entry structure describing an 843419 fixup.
3366 The stub_entry constructed is populated with the bit pattern INSN
3367 of the instruction located at OFFSET within input SECTION.
3369 Returns TRUE on success. */
3372 _bfd_aarch64_erratum_843419_fixup (uint32_t insn,
3373 bfd_vma adrp_offset,
3374 bfd_vma ldst_offset,
3376 struct bfd_link_info *info)
3378 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3380 struct elf_aarch64_stub_hash_entry *stub_entry;
3382 stub_name = _bfd_aarch64_erratum_843419_stub_name (section, ldst_offset);
3383 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3391 /* We always place an 843419 workaround veneer in the stub section
3392 attached to the input section in which an erratum sequence has
3393 been found. This ensures that later in the link process (in
3394 elfNN_aarch64_write_section) when we copy the veneered
3395 instruction from the input section into the stub section the
3396 copied instruction will have had any relocations applied to it.
3397 If we placed workaround veneers in any other stub section then we
3398 could not assume that all relocations have been processed on the
3399 corresponding input section at the point we output the stub
3403 stub_entry = _bfd_aarch64_add_stub_entry_after (stub_name, section, htab);
3404 if (stub_entry == NULL)
3410 stub_entry->adrp_offset = adrp_offset;
3411 stub_entry->target_value = ldst_offset;
3412 stub_entry->target_section = section;
3413 stub_entry->stub_type = aarch64_stub_erratum_843419_veneer;
3414 stub_entry->veneered_insn = insn;
3415 stub_entry->output_name = stub_name;
3421 /* Scan an input section looking for the signature of erratum 843419.
3423 Scans input SECTION in INPUT_BFD looking for erratum 843419
3424 signatures, for each signature found a stub_entry is created
3425 describing the location of the erratum for subsequent fixup.
3427 Return TRUE on successful scan, FALSE on failure to scan.
3431 _bfd_aarch64_erratum_843419_scan (bfd *input_bfd, asection *section,
3432 struct bfd_link_info *info)
3434 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3439 if (elf_section_type (section) != SHT_PROGBITS
3440 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3441 || (section->flags & SEC_EXCLUDE) != 0
3442 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
3443 || (section->output_section == bfd_abs_section_ptr))
3448 bfd_byte *contents = NULL;
3449 struct _aarch64_elf_section_data *sec_data;
3452 if (elf_section_data (section)->this_hdr.contents != NULL)
3453 contents = elf_section_data (section)->this_hdr.contents;
3454 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3457 sec_data = elf_aarch64_section_data (section);
3459 qsort (sec_data->map, sec_data->mapcount,
3460 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
3462 for (span = 0; span < sec_data->mapcount; span++)
3464 unsigned int span_start = sec_data->map[span].vma;
3465 unsigned int span_end = ((span == sec_data->mapcount - 1)
3466 ? sec_data->map[0].vma + section->size
3467 : sec_data->map[span + 1].vma);
3469 char span_type = sec_data->map[span].type;
3471 if (span_type == 'd')
3474 for (i = span_start; i + 8 < span_end; i += 4)
3476 bfd_vma vma = (section->output_section->vma
3477 + section->output_offset
3481 if (_bfd_aarch64_erratum_843419_p
3482 (contents, vma, i, span_end, &veneer_i))
3484 uint32_t insn = bfd_getl32 (contents + veneer_i);
3486 if (!_bfd_aarch64_erratum_843419_fixup (insn, i, veneer_i,
3493 if (elf_section_data (section)->this_hdr.contents == NULL)
3502 /* Determine and set the size of the stub section for a final link.
3504 The basic idea here is to examine all the relocations looking for
3505 PC-relative calls to a target that is unreachable with a "bl" instruction. */
3509 elfNN_aarch64_size_stubs (bfd *output_bfd,
3511 struct bfd_link_info *info,
3512 bfd_signed_vma group_size,
3513 asection * (*add_stub_section) (const char *,
3515 void (*layout_sections_again) (void))
3517 bfd_size_type stub_group_size;
3518 bfd_boolean stubs_always_before_branch;
3519 bfd_boolean stub_changed = FALSE;
3520 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3521 unsigned int num_erratum_835769_fixes = 0;
3523 /* Propagate mach to stub bfd, because it may not have been
3524 finalized when we created stub_bfd. */
3525 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
3526 bfd_get_mach (output_bfd));
3528 /* Stash our params away. */
3529 htab->stub_bfd = stub_bfd;
3530 htab->add_stub_section = add_stub_section;
3531 htab->layout_sections_again = layout_sections_again;
3532 stubs_always_before_branch = group_size < 0;
3534 stub_group_size = -group_size;
3536 stub_group_size = group_size;
3538 if (stub_group_size == 1)
3540 /* Default values. */
3541 /* AArch64 branch range is +-128MB. The value used is 1MB less. */
3542 stub_group_size = 127 * 1024 * 1024;
3545 group_sections (htab, stub_group_size, stubs_always_before_branch);
3547 (*htab->layout_sections_again) ();
3549 if (htab->fix_erratum_835769)
3553 for (input_bfd = info->input_bfds;
3554 input_bfd != NULL; input_bfd = input_bfd->link.next)
3555 if (!_bfd_aarch64_erratum_835769_scan (input_bfd, info,
3556 &num_erratum_835769_fixes))
3559 _bfd_aarch64_resize_stubs (htab);
3560 (*htab->layout_sections_again) ();
3563 if (htab->fix_erratum_843419)
3567 for (input_bfd = info->input_bfds;
3569 input_bfd = input_bfd->link.next)
3573 for (section = input_bfd->sections;
3575 section = section->next)
3576 if (!_bfd_aarch64_erratum_843419_scan (input_bfd, section, info))
3580 _bfd_aarch64_resize_stubs (htab);
3581 (*htab->layout_sections_again) ();
3588 for (input_bfd = info->input_bfds;
3589 input_bfd != NULL; input_bfd = input_bfd->link.next)
3591 Elf_Internal_Shdr *symtab_hdr;
3593 Elf_Internal_Sym *local_syms = NULL;
3595 /* We'll need the symbol table in a second. */
3596 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
3597 if (symtab_hdr->sh_info == 0)
3600 /* Walk over each section attached to the input bfd. */
3601 for (section = input_bfd->sections;
3602 section != NULL; section = section->next)
3604 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
3606 /* If there aren't any relocs, then there's nothing more to do. */
3608 if ((section->flags & SEC_RELOC) == 0
3609 || section->reloc_count == 0
3610 || (section->flags & SEC_CODE) == 0)
3613 /* If this section is a link-once section that will be
3614 discarded, then don't create any stubs. */
3615 if (section->output_section == NULL
3616 || section->output_section->owner != output_bfd)
3619 /* Get the relocs. */
3621 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
3622 NULL, info->keep_memory);
3623 if (internal_relocs == NULL)
3624 goto error_ret_free_local;
3626 /* Now examine each relocation. */
3627 irela = internal_relocs;
3628 irelaend = irela + section->reloc_count;
3629 for (; irela < irelaend; irela++)
3631 unsigned int r_type, r_indx;
3632 enum elf_aarch64_stub_type stub_type;
3633 struct elf_aarch64_stub_hash_entry *stub_entry;
3636 bfd_vma destination;
3637 struct elf_aarch64_link_hash_entry *hash;
3638 const char *sym_name;
3640 const asection *id_sec;
3641 unsigned char st_type;
3644 r_type = ELFNN_R_TYPE (irela->r_info);
3645 r_indx = ELFNN_R_SYM (irela->r_info);
3647 if (r_type >= (unsigned int) R_AARCH64_end)
3649 bfd_set_error (bfd_error_bad_value);
3650 error_ret_free_internal:
3651 if (elf_section_data (section)->relocs == NULL)
3652 free (internal_relocs);
3653 goto error_ret_free_local;
3656 /* Only look for stubs on unconditional branch and
3657 branch and link instructions. */
3658 if (r_type != (unsigned int) AARCH64_R (CALL26)
3659 && r_type != (unsigned int) AARCH64_R (JUMP26))
3662 /* Now determine the call target, its name, value, section. */
3669 if (r_indx < symtab_hdr->sh_info)
3671 /* It's a local symbol. */
3672 Elf_Internal_Sym *sym;
3673 Elf_Internal_Shdr *hdr;
3675 if (local_syms == NULL)
3678 = (Elf_Internal_Sym *) symtab_hdr->contents;
3679 if (local_syms == NULL)
3681 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
3682 symtab_hdr->sh_info, 0,
3684 if (local_syms == NULL)
3685 goto error_ret_free_internal;
3688 sym = local_syms + r_indx;
3689 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
3690 sym_sec = hdr->bfd_section;
3692 /* This is an undefined symbol. It can never be resolved. */
3696 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
3697 sym_value = sym->st_value;
3698 destination = (sym_value + irela->r_addend
3699 + sym_sec->output_offset
3700 + sym_sec->output_section->vma);
3701 st_type = ELF_ST_TYPE (sym->st_info);
3703 = bfd_elf_string_from_elf_section (input_bfd,
3704 symtab_hdr->sh_link,
3711 e_indx = r_indx - symtab_hdr->sh_info;
3712 hash = ((struct elf_aarch64_link_hash_entry *)
3713 elf_sym_hashes (input_bfd)[e_indx]);
3715 while (hash->root.root.type == bfd_link_hash_indirect
3716 || hash->root.root.type == bfd_link_hash_warning)
3717 hash = ((struct elf_aarch64_link_hash_entry *)
3718 hash->root.root.u.i.link);
3720 if (hash->root.root.type == bfd_link_hash_defined
3721 || hash->root.root.type == bfd_link_hash_defweak)
3723 struct elf_aarch64_link_hash_table *globals =
3724 elf_aarch64_hash_table (info);
3725 sym_sec = hash->root.root.u.def.section;
3726 sym_value = hash->root.root.u.def.value;
3727 /* For a destination in a shared library,
3728 use the PLT stub as target address to
3729 decide whether a branch stub is needed. */
3731 if (globals->root.splt != NULL && hash != NULL
3732 && hash->root.plt.offset != (bfd_vma) - 1)
3734 sym_sec = globals->root.splt;
3735 sym_value = hash->root.plt.offset;
3736 if (sym_sec->output_section != NULL)
3737 destination = (sym_value
3738 + sym_sec->output_offset
3740 sym_sec->output_section->vma);
3742 else if (sym_sec->output_section != NULL)
3743 destination = (sym_value + irela->r_addend
3744 + sym_sec->output_offset
3745 + sym_sec->output_section->vma);
3747 else if (hash->root.root.type == bfd_link_hash_undefined
3748 || (hash->root.root.type
3749 == bfd_link_hash_undefweak))
3751 /* For a shared library, use the PLT stub as
3752 target address to decide whether a long
3753 branch stub is needed.
3754 For absolute code, they cannot be handled. */
3755 struct elf_aarch64_link_hash_table *globals =
3756 elf_aarch64_hash_table (info);
3758 if (globals->root.splt != NULL && hash != NULL
3759 && hash->root.plt.offset != (bfd_vma) - 1)
3761 sym_sec = globals->root.splt;
3762 sym_value = hash->root.plt.offset;
3763 if (sym_sec->output_section != NULL)
3764 destination = (sym_value
3765 + sym_sec->output_offset
3767 sym_sec->output_section->vma);
3774 bfd_set_error (bfd_error_bad_value);
3775 goto error_ret_free_internal;
3777 st_type = ELF_ST_TYPE (hash->root.type);
3778 sym_name = hash->root.root.root.string;
3781 /* Determine what (if any) linker stub is needed. */
3782 stub_type = aarch64_type_of_stub
3783 (info, section, irela, st_type, hash, destination);
3784 if (stub_type == aarch64_stub_none)
3787 /* Support for grouping stub sections. */
3788 id_sec = htab->stub_group[section->id].link_sec;
3790 /* Get the name of this stub. */
3791 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, hash,
3794 goto error_ret_free_internal;
3797 aarch64_stub_hash_lookup (&htab->stub_hash_table,
3798 stub_name, FALSE, FALSE);
3799 if (stub_entry != NULL)
3801 /* The proper stub has already been created. */
3806 stub_entry = _bfd_aarch64_add_stub_entry_in_group
3807 (stub_name, section, htab);
3808 if (stub_entry == NULL)
3811 goto error_ret_free_internal;
3814 stub_entry->target_value = sym_value;
3815 stub_entry->target_section = sym_sec;
3816 stub_entry->stub_type = stub_type;
3817 stub_entry->h = hash;
3818 stub_entry->st_type = st_type;
3820 if (sym_name == NULL)
3821 sym_name = "unnamed";
3822 len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
3823 stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
3824 if (stub_entry->output_name == NULL)
3827 goto error_ret_free_internal;
3830 snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
3833 stub_changed = TRUE;
3836 /* We're done with the internal relocs, free them. */
3837 if (elf_section_data (section)->relocs == NULL)
3838 free (internal_relocs);
3845 _bfd_aarch64_resize_stubs (htab);
3847 /* Ask the linker to do its stuff. */
3848 (*htab->layout_sections_again) ();
3849 stub_changed = FALSE;
3854 error_ret_free_local:
3858 /* Build all the stubs associated with the current output file. The
3859 stubs are kept in a hash table attached to the main linker hash
3860 table. We also set up the .plt entries for statically linked PIC
3861 functions here. This function is called via aarch64_elf_finish in the linker. */
3865 elfNN_aarch64_build_stubs (struct bfd_link_info *info)
3868 struct bfd_hash_table *table;
3869 struct elf_aarch64_link_hash_table *htab;
3871 htab = elf_aarch64_hash_table (info);
3873 for (stub_sec = htab->stub_bfd->sections;
3874 stub_sec != NULL; stub_sec = stub_sec->next)
3878 /* Ignore non-stub sections. */
3879 if (!strstr (stub_sec->name, STUB_SUFFIX))
3882 /* Allocate memory to hold the linker stubs. */
3883 size = stub_sec->size;
3884 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
3885 if (stub_sec->contents == NULL && size != 0)
3889 bfd_putl32 (0x14000000 | (size >> 2), stub_sec->contents);
3890 stub_sec->size += 4;
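/* 0x14000000 is the encoding of an unconditional A64 "B" instruction with a
   zero offset, and size >> 2 is the stub section size in words, so the word
   written above is a branch from the start of the stub section to its end;
   presumably this keeps execution that falls through from the preceding
   section from running into the stub pool.  */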
3893 /* Build the stubs as directed by the stub hash table. */
3894 table = &htab->stub_hash_table;
3895 bfd_hash_traverse (table, aarch64_build_one_stub, info);
3901 /* Add an entry to the code/data map for section SEC. */
3904 elfNN_aarch64_section_map_add (asection *sec, char type, bfd_vma vma)
3906 struct _aarch64_elf_section_data *sec_data =
3907 elf_aarch64_section_data (sec);
3908 unsigned int newidx;
3910 if (sec_data->map == NULL)
3912 sec_data->map = bfd_malloc (sizeof (elf_aarch64_section_map));
3913 sec_data->mapcount = 0;
3914 sec_data->mapsize = 1;
3917 newidx = sec_data->mapcount++;
3919 if (sec_data->mapcount > sec_data->mapsize)
3921 sec_data->mapsize *= 2;
3922 sec_data->map = bfd_realloc_or_free
3923 (sec_data->map, sec_data->mapsize * sizeof (elf_aarch64_section_map));
3928 sec_data->map[newidx].vma = vma;
3929 sec_data->map[newidx].type = type;
3934 /* Initialise maps of insn/data for input BFDs. */
3936 bfd_elfNN_aarch64_init_maps (bfd *abfd)
3938 Elf_Internal_Sym *isymbuf;
3939 Elf_Internal_Shdr *hdr;
3940 unsigned int i, localsyms;
3942 /* Make sure that we are dealing with an AArch64 elf binary. */
3943 if (!is_aarch64_elf (abfd))
3946 if ((abfd->flags & DYNAMIC) != 0)
3949 hdr = &elf_symtab_hdr (abfd);
3950 localsyms = hdr->sh_info;
3952 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
3953 should contain the number of local symbols, which should come before any
3954 global symbols. Mapping symbols are always local. */
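/* Mapping symbols follow the AArch64 ELF conventions: "$x" starts a span of
   A64 code and "$d" starts a span of data, so name[1] passed to
   elfNN_aarch64_section_map_add below is 'x' or 'd', and the erratum
   scanners above skip spans whose type is 'd'.  */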
3955 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL, NULL);
3957 /* No internal symbols read? Skip this BFD. */
3958 if (isymbuf == NULL)
3961 for (i = 0; i < localsyms; i++)
3963 Elf_Internal_Sym *isym = &isymbuf[i];
3964 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
3967 if (sec != NULL && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
3969 name = bfd_elf_string_from_elf_section (abfd,
3973 if (bfd_is_aarch64_special_symbol_name
3974 (name, BFD_AARCH64_SPECIAL_SYM_TYPE_MAP))
3975 elfNN_aarch64_section_map_add (sec, name[1], isym->st_value);
3980 /* Set option values needed during linking. */
3982 bfd_elfNN_aarch64_set_options (struct bfd *output_bfd,
3983 struct bfd_link_info *link_info,
3985 int no_wchar_warn, int pic_veneer,
3986 int fix_erratum_835769,
3987 int fix_erratum_843419)
3989 struct elf_aarch64_link_hash_table *globals;
3991 globals = elf_aarch64_hash_table (link_info);
3992 globals->pic_veneer = pic_veneer;
3993 globals->fix_erratum_835769 = fix_erratum_835769;
3994 globals->fix_erratum_843419 = fix_erratum_843419;
3995 globals->fix_erratum_843419_adr = TRUE;
3997 BFD_ASSERT (is_aarch64_elf (output_bfd));
3998 elf_aarch64_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
3999 elf_aarch64_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
4003 aarch64_calculate_got_entry_vma (struct elf_link_hash_entry *h,
4004 struct elf_aarch64_link_hash_table
4005 *globals, struct bfd_link_info *info,
4006 bfd_vma value, bfd *output_bfd,
4007 bfd_boolean *unresolved_reloc_p)
4009 bfd_vma off = (bfd_vma) - 1;
4010 asection *basegot = globals->root.sgot;
4011 bfd_boolean dyn = globals->root.dynamic_sections_created;
4015 BFD_ASSERT (basegot != NULL);
4016 off = h->got.offset;
4017 BFD_ASSERT (off != (bfd_vma) - 1);
4018 if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
4020 && SYMBOL_REFERENCES_LOCAL (info, h))
4021 || (ELF_ST_VISIBILITY (h->other)
4022 && h->root.type == bfd_link_hash_undefweak))
4024 /* This is actually a static link, or it is a -Bsymbolic link
4025 and the symbol is defined locally. We must initialize this
4026 entry in the global offset table. Since the offset must
4027 always be a multiple of 8 (4 in the case of ILP32), we use
4028 the least significant bit to record whether we have
4029 initialized it already.
4030 When doing a dynamic link, we create a .rel(a).got relocation
4031 entry to initialize the value. This is done in the
4032 finish_dynamic_symbol routine. */
4037 bfd_put_NN (output_bfd, value, basegot->contents + off);
4042 *unresolved_reloc_p = FALSE;
4044 off = off + basegot->output_section->vma + basegot->output_offset;
4050 /* Change R_TYPE to a more efficient access model where possible,
4051 return the new reloc type. */
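/* For example, a general-dynamic page reference such as
   BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 is rewritten below to
   BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 when the symbol may still be
   resolved dynamically, and all the way to
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 when the symbol is local to the
   link (h == NULL).  */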
4053 static bfd_reloc_code_real_type
4054 aarch64_tls_transition_without_check (bfd_reloc_code_real_type r_type,
4055 struct elf_link_hash_entry *h)
4057 bfd_boolean is_local = h == NULL;
4061 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
4062 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4064 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
4065 : BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21);
4067 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
4069 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
4072 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
4074 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
4075 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
4077 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
4078 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4080 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
4081 : BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC);
4083 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4084 return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 : r_type;
4086 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
4087 return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC : r_type;
4089 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
4092 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4094 ? BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12
4095 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
4097 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
4098 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4099 /* Instructions with these relocations will become NOPs. */
4100 return BFD_RELOC_AARCH64_NONE;
4110 aarch64_reloc_got_type (bfd_reloc_code_real_type r_type)
4114 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4115 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4116 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
4117 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4118 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
4119 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4122 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4123 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4124 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4125 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
4128 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
4129 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
4130 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
4131 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4132 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
4133 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
4134 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
4135 return GOT_TLSDESC_GD;
4137 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4138 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
4139 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4140 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
4143 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
4144 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
4145 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
4146 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4147 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4148 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4149 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4150 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4160 aarch64_can_relax_tls (bfd *input_bfd,
4161 struct bfd_link_info *info,
4162 bfd_reloc_code_real_type r_type,
4163 struct elf_link_hash_entry *h,
4164 unsigned long r_symndx)
4166 unsigned int symbol_got_type;
4167 unsigned int reloc_got_type;
4169 if (! IS_AARCH64_TLS_RELOC (r_type))
4172 symbol_got_type = elfNN_aarch64_symbol_got_type (h, input_bfd, r_symndx);
4173 reloc_got_type = aarch64_reloc_got_type (r_type);
4175 if (symbol_got_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (reloc_got_type))
4181 if (h && h->root.type == bfd_link_hash_undefweak)
4187 /* Given the relocation code R_TYPE, return the relaxed bfd reloc
4190 static bfd_reloc_code_real_type
4191 aarch64_tls_transition (bfd *input_bfd,
4192 struct bfd_link_info *info,
4193 unsigned int r_type,
4194 struct elf_link_hash_entry *h,
4195 unsigned long r_symndx)
4197 bfd_reloc_code_real_type bfd_r_type
4198 = elfNN_aarch64_bfd_reloc_from_type (r_type);
4200 if (! aarch64_can_relax_tls (input_bfd, info, bfd_r_type, h, r_symndx))
4203 return aarch64_tls_transition_without_check (bfd_r_type, h);
4206 /* Return the base VMA address which should be subtracted from real addresses
4207 when resolving R_AARCH64_TLS_DTPREL relocation. */
4210 dtpoff_base (struct bfd_link_info *info)
4212 /* If tls_sec is NULL, we should have signalled an error already. */
4213 BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
4214 return elf_hash_table (info)->tls_sec->vma;
4217 /* Return the base VMA address which should be subtracted from real addresses
4218 when resolving R_AARCH64_TLS_GOTTPREL64 relocations. */
4221 tpoff_base (struct bfd_link_info *info)
4223 struct elf_link_hash_table *htab = elf_hash_table (info);
4225 /* If tls_sec is NULL, we should have signalled an error already. */
4226 BFD_ASSERT (htab->tls_sec != NULL);
4228 bfd_vma base = align_power ((bfd_vma) TCB_SIZE,
4229 htab->tls_sec->alignment_power);
4230 return htab->tls_sec->vma - base;
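/* Illustrative example (assuming TCB_SIZE is 16 and the TLS section is
   16-byte aligned): with tls_sec->vma == 0x411000 this returns
   0x411000 - 16, so a TLS variable at VMA 0x411008 gets the TP-relative
   offset 0x411008 - (0x411000 - 16) = 0x18, i.e. it sits just past the
   thread control block, as the AArch64 TLS layout requires.  */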
4234 symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
4235 unsigned long r_symndx)
4237 /* Calculate the address of the GOT entry for symbol
4238 referred to in h. */
4240 return &h->got.offset;
4244 struct elf_aarch64_local_symbol *l;
4246 l = elf_aarch64_locals (input_bfd);
4247 return &l[r_symndx].got_offset;
4252 symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
4253 unsigned long r_symndx)
4256 p = symbol_got_offset_ref (input_bfd, h, r_symndx);
4261 symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h,
4262 unsigned long r_symndx)
4265 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
4270 symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
4271 unsigned long r_symndx)
4274 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
4280 symbol_tlsdesc_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
4281 unsigned long r_symndx)
4283 /* Calculate the address of the GOT entry for symbol
4284 referred to in h. */
4287 struct elf_aarch64_link_hash_entry *eh;
4288 eh = (struct elf_aarch64_link_hash_entry *) h;
4289 return &eh->tlsdesc_got_jump_table_offset;
4294 struct elf_aarch64_local_symbol *l;
4296 l = elf_aarch64_locals (input_bfd);
4297 return &l[r_symndx].tlsdesc_got_jump_table_offset;
4302 symbol_tlsdesc_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
4303 unsigned long r_symndx)
4306 p = symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
4311 symbol_tlsdesc_got_offset_mark_p (bfd *input_bfd,
4312 struct elf_link_hash_entry *h,
4313 unsigned long r_symndx)
4316 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
4321 symbol_tlsdesc_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
4322 unsigned long r_symndx)
4325 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
4330 /* Data for make_branch_to_erratum_835769_stub(). */
4332 struct erratum_835769_branch_to_stub_data
4334 struct bfd_link_info *info;
4335 asection *output_section;
4339 /* Helper to insert branches to erratum 835769 stubs in the right
4340 places for a particular section. */
4343 make_branch_to_erratum_835769_stub (struct bfd_hash_entry *gen_entry,
4346 struct elf_aarch64_stub_hash_entry *stub_entry;
4347 struct erratum_835769_branch_to_stub_data *data;
4349 unsigned long branch_insn = 0;
4350 bfd_vma veneered_insn_loc, veneer_entry_loc;
4351 bfd_signed_vma branch_offset;
4352 unsigned int target;
4355 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
4356 data = (struct erratum_835769_branch_to_stub_data *) in_arg;
4358 if (stub_entry->target_section != data->output_section
4359 || stub_entry->stub_type != aarch64_stub_erratum_835769_veneer)
4362 contents = data->contents;
4363 veneered_insn_loc = stub_entry->target_section->output_section->vma
4364 + stub_entry->target_section->output_offset
4365 + stub_entry->target_value;
4366 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
4367 + stub_entry->stub_sec->output_offset
4368 + stub_entry->stub_offset;
4369 branch_offset = veneer_entry_loc - veneered_insn_loc;
4371 abfd = stub_entry->target_section->owner;
4372 if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
4373 (*_bfd_error_handler)
4374 (_("%B: error: Erratum 835769 stub out "
4375 "of range (input file too large)"), abfd);
4377 target = stub_entry->target_value;
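/* The following lines assemble an unconditional B to the veneer:
   opcode 0x14000000 carries a signed 26-bit word offset in its low
   bits, so the byte offset is shifted right by two and masked to 26
   bits before being OR-ed in.  */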
4378 branch_insn = 0x14000000;
4379 branch_offset >>= 2;
4380 branch_offset &= 0x3ffffff;
4381 branch_insn |= branch_offset;
4382 bfd_putl32 (branch_insn, &contents[target]);
4389 _bfd_aarch64_erratum_843419_branch_to_stub (struct bfd_hash_entry *gen_entry,
4392 struct elf_aarch64_stub_hash_entry *stub_entry
4393 = (struct elf_aarch64_stub_hash_entry *) gen_entry;
4394 struct erratum_835769_branch_to_stub_data *data
4395 = (struct erratum_835769_branch_to_stub_data *) in_arg;
4396 struct bfd_link_info *info;
4397 struct elf_aarch64_link_hash_table *htab;
4405 contents = data->contents;
4406 section = data->output_section;
4408 htab = elf_aarch64_hash_table (info);
4410 if (stub_entry->target_section != section
4411 || stub_entry->stub_type != aarch64_stub_erratum_843419_veneer)
4414 insn = bfd_getl32 (contents + stub_entry->target_value);
4416 stub_entry->stub_sec->contents + stub_entry->stub_offset);
4418 place = (section->output_section->vma + section->output_offset
4419 + stub_entry->adrp_offset);
4420 insn = bfd_getl32 (contents + stub_entry->adrp_offset);
4422 if ((insn & AARCH64_ADRP_OP_MASK) != AARCH64_ADRP_OP)
4425 bfd_signed_vma imm =
4426 (_bfd_aarch64_sign_extend
4427 ((bfd_vma) _bfd_aarch64_decode_adrp_imm (insn) << 12, 33)
4430 if (htab->fix_erratum_843419_adr
4431 && (imm >= AARCH64_MIN_ADRP_IMM && imm <= AARCH64_MAX_ADRP_IMM))
4433 insn = (_bfd_aarch64_reencode_adr_imm (AARCH64_ADR_OP, imm)
4434 | AARCH64_RT (insn));
4435 bfd_putl32 (insn, contents + stub_entry->adrp_offset);
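/* At this point the ADRP at adrp_offset has been rewritten in place
   into an ADR with the same destination register: when the computed
   offset also fits ADR's signed 21-bit (+/-1MiB) immediate, no branch
   to a veneer is needed at all.  */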
4439 bfd_vma veneered_insn_loc;
4440 bfd_vma veneer_entry_loc;
4441 bfd_signed_vma branch_offset;
4442 uint32_t branch_insn;
4444 veneered_insn_loc = stub_entry->target_section->output_section->vma
4445 + stub_entry->target_section->output_offset
4446 + stub_entry->target_value;
4447 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
4448 + stub_entry->stub_sec->output_offset
4449 + stub_entry->stub_offset;
4450 branch_offset = veneer_entry_loc - veneered_insn_loc;
4452 abfd = stub_entry->target_section->owner;
4453 if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
4454 (*_bfd_error_handler)
4455 (_("%B: error: Erratum 843419 stub out "
4456 "of range (input file too large)"), abfd);
4458 branch_insn = 0x14000000;
4459 branch_offset >>= 2;
4460 branch_offset &= 0x3ffffff;
4461 branch_insn |= branch_offset;
4462 bfd_putl32 (branch_insn, contents + stub_entry->target_value);
4469 elfNN_aarch64_write_section (bfd *output_bfd ATTRIBUTE_UNUSED,
4470 struct bfd_link_info *link_info,
4475 struct elf_aarch64_link_hash_table *globals =
4476 elf_aarch64_hash_table (link_info);
4478 if (globals == NULL)
4481 /* Fix code to point to erratum 835769 stubs. */
4482 if (globals->fix_erratum_835769)
4484 struct erratum_835769_branch_to_stub_data data;
4486 data.info = link_info;
4487 data.output_section = sec;
4488 data.contents = contents;
4489 bfd_hash_traverse (&globals->stub_hash_table,
4490 make_branch_to_erratum_835769_stub, &data);
4493 if (globals->fix_erratum_843419)
4495 struct erratum_835769_branch_to_stub_data data;
4497 data.info = link_info;
4498 data.output_section = sec;
4499 data.contents = contents;
4500 bfd_hash_traverse (&globals->stub_hash_table,
4501 _bfd_aarch64_erratum_843419_branch_to_stub, &data);
4507 /* Perform a relocation as part of a final link. */
4508 static bfd_reloc_status_type
4509 elfNN_aarch64_final_link_relocate (reloc_howto_type *howto,
4512 asection *input_section,
4514 Elf_Internal_Rela *rel,
4516 struct bfd_link_info *info,
4518 struct elf_link_hash_entry *h,
4519 bfd_boolean *unresolved_reloc_p,
4520 bfd_boolean save_addend,
4521 bfd_vma *saved_addend,
4522 Elf_Internal_Sym *sym)
4524 Elf_Internal_Shdr *symtab_hdr;
4525 unsigned int r_type = howto->type;
4526 bfd_reloc_code_real_type bfd_r_type
4527 = elfNN_aarch64_bfd_reloc_from_howto (howto);
4528 bfd_reloc_code_real_type new_bfd_r_type;
4529 unsigned long r_symndx;
4530 bfd_byte *hit_data = contents + rel->r_offset;
4532 bfd_signed_vma signed_addend;
4533 struct elf_aarch64_link_hash_table *globals;
4534 bfd_boolean weak_undef_p;
4537 globals = elf_aarch64_hash_table (info);
4539 symtab_hdr = &elf_symtab_hdr (input_bfd);
4541 BFD_ASSERT (is_aarch64_elf (input_bfd));
4543 r_symndx = ELFNN_R_SYM (rel->r_info);
4545 /* It is possible to have linker relaxations on some TLS access
4546 models. Update our information here. */
4547 new_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type, h, r_symndx);
4548 if (new_bfd_r_type != bfd_r_type)
4550 bfd_r_type = new_bfd_r_type;
4551 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
4552 BFD_ASSERT (howto != NULL);
4553 r_type = howto->type;
4556 place = input_section->output_section->vma
4557 + input_section->output_offset + rel->r_offset;
4559 /* Get addend, accumulating the addend for consecutive relocs
4560 which refer to the same offset. */
4561 signed_addend = saved_addend ? *saved_addend : 0;
4562 signed_addend += rel->r_addend;
4564 weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
4565 : bfd_is_und_section (sym_sec));
4567 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
4568 it here if it is defined in a non-shared object. */
4570 && h->type == STT_GNU_IFUNC
4577 if ((input_section->flags & SEC_ALLOC) == 0
4578 || h->plt.offset == (bfd_vma) -1)
4581 /* STT_GNU_IFUNC symbol must go through PLT. */
4582 plt = globals->root.splt ? globals->root.splt : globals->root.iplt;
4583 value = (plt->output_section->vma + plt->output_offset + h->plt.offset);
4588 if (h->root.root.string)
4589 name = h->root.root.string;
4591 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
4593 (*_bfd_error_handler)
4594 (_("%B: relocation %s against STT_GNU_IFUNC "
4595 "symbol `%s' isn't handled by %s"), input_bfd,
4596 howto->name, name, __FUNCTION__);
4597 bfd_set_error (bfd_error_bad_value);
4600 case BFD_RELOC_AARCH64_NN:
4601 if (rel->r_addend != 0)
4603 if (h->root.root.string)
4604 name = h->root.root.string;
4606 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4608 (*_bfd_error_handler)
4609 (_("%B: relocation %s against STT_GNU_IFUNC "
4610 "symbol `%s' has non-zero addend: %d"),
4611 input_bfd, howto->name, name, rel->r_addend);
4612 bfd_set_error (bfd_error_bad_value);
4616 /* Generate dynamic relocation only when there is a
4617 non-GOT reference in a shared object. */
4618 if (info->shared && h->non_got_ref)
4620 Elf_Internal_Rela outrel;
4623 /* Need a dynamic relocation to get the real function
4625 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
4629 if (outrel.r_offset == (bfd_vma) -1
4630 || outrel.r_offset == (bfd_vma) -2)
4633 outrel.r_offset += (input_section->output_section->vma
4634 + input_section->output_offset);
4636 if (h->dynindx == -1
4638 || info->executable)
4640 /* This symbol is resolved locally. */
4641 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
4642 outrel.r_addend = (h->root.u.def.value
4643 + h->root.u.def.section->output_section->vma
4644 + h->root.u.def.section->output_offset);
4648 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
4649 outrel.r_addend = 0;
4652 sreloc = globals->root.irelifunc;
4653 elf_append_rela (output_bfd, sreloc, &outrel);
4655 /* If this reloc is against an external symbol, we
4656 do not want to fiddle with the addend. Otherwise,
4657 we need to include the symbol value so that it
4658 becomes an addend for the dynamic reloc. For an
4659 internal symbol, we have already updated the addend.  */
4660 return bfd_reloc_ok;
4663 case BFD_RELOC_AARCH64_CALL26:
4664 case BFD_RELOC_AARCH64_JUMP26:
4665 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4668 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
4670 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4671 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4672 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
4673 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4674 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
4675 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4676 base_got = globals->root.sgot;
4677 off = h->got.offset;
4679 if (base_got == NULL)
4682 if (off == (bfd_vma) -1)
4686 /* We can't use h->got.offset here to save state, or
4687 even just remember the offset, as finish_dynamic_symbol
4688 would use that as offset into .got. */
4690 if (globals->root.splt != NULL)
4692 plt_index = ((h->plt.offset - globals->plt_header_size) /
4693 globals->plt_entry_size);
4694 off = (plt_index + 3) * GOT_ENTRY_SIZE;
4695 base_got = globals->root.sgotplt;
4699 plt_index = h->plt.offset / globals->plt_entry_size;
4700 off = plt_index * GOT_ENTRY_SIZE;
4701 base_got = globals->root.igotplt;
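/* The "+ 3" above skips the reserved entries at the start of .got.plt
   (conventionally &_DYNAMIC plus two slots the dynamic linker fills
   with its link map and resolver addresses); .igotplt, used here for
   static ifuncs, has no such header, hence the plain
   plt_index * GOT_ENTRY_SIZE.  */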
4704 if (h->dynindx == -1
4708 /* This references the local definition. We must
4709 initialize this entry in the global offset table.
4710 Since the offset must always be a multiple of 8,
4711 we use the least significant bit to record
4712 whether we have initialized it already.
4714 When doing a dynamic link, we create a .rela.got
4715 relocation entry to initialize the value. This
4716 is done in the finish_dynamic_symbol routine. */
4721 bfd_put_NN (output_bfd, value,
4722 base_got->contents + off);
4723 /* Note that this is harmless as -1 | 1 still is -1. */
4727 value = (base_got->output_section->vma
4728 + base_got->output_offset + off);
4731 value = aarch64_calculate_got_entry_vma (h, globals, info,
4733 unresolved_reloc_p);
4734 if (bfd_r_type == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
4735 || bfd_r_type == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14)
4736 addend = (globals->root.sgot->output_section->vma
4737 + globals->root.sgot->output_offset);
4738 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4739 addend, weak_undef_p);
4740 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type, howto, value);
4741 case BFD_RELOC_AARCH64_ADD_LO12:
4742 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
4749 case BFD_RELOC_AARCH64_NONE:
4750 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4751 *unresolved_reloc_p = FALSE;
4752 return bfd_reloc_ok;
4754 case BFD_RELOC_AARCH64_NN:
4756 /* When generating a shared object or relocatable executable, these
4757 relocations are copied into the output file to be resolved at
4759 if (((info->shared == TRUE) || globals->root.is_relocatable_executable)
4760 && (input_section->flags & SEC_ALLOC)
4762 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4763 || h->root.type != bfd_link_hash_undefweak))
4765 Elf_Internal_Rela outrel;
4767 bfd_boolean skip, relocate;
4770 *unresolved_reloc_p = FALSE;
4775 outrel.r_addend = signed_addend;
4777 _bfd_elf_section_offset (output_bfd, info, input_section,
4779 if (outrel.r_offset == (bfd_vma) - 1)
4781 else if (outrel.r_offset == (bfd_vma) - 2)
4787 outrel.r_offset += (input_section->output_section->vma
4788 + input_section->output_offset);
4791 memset (&outrel, 0, sizeof outrel);
4794 && (!info->shared || !SYMBOLIC_BIND (info, h) || !h->def_regular))
4795 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
4800 /* On SVR4-ish systems, the dynamic loader cannot
4801 relocate the text and data segments independently,
4802 so the symbol does not matter. */
4804 outrel.r_info = ELFNN_R_INFO (symbol, AARCH64_R (RELATIVE));
4805 outrel.r_addend += value;
4808 sreloc = elf_section_data (input_section)->sreloc;
4809 if (sreloc == NULL || sreloc->contents == NULL)
4810 return bfd_reloc_notsupported;
4812 loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (globals);
4813 bfd_elfNN_swap_reloca_out (output_bfd, &outrel, loc);
4815 if (sreloc->reloc_count * RELOC_SIZE (globals) > sreloc->size)
4817 /* Sanity check that we have previously allocated
4818 sufficient space in the relocation section for the
4819 number of relocations we actually want to emit. */
4823 /* If this reloc is against an external symbol, we do not want to
4824 fiddle with the addend. Otherwise, we need to include the symbol
4825 value so that it becomes an addend for the dynamic reloc. */
4827 return bfd_reloc_ok;
4829 return _bfd_final_link_relocate (howto, input_bfd, input_section,
4830 contents, rel->r_offset, value,
4834 value += signed_addend;
4837 case BFD_RELOC_AARCH64_CALL26:
4838 case BFD_RELOC_AARCH64_JUMP26:
4840 asection *splt = globals->root.splt;
4841 bfd_boolean via_plt_p =
4842 splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1;
4844 /* A call to an undefined weak symbol is converted to a jump to
4845 the next instruction unless a PLT entry will be created.
4846 The jump to the next instruction is optimized as a NOP.
4847 Do the same for local undefined symbols. */
4848 if (weak_undef_p && ! via_plt_p)
4850 bfd_putl32 (INSN_NOP, hit_data);
4851 return bfd_reloc_ok;
4854 /* If the call goes through a PLT entry, make sure to
4855 check distance to the right destination address. */
4858 value = (splt->output_section->vma
4859 + splt->output_offset + h->plt.offset);
4860 *unresolved_reloc_p = FALSE;
4863 /* If the target symbol is global and marked as a function, the
4864 relocation applies to a function call or a tail call.  In this
4865 situation we can veneer out-of-range branches.  The veneers
4866 use IP0 and IP1, hence they cannot be used for arbitrary out-of-range
4867 branches that occur within the body of a function.  */
4868 if (h && h->type == STT_FUNC)
4870 /* Check if a stub has to be inserted because the destination
4872 if (! aarch64_valid_branch_p (value, place))
4874 /* The target is out of reach, so redirect the branch to
4875 the local stub for this function. */
4876 struct elf_aarch64_stub_hash_entry *stub_entry;
4877 stub_entry = elfNN_aarch64_get_stub_entry (input_section,
4880 if (stub_entry != NULL)
4881 value = (stub_entry->stub_offset
4882 + stub_entry->stub_sec->output_offset
4883 + stub_entry->stub_sec->output_section->vma);
4887 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4888 signed_addend, weak_undef_p);
4891 case BFD_RELOC_AARCH64_16_PCREL:
4892 case BFD_RELOC_AARCH64_32_PCREL:
4893 case BFD_RELOC_AARCH64_64_PCREL:
4894 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
4895 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
4896 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
4897 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
4899 && (input_section->flags & SEC_ALLOC) != 0
4900 && (input_section->flags & SEC_READONLY) != 0
4904 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
4906 (*_bfd_error_handler)
4907 (_("%B: relocation %s against external symbol `%s' can not be used"
4908 " when making a shared object; recompile with -fPIC"),
4909 input_bfd, elfNN_aarch64_howto_table[howto_index].name,
4910 h->root.root.string);
4911 bfd_set_error (bfd_error_bad_value);
4915 case BFD_RELOC_AARCH64_16:
4917 case BFD_RELOC_AARCH64_32:
4919 case BFD_RELOC_AARCH64_ADD_LO12:
4920 case BFD_RELOC_AARCH64_BRANCH19:
4921 case BFD_RELOC_AARCH64_LDST128_LO12:
4922 case BFD_RELOC_AARCH64_LDST16_LO12:
4923 case BFD_RELOC_AARCH64_LDST32_LO12:
4924 case BFD_RELOC_AARCH64_LDST64_LO12:
4925 case BFD_RELOC_AARCH64_LDST8_LO12:
4926 case BFD_RELOC_AARCH64_MOVW_G0:
4927 case BFD_RELOC_AARCH64_MOVW_G0_NC:
4928 case BFD_RELOC_AARCH64_MOVW_G0_S:
4929 case BFD_RELOC_AARCH64_MOVW_G1:
4930 case BFD_RELOC_AARCH64_MOVW_G1_NC:
4931 case BFD_RELOC_AARCH64_MOVW_G1_S:
4932 case BFD_RELOC_AARCH64_MOVW_G2:
4933 case BFD_RELOC_AARCH64_MOVW_G2_NC:
4934 case BFD_RELOC_AARCH64_MOVW_G2_S:
4935 case BFD_RELOC_AARCH64_MOVW_G3:
4936 case BFD_RELOC_AARCH64_TSTBR14:
4937 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4938 signed_addend, weak_undef_p);
4941 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4942 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4943 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
4944 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4945 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
4946 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4947 if (globals->root.sgot == NULL)
4948 BFD_ASSERT (h != NULL);
4953 value = aarch64_calculate_got_entry_vma (h, globals, info, value,
4955 unresolved_reloc_p);
4956 if (bfd_r_type == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
4957 || bfd_r_type == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14)
4958 addend = (globals->root.sgot->output_section->vma
4959 + globals->root.sgot->output_offset);
4960 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4961 addend, weak_undef_p);
4966 struct elf_aarch64_local_symbol *locals
4967 = elf_aarch64_locals (input_bfd);
4971 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
4972 (*_bfd_error_handler)
4973 (_("%B: Local symbol descriptor table be NULL when applying "
4974 "relocation %s against local symbol"),
4975 input_bfd, elfNN_aarch64_howto_table[howto_index].name);
4979 off = symbol_got_offset (input_bfd, h, r_symndx);
4980 base_got = globals->root.sgot;
4981 bfd_vma got_entry_addr = (base_got->output_section->vma
4982 + base_got->output_offset + off);
4984 if (!symbol_got_offset_mark_p (input_bfd, h, r_symndx))
4986 bfd_put_64 (output_bfd, value, base_got->contents + off);
4991 Elf_Internal_Rela outrel;
4993 /* For a local symbol, we have already done the absolute
4994 relocation at static linking stage.  For a shared library, we
4995 need to update the content of the GOT entry according to the
4996 shared object's load base address, so we generate an
4997 R_AARCH64_RELATIVE reloc for the dynamic linker.  */
4998 s = globals->root.srelgot;
5002 outrel.r_offset = got_entry_addr;
5003 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
5004 outrel.r_addend = value;
5005 elf_append_rela (output_bfd, s, &outrel);
5008 symbol_got_offset_mark (input_bfd, h, r_symndx);
5011 /* Update the relocation value to the GOT entry address, as we have
5012 transformed the direct data access into an indirect access through the GOT.  */
5013 value = got_entry_addr;
5015 if (bfd_r_type == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
5016 || bfd_r_type == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14)
5017 addend = base_got->output_section->vma + base_got->output_offset;
5019 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5020 addend, weak_undef_p);
5025 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5026 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5027 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5028 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5029 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
5030 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5031 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5032 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
5033 if (globals->root.sgot == NULL)
5034 return bfd_reloc_notsupported;
5036 value = (symbol_got_offset (input_bfd, h, r_symndx)
5037 + globals->root.sgot->output_section->vma
5038 + globals->root.sgot->output_offset);
5040 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5042 *unresolved_reloc_p = FALSE;
5045 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5046 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
5047 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5048 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5049 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5050 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5051 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5052 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5053 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5054 signed_addend - tpoff_base (info),
5056 *unresolved_reloc_p = FALSE;
5059 case BFD_RELOC_AARCH64_TLSDESC_ADD:
5060 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5061 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5062 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5063 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
5064 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
5065 case BFD_RELOC_AARCH64_TLSDESC_LDR:
5066 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5067 if (globals->root.sgot == NULL)
5068 return bfd_reloc_notsupported;
5069 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
5070 + globals->root.sgotplt->output_section->vma
5071 + globals->root.sgotplt->output_offset
5072 + globals->sgotplt_jump_table_size);
5074 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5076 *unresolved_reloc_p = FALSE;
5080 return bfd_reloc_notsupported;
5084 *saved_addend = value;
5086 /* Only apply the final relocation in a sequence. */
5088 return bfd_reloc_continue;
5090 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
5094 /* Handle TLS relaxations. Relaxing is possible for symbols that use
5095 R_AARCH64_TLSDESC_ADR_{PAGE, LD64_LO12_NC, ADD_LO12_NC} during a static
5098 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
5099 is to then call final_link_relocate. Return other values in the
5102 static bfd_reloc_status_type
5103 elfNN_aarch64_tls_relax (struct elf_aarch64_link_hash_table *globals,
5104 bfd *input_bfd, bfd_byte *contents,
5105 Elf_Internal_Rela *rel, struct elf_link_hash_entry *h)
5107 bfd_boolean is_local = h == NULL;
5108 unsigned int r_type = ELFNN_R_TYPE (rel->r_info);
5111 BFD_ASSERT (globals && input_bfd && contents && rel);
5113 switch (elfNN_aarch64_bfd_reloc_from_type (r_type))
5115 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5116 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5119 /* GD->LE relaxation:
5120 adrp x0, :tlsgd:var => movz x0, :tprel_g1:var
5122 adrp x0, :tlsdesc:var => movz x0, :tprel_g1:var
5124 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
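/* 0xd2a00000 is "movz x0, #0x0, lsl #16"; the relaxed TPREL_G1 reloc
   applied afterwards by final_link_relocate fills in the 16-bit
   immediate.  */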
5125 return bfd_reloc_continue;
5129 /* GD->IE relaxation:
5130 adrp x0, :tlsgd:var => adrp x0, :gottprel:var
5132 adrp x0, :tlsdesc:var => adrp x0, :gottprel:var
5134 return bfd_reloc_continue;
5137 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5141 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5144 /* Tiny TLSDESC->LE relaxation:
5145 ldr x1, :tlsdesc:var => movz x0, #:tprel_g1:var
5146 adr x0, :tlsdesc:var => movk x0, #:tprel_g0_nc:var
5150 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
5151 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
5153 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
5154 AARCH64_R (TLSLE_MOVW_TPREL_G0_NC));
5155 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5157 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
5158 bfd_putl32 (0xf2800000, contents + rel->r_offset + 4);
5159 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
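/* The three words written above decode to "movz x0, #0x0, lsl #16",
   "movk x0, #0x0" and a NOP; the TPREL_G1 and TPREL_G0_NC relocs then
   supply the two 16-bit immediates.  */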
5160 return bfd_reloc_continue;
5164 /* Tiny TLSDESC->IE relaxation:
5165 ldr x1, :tlsdesc:var => ldr x0, :gottprel:var
5166 adr x0, :tlsdesc:var => nop
5170 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
5171 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
5173 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5174 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5176 bfd_putl32 (0x58000000, contents + rel->r_offset);
5177 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 4);
5178 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
5179 return bfd_reloc_continue;
5182 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5185 /* Tiny GD->LE relaxation:
5186 adr x0, :tlsgd:var => mrs x1, tpidr_el0
5187 bl __tls_get_addr => add x0, x1, #:tprel_hi12:x, lsl #12
5188 nop => add x0, x0, #:tprel_lo12_nc:x
5191 /* First kill the tls_get_addr reloc on the bl instruction. */
5192 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
5194 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 0);
5195 bfd_putl32 (0x91400020, contents + rel->r_offset + 4);
5196 bfd_putl32 (0x91000000, contents + rel->r_offset + 8);
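/* Encodings used above: 0xd53bd041 is "mrs x1, tpidr_el0", 0x91400020
   is "add x0, x1, #0x0, lsl #12" (immediate filled by the TPREL_HI12
   reloc) and 0x91000000 is "add x0, x0, #0x0" (immediate filled by the
   TPREL_LO12_NC reloc).  */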
5198 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
5199 AARCH64_R (TLSLE_ADD_TPREL_LO12_NC));
5200 rel[1].r_offset = rel->r_offset + 8;
5202 /* Move the current relocation to the second instruction in
5205 rel->r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
5206 AARCH64_R (TLSLE_ADD_TPREL_HI12));
5207 return bfd_reloc_continue;
5211 /* Tiny GD->IE relaxation:
5212 adr x0, :tlsgd:var => ldr x0, :gottprel:var
5213 bl __tls_get_addr => mrs x1, tpidr_el0
5214 nop => add x0, x0, x1
5217 /* First kill the tls_get_addr reloc on the bl instruction. */
5218 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
5219 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5221 bfd_putl32 (0x58000000, contents + rel->r_offset);
5222 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
5223 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
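/* Encodings used above: 0x58000000 is "ldr x0, <literal>" (the
   GOTTPREL_PREL19 reloc left on this offset patches the 19-bit literal
   offset), 0xd53bd041 is "mrs x1, tpidr_el0" and 0x8b000020 is
   "add x0, x1, x0".  */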
5224 return bfd_reloc_continue;
5227 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5228 return bfd_reloc_continue;
5230 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
5233 /* GD->LE relaxation:
5234 ldr xd, [x0, #:tlsdesc_lo12:var] => movk x0, :tprel_g0_nc:var
5236 bfd_putl32 (0xf2800000, contents + rel->r_offset);
5237 return bfd_reloc_continue;
5241 /* GD->IE relaxation:
5242 ldr xd, [x0, #:tlsdesc_lo12:var] => ldr x0, [x0, #:gottprel_lo12:var]
5244 insn = bfd_getl32 (contents + rel->r_offset);
5246 bfd_putl32 (insn, contents + rel->r_offset);
5247 return bfd_reloc_continue;
5250 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5253 /* GD->LE relaxation
5254 add x0, #:tlsgd_lo12:var => movk x0, :tprel_g0_nc:var
5255 bl __tls_get_addr => mrs x1, tpidr_el0
5256 nop => add x0, x1, x0
5259 /* First kill the tls_get_addr reloc on the bl instruction. */
5260 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
5261 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5263 bfd_putl32 (0xf2800000, contents + rel->r_offset);
5264 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
5265 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
5266 return bfd_reloc_continue;
5270 /* GD->IE relaxation
5271 ADD x0, #:tlsgd_lo12:var => ldr x0, [x0, #:gottprel_lo12:var]
5272 BL __tls_get_addr => mrs x1, tpidr_el0
5274 NOP => add x0, x1, x0
5277 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
5279 /* Remove the relocation on the BL instruction. */
5280 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5282 bfd_putl32 (0xf9400000, contents + rel->r_offset);
5284 /* We choose to fixup the BL and NOP instructions using the
5285 offset from the second relocation to allow flexibility in
5286 scheduling instructions between the ADD and BL. */
5287 bfd_putl32 (0xd53bd041, contents + rel[1].r_offset);
5288 bfd_putl32 (0x8b000020, contents + rel[1].r_offset + 4);
5289 return bfd_reloc_continue;
5292 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5293 case BFD_RELOC_AARCH64_TLSDESC_CALL:
5294 /* GD->IE/LE relaxation:
5295 add x0, x0, #:tlsdesc_lo12:var => nop
5298 bfd_putl32 (INSN_NOP, contents + rel->r_offset);
5299 return bfd_reloc_ok;
5301 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5302 /* IE->LE relaxation:
5303 adrp xd, :gottprel:var => movz xd, :tprel_g1:var
5307 insn = bfd_getl32 (contents + rel->r_offset);
5308 bfd_putl32 (0xd2a00000 | (insn & 0x1f), contents + rel->r_offset);
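/* The destination register of the original ADRP is kept via
   (insn & 0x1f) while the opcode becomes "movz xd, #0x0, lsl #16"; the
   relaxed TPREL_G1 reloc fills in the immediate.  */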
5310 return bfd_reloc_continue;
5312 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
5313 /* IE->LE relaxation:
5314 ldr xd, [xm, #:gottprel_lo12:var] => movk xd, :tprel_g0_nc:var
5318 insn = bfd_getl32 (contents + rel->r_offset);
5319 bfd_putl32 (0xf2800000 | (insn & 0x1f), contents + rel->r_offset);
5321 return bfd_reloc_continue;
5324 return bfd_reloc_continue;
5327 return bfd_reloc_ok;
5330 /* Relocate an AArch64 ELF section. */
5333 elfNN_aarch64_relocate_section (bfd *output_bfd,
5334 struct bfd_link_info *info,
5336 asection *input_section,
5338 Elf_Internal_Rela *relocs,
5339 Elf_Internal_Sym *local_syms,
5340 asection **local_sections)
5342 Elf_Internal_Shdr *symtab_hdr;
5343 struct elf_link_hash_entry **sym_hashes;
5344 Elf_Internal_Rela *rel;
5345 Elf_Internal_Rela *relend;
5347 struct elf_aarch64_link_hash_table *globals;
5348 bfd_boolean save_addend = FALSE;
5351 globals = elf_aarch64_hash_table (info);
5353 symtab_hdr = &elf_symtab_hdr (input_bfd);
5354 sym_hashes = elf_sym_hashes (input_bfd);
5357 relend = relocs + input_section->reloc_count;
5358 for (; rel < relend; rel++)
5360 unsigned int r_type;
5361 bfd_reloc_code_real_type bfd_r_type;
5362 bfd_reloc_code_real_type relaxed_bfd_r_type;
5363 reloc_howto_type *howto;
5364 unsigned long r_symndx;
5365 Elf_Internal_Sym *sym;
5367 struct elf_link_hash_entry *h;
5369 bfd_reloc_status_type r;
5372 bfd_boolean unresolved_reloc = FALSE;
5373 char *error_message = NULL;
5375 r_symndx = ELFNN_R_SYM (rel->r_info);
5376 r_type = ELFNN_R_TYPE (rel->r_info);
5378 bfd_reloc.howto = elfNN_aarch64_howto_from_type (r_type);
5379 howto = bfd_reloc.howto;
5383 (*_bfd_error_handler)
5384 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
5385 input_bfd, input_section, r_type);
5388 bfd_r_type = elfNN_aarch64_bfd_reloc_from_howto (howto);
5394 if (r_symndx < symtab_hdr->sh_info)
5396 sym = local_syms + r_symndx;
5397 sym_type = ELFNN_ST_TYPE (sym->st_info);
5398 sec = local_sections[r_symndx];
5400 /* An object file might have a reference to a local
5401 undefined symbol. This is a daft object file, but we
5402 should at least do something about it. */
5403 if (r_type != R_AARCH64_NONE && r_type != R_AARCH64_NULL
5404 && bfd_is_und_section (sec)
5405 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
5407 if (!info->callbacks->undefined_symbol
5408 (info, bfd_elf_string_from_elf_section
5409 (input_bfd, symtab_hdr->sh_link, sym->st_name),
5410 input_bfd, input_section, rel->r_offset, TRUE))
5414 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
5416 /* Relocate against local STT_GNU_IFUNC symbol. */
5417 if (!info->relocatable
5418 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
5420 h = elfNN_aarch64_get_local_sym_hash (globals, input_bfd,
5425 /* Set STT_GNU_IFUNC symbol value. */
5426 h->root.u.def.value = sym->st_value;
5427 h->root.u.def.section = sec;
5432 bfd_boolean warned, ignored;
5434 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
5435 r_symndx, symtab_hdr, sym_hashes,
5437 unresolved_reloc, warned, ignored);
5442 if (sec != NULL && discarded_section (sec))
5443 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
5444 rel, 1, relend, howto, 0, contents);
5446 if (info->relocatable)
5450 name = h->root.root.string;
5453 name = (bfd_elf_string_from_elf_section
5454 (input_bfd, symtab_hdr->sh_link, sym->st_name));
5455 if (name == NULL || *name == '\0')
5456 name = bfd_section_name (input_bfd, sec);
5460 && r_type != R_AARCH64_NONE
5461 && r_type != R_AARCH64_NULL
5463 || h->root.type == bfd_link_hash_defined
5464 || h->root.type == bfd_link_hash_defweak)
5465 && IS_AARCH64_TLS_RELOC (bfd_r_type) != (sym_type == STT_TLS))
5467 (*_bfd_error_handler)
5468 ((sym_type == STT_TLS
5469 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
5470 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
5472 input_section, (long) rel->r_offset, howto->name, name);
5475 /* We relax only if we can see that there can be a valid transition
5476 from a reloc type to another.
5477 We call elfNN_aarch64_final_link_relocate unless we're completely
5478 done, i.e., the relaxation produced the final output we want. */
5480 relaxed_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type,
5482 if (relaxed_bfd_r_type != bfd_r_type)
5484 bfd_r_type = relaxed_bfd_r_type;
5485 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
5486 BFD_ASSERT (howto != NULL);
5487 r_type = howto->type;
5488 r = elfNN_aarch64_tls_relax (globals, input_bfd, contents, rel, h);
5489 unresolved_reloc = 0;
5492 r = bfd_reloc_continue;
5494 /* There may be multiple consecutive relocations for the
5495 same offset. In that case we are supposed to treat the
5496 output of each relocation as the addend for the next. */
5497 if (rel + 1 < relend
5498 && rel->r_offset == rel[1].r_offset
5499 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NONE
5500 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NULL)
5503 save_addend = FALSE;
5505 if (r == bfd_reloc_continue)
5506 r = elfNN_aarch64_final_link_relocate (howto, input_bfd, output_bfd,
5507 input_section, contents, rel,
5508 relocation, info, sec,
5509 h, &unresolved_reloc,
5510 save_addend, &addend, sym);
5512 switch (elfNN_aarch64_bfd_reloc_from_type (r_type))
5514 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5515 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5516 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5517 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
5518 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
5520 bfd_boolean need_relocs = FALSE;
5525 off = symbol_got_offset (input_bfd, h, r_symndx);
5526 indx = h && h->dynindx != -1 ? h->dynindx : 0;
5529 (info->shared || indx != 0) &&
5531 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5532 || h->root.type != bfd_link_hash_undefweak);
5534 BFD_ASSERT (globals->root.srelgot != NULL);
5538 Elf_Internal_Rela rela;
5539 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPMOD));
5541 rela.r_offset = globals->root.sgot->output_section->vma +
5542 globals->root.sgot->output_offset + off;
5545 loc = globals->root.srelgot->contents;
5546 loc += globals->root.srelgot->reloc_count++
5547 * RELOC_SIZE (htab);
5548 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5550 if (elfNN_aarch64_bfd_reloc_from_type (r_type)
5551 == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21)
5553 /* For local dynamic, don't generate DTPREL in any case.
5554 Initialize the DTPREL slot to zero, so we get the module
5555 base address when the runtime TLS resolver is invoked.  */
5556 bfd_put_NN (output_bfd, 0,
5557 globals->root.sgot->contents + off
5562 bfd_put_NN (output_bfd,
5563 relocation - dtpoff_base (info),
5564 globals->root.sgot->contents + off
5569 /* This TLS symbol is global. We emit a
5570 relocation to fixup the tls offset at load
5573 ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPREL));
5576 (globals->root.sgot->output_section->vma
5577 + globals->root.sgot->output_offset + off
5580 loc = globals->root.srelgot->contents;
5581 loc += globals->root.srelgot->reloc_count++
5582 * RELOC_SIZE (globals);
5583 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5584 bfd_put_NN (output_bfd, (bfd_vma) 0,
5585 globals->root.sgot->contents + off
5591 bfd_put_NN (output_bfd, (bfd_vma) 1,
5592 globals->root.sgot->contents + off);
5593 bfd_put_NN (output_bfd,
5594 relocation - dtpoff_base (info),
5595 globals->root.sgot->contents + off
5599 symbol_got_offset_mark (input_bfd, h, r_symndx);
5603 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5604 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
5605 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5606 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
5608 bfd_boolean need_relocs = FALSE;
5613 off = symbol_got_offset (input_bfd, h, r_symndx);
5615 indx = h && h->dynindx != -1 ? h->dynindx : 0;
5618 (info->shared || indx != 0) &&
5620 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5621 || h->root.type != bfd_link_hash_undefweak);
5623 BFD_ASSERT (globals->root.srelgot != NULL);
5627 Elf_Internal_Rela rela;
5630 rela.r_addend = relocation - dtpoff_base (info);
5634 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_TPREL));
5635 rela.r_offset = globals->root.sgot->output_section->vma +
5636 globals->root.sgot->output_offset + off;
5638 loc = globals->root.srelgot->contents;
5639 loc += globals->root.srelgot->reloc_count++
5640 * RELOC_SIZE (htab);
5642 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5644 bfd_put_NN (output_bfd, rela.r_addend,
5645 globals->root.sgot->contents + off);
5648 bfd_put_NN (output_bfd, relocation - tpoff_base (info),
5649 globals->root.sgot->contents + off);
5651 symbol_got_offset_mark (input_bfd, h, r_symndx);
5655 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5656 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
5657 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5658 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5659 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5660 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5661 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5662 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5665 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5666 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5667 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5668 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
5669 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5670 if (! symbol_tlsdesc_got_offset_mark_p (input_bfd, h, r_symndx))
5672 bfd_boolean need_relocs = FALSE;
5673 int indx = h && h->dynindx != -1 ? h->dynindx : 0;
5674 bfd_vma off = symbol_tlsdesc_got_offset (input_bfd, h, r_symndx);
5676 need_relocs = (h == NULL
5677 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5678 || h->root.type != bfd_link_hash_undefweak);
5680 BFD_ASSERT (globals->root.srelgot != NULL);
5681 BFD_ASSERT (globals->root.sgot != NULL);
5686 Elf_Internal_Rela rela;
5687 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLSDESC));
5690 rela.r_offset = (globals->root.sgotplt->output_section->vma
5691 + globals->root.sgotplt->output_offset
5692 + off + globals->sgotplt_jump_table_size);
5695 rela.r_addend = relocation - dtpoff_base (info);
5697 /* Allocate the next available slot in the PLT reloc
5698 section to hold our R_AARCH64_TLSDESC; the next
5699 available slot is determined from reloc_count,
5700 which we then step.  Note that reloc_count was
5701 artificially moved down while allocating slots for
5702 real PLT relocs such that all of the PLT relocs
5703 will fit above the initial reloc_count and the
5704 extra stuff will fit below.
5705 loc = globals->root.srelplt->contents;
5706 loc += globals->root.srelplt->reloc_count++
5707 * RELOC_SIZE (globals);
5709 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5711 bfd_put_NN (output_bfd, (bfd_vma) 0,
5712 globals->root.sgotplt->contents + off +
5713 globals->sgotplt_jump_table_size);
5714 bfd_put_NN (output_bfd, (bfd_vma) 0,
5715 globals->root.sgotplt->contents + off +
5716 globals->sgotplt_jump_table_size +
5720 symbol_tlsdesc_got_offset_mark (input_bfd, h, r_symndx);
5731 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
5732 because such sections are not SEC_ALLOC and thus ld.so will
5733 not process them. */
5734 if (unresolved_reloc
5735 && !((input_section->flags & SEC_DEBUGGING) != 0
5737 && _bfd_elf_section_offset (output_bfd, info, input_section,
5738 +rel->r_offset) != (bfd_vma) - 1)
5740 (*_bfd_error_handler)
5742 ("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
5743 input_bfd, input_section, (long) rel->r_offset, howto->name,
5744 h->root.root.string);
5748 if (r != bfd_reloc_ok && r != bfd_reloc_continue)
5752 case bfd_reloc_overflow:
5753 if (!(*info->callbacks->reloc_overflow)
5754 (info, (h ? &h->root : NULL), name, howto->name, (bfd_vma) 0,
5755 input_bfd, input_section, rel->r_offset))
5759 case bfd_reloc_undefined:
5760 if (!((*info->callbacks->undefined_symbol)
5761 (info, name, input_bfd, input_section,
5762 rel->r_offset, TRUE)))
5766 case bfd_reloc_outofrange:
5767 error_message = _("out of range");
5770 case bfd_reloc_notsupported:
5771 error_message = _("unsupported relocation");
5774 case bfd_reloc_dangerous:
5775 /* error_message should already be set. */
5779 error_message = _("unknown error");
5783 BFD_ASSERT (error_message != NULL);
5784 if (!((*info->callbacks->reloc_dangerous)
5785 (info, error_message, input_bfd, input_section,
5796 /* Set the right machine number. */
5799 elfNN_aarch64_object_p (bfd *abfd)
5802 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64_ilp32);
5804 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64);
5809 /* Function to keep AArch64 specific flags in the ELF header. */
5812 elfNN_aarch64_set_private_flags (bfd *abfd, flagword flags)
5814 if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
5819 elf_elfheader (abfd)->e_flags = flags;
5820 elf_flags_init (abfd) = TRUE;
5826 /* Merge backend specific data from an object file to the output
5827 object file when linking. */
5830 elfNN_aarch64_merge_private_bfd_data (bfd *ibfd, bfd *obfd)
5834 bfd_boolean flags_compatible = TRUE;
5837 /* Check if we have the same endianness.  */
5838 if (!_bfd_generic_verify_endian_match (ibfd, obfd))
5841 if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
5844 /* The input BFD must have had its flags initialised. */
5845 /* The following seems bogus to me -- The flags are initialized in
5846 the assembler but I don't think an elf_flags_init field is
5847 written into the object. */
5848 /* BFD_ASSERT (elf_flags_init (ibfd)); */
5850 in_flags = elf_elfheader (ibfd)->e_flags;
5851 out_flags = elf_elfheader (obfd)->e_flags;
5853 if (!elf_flags_init (obfd))
5855 /* If the input is the default architecture and had the default
5856 flags then do not bother setting the flags for the output
5857 architecture; instead allow future merges to do this.  If no
5858 future merges ever set these flags then they will retain their
5859 uninitialised values, which, unsurprisingly, correspond
5860 to the default values.
5861 if (bfd_get_arch_info (ibfd)->the_default
5862 && elf_elfheader (ibfd)->e_flags == 0)
5865 elf_flags_init (obfd) = TRUE;
5866 elf_elfheader (obfd)->e_flags = in_flags;
5868 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
5869 && bfd_get_arch_info (obfd)->the_default)
5870 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
5871 bfd_get_mach (ibfd));
5876 /* Identical flags must be compatible. */
5877 if (in_flags == out_flags)
5880 /* Check to see if the input BFD actually contains any sections.  If
5881 not, its flags may not have been initialised either, but it
5882 cannot actually cause any incompatibility.  Do not short-circuit
5883 dynamic objects; their section list may be emptied by
5884 elf_link_add_object_symbols.
5886 Also check to see if there are no code sections in the input.
5887 In this case there is no need to check for code specific flags.
5888 XXX - do we need to worry about floating-point format compatibility
5889 in data sections?  */
5890 if (!(ibfd->flags & DYNAMIC))
5892 bfd_boolean null_input_bfd = TRUE;
5893 bfd_boolean only_data_sections = TRUE;
5895 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
5897 if ((bfd_get_section_flags (ibfd, sec)
5898 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
5899 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
5900 only_data_sections = FALSE;
5902 null_input_bfd = FALSE;
5906 if (null_input_bfd || only_data_sections)
5910 return flags_compatible;
5913 /* Display the flags field. */
5916 elfNN_aarch64_print_private_bfd_data (bfd *abfd, void *ptr)
5918 FILE *file = (FILE *) ptr;
5919 unsigned long flags;
5921 BFD_ASSERT (abfd != NULL && ptr != NULL);
5923 /* Print normal ELF private data. */
5924 _bfd_elf_print_private_bfd_data (abfd, ptr);
5926 flags = elf_elfheader (abfd)->e_flags;
5927 /* Ignore init flag - it may not be set, despite the flags field
5928 containing valid data. */
5930 /* xgettext:c-format */
5931 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
5934 fprintf (file, _("<Unrecognised flag bits set>"));
5941 /* Update the got entry reference counts for the section being removed. */
5944 elfNN_aarch64_gc_sweep_hook (bfd *abfd,
5945 struct bfd_link_info *info,
5947 const Elf_Internal_Rela * relocs)
5949 struct elf_aarch64_link_hash_table *htab;
5950 Elf_Internal_Shdr *symtab_hdr;
5951 struct elf_link_hash_entry **sym_hashes;
5952 struct elf_aarch64_local_symbol *locals;
5953 const Elf_Internal_Rela *rel, *relend;
5955 if (info->relocatable)
5958 htab = elf_aarch64_hash_table (info);
5963 elf_section_data (sec)->local_dynrel = NULL;
5965 symtab_hdr = &elf_symtab_hdr (abfd);
5966 sym_hashes = elf_sym_hashes (abfd);
5968 locals = elf_aarch64_locals (abfd);
5970 relend = relocs + sec->reloc_count;
5971 for (rel = relocs; rel < relend; rel++)
5973 unsigned long r_symndx;
5974 unsigned int r_type;
5975 struct elf_link_hash_entry *h = NULL;
5977 r_symndx = ELFNN_R_SYM (rel->r_info);
5979 if (r_symndx >= symtab_hdr->sh_info)
5982 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
5983 while (h->root.type == bfd_link_hash_indirect
5984 || h->root.type == bfd_link_hash_warning)
5985 h = (struct elf_link_hash_entry *) h->root.u.i.link;
5989 Elf_Internal_Sym *isym;
5991 /* A local symbol. */
5992 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
5995 /* Check relocation against local STT_GNU_IFUNC symbol. */
5997 && ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
5999 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel, FALSE);
6007 struct elf_aarch64_link_hash_entry *eh;
6008 struct elf_dyn_relocs **pp;
6009 struct elf_dyn_relocs *p;
6011 eh = (struct elf_aarch64_link_hash_entry *) h;
6013 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
6016 /* Everything must go for SEC. */
6022 r_type = ELFNN_R_TYPE (rel->r_info);
6023 switch (aarch64_tls_transition (abfd, info, r_type, h, r_symndx))
6025 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6026 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6027 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
6028 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6029 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
6030 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6031 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6032 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6033 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6034 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6035 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6036 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6037 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6038 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6039 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6040 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6041 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6042 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6043 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6044 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
6045 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6046 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6047 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6048 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6049 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6050 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6051 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6052 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6055 if (h->got.refcount > 0)
6056 h->got.refcount -= 1;
6058 if (h->type == STT_GNU_IFUNC)
6060 if (h->plt.refcount > 0)
6061 h->plt.refcount -= 1;
6064 else if (locals != NULL)
6066 if (locals[r_symndx].got_refcount > 0)
6067 locals[r_symndx].got_refcount -= 1;
6071 case BFD_RELOC_AARCH64_CALL26:
6072 case BFD_RELOC_AARCH64_JUMP26:
6073 /* If this is a local symbol then we resolve it
6074 directly without creating a PLT entry. */
6078 if (h->plt.refcount > 0)
6079 h->plt.refcount -= 1;
6082 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6083 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6084 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6085 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6086 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6087 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6088 case BFD_RELOC_AARCH64_MOVW_G3:
6089 case BFD_RELOC_AARCH64_NN:
6090 if (h != NULL && info->executable)
6092 if (h->plt.refcount > 0)
6093 h->plt.refcount -= 1;
6105 /* Adjust a symbol defined by a dynamic object and referenced by a
6106 regular object. The current definition is in some section of the
6107 dynamic object, but we're not including those sections. We have to
6108 change the definition to something the rest of the link can
6112 elfNN_aarch64_adjust_dynamic_symbol (struct bfd_link_info *info,
6113 struct elf_link_hash_entry *h)
6115 struct elf_aarch64_link_hash_table *htab;
6118 /* If this is a function, put it in the procedure linkage table. We
6119 will fill in the contents of the procedure linkage table later,
6120 when we know the address of the .got section. */
6121 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
6123 if (h->plt.refcount <= 0
6124 || (h->type != STT_GNU_IFUNC
6125 && (SYMBOL_CALLS_LOCAL (info, h)
6126 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
6127 && h->root.type == bfd_link_hash_undefweak))))
6129 /* This case can occur if we saw a CALL26 reloc in
6130 an input file, but the symbol wasn't referred to
6131 by a dynamic object or all references were
6132 garbage collected. In which case we can end up
6134 h->plt.offset = (bfd_vma) - 1;
6141 /* Otherwise, reset to -1. */
6142 h->plt.offset = (bfd_vma) - 1;
6145 /* If this is a weak symbol, and there is a real definition, the
6146 processor independent code will have arranged for us to see the
6147 real definition first, and we can just use the same value. */
6148 if (h->u.weakdef != NULL)
6150 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
6151 || h->u.weakdef->root.type == bfd_link_hash_defweak);
6152 h->root.u.def.section = h->u.weakdef->root.u.def.section;
6153 h->root.u.def.value = h->u.weakdef->root.u.def.value;
6154 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
6155 h->non_got_ref = h->u.weakdef->non_got_ref;
6159 /* If we are creating a shared library, we must presume that the
6160 only references to the symbol are via the global offset table.
6161 For such cases we need not do anything here; the relocations will
6162 be handled correctly by relocate_section. */
6166 /* If there are no references to this symbol that do not use the
6167 GOT, we don't need to generate a copy reloc. */
6168 if (!h->non_got_ref)
6171 /* If -z nocopyreloc was given, we won't generate them either. */
6172 if (info->nocopyreloc)
6178 /* We must allocate the symbol in our .dynbss section, which will
6179 become part of the .bss section of the executable. There will be
6180 an entry for this symbol in the .dynsym section. The dynamic
6181 object will contain position independent code, so all references
6182 from the dynamic object to this symbol will go through the global
6183 offset table. The dynamic linker will use the .dynsym entry to
6184 determine the address it must put in the global offset table, so
6185 both the dynamic object and the regular object will refer to the
6186 same memory location for the variable. */
6188 htab = elf_aarch64_hash_table (info);
6190 /* We must generate a R_AARCH64_COPY reloc to tell the dynamic linker
6191 to copy the initial value out of the dynamic object and into the
6192 runtime process image. */
6193 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
6195 htab->srelbss->size += RELOC_SIZE (htab);
6201 return _bfd_elf_adjust_dynamic_copy (info, h, s);
6206 elfNN_aarch64_allocate_local_symbols (bfd *abfd, unsigned number)
6208 struct elf_aarch64_local_symbol *locals;
6209 locals = elf_aarch64_locals (abfd);
6212 locals = (struct elf_aarch64_local_symbol *)
6213 bfd_zalloc (abfd, number * sizeof (struct elf_aarch64_local_symbol));
6216 elf_aarch64_locals (abfd) = locals;
6221 /* Create the .got section to hold the global offset table. */
6224 aarch64_elf_create_got_section (bfd *abfd, struct bfd_link_info *info)
6226 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
6229 struct elf_link_hash_entry *h;
6230 struct elf_link_hash_table *htab = elf_hash_table (info);
6232 /* This function may be called more than once. */
6233 s = bfd_get_linker_section (abfd, ".got");
6237 flags = bed->dynamic_sec_flags;
6239 s = bfd_make_section_anyway_with_flags (abfd,
6240 (bed->rela_plts_and_copies_p
6241 ? ".rela.got" : ".rel.got"),
6242 (bed->dynamic_sec_flags
6245 || ! bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
6249 s = bfd_make_section_anyway_with_flags (abfd, ".got", flags);
6251 || !bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
6254 htab->sgot->size += GOT_ENTRY_SIZE;
6256 if (bed->want_got_sym)
6258 /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the .got
6259 (or .got.plt) section. We don't do this in the linker script
6260 because we don't want to define the symbol if we are not creating
6261 a global offset table. */
6262 h = _bfd_elf_define_linkage_sym (abfd, info, s,
6263 "_GLOBAL_OFFSET_TABLE_");
6264 elf_hash_table (info)->hgot = h;
6269 if (bed->want_got_plt)
6271 s = bfd_make_section_anyway_with_flags (abfd, ".got.plt", flags);
6273 || !bfd_set_section_alignment (abfd, s,
6274 bed->s->log_file_align))
6279 /* The first part of the global offset table is the header. */
6280 s->size += bed->got_header_size;
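/* Illustrative layout only, assuming elf_backend_got_header_size of
   GOT_ENTRY_SIZE * 3 as set by the backend macros at the end of this
   file: the header reserves the first three .got.plt slots for the
   dynamic linker, and PLT-related slots follow them.

     .got.plt[0 .. 2]   reserved (filled in finish_dynamic_sections)
     .got.plt[3 + n]    slot used by PLT entry n (counting from 0)  */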
6285 /* Look through the relocs for a section during the first phase. */
6288 elfNN_aarch64_check_relocs (bfd *abfd, struct bfd_link_info *info,
6289 asection *sec, const Elf_Internal_Rela *relocs)
6291 Elf_Internal_Shdr *symtab_hdr;
6292 struct elf_link_hash_entry **sym_hashes;
6293 const Elf_Internal_Rela *rel;
6294 const Elf_Internal_Rela *rel_end;
6297 struct elf_aarch64_link_hash_table *htab;
6299 if (info->relocatable)
6302 BFD_ASSERT (is_aarch64_elf (abfd));
6304 htab = elf_aarch64_hash_table (info);
6307 symtab_hdr = &elf_symtab_hdr (abfd);
6308 sym_hashes = elf_sym_hashes (abfd);
6310 rel_end = relocs + sec->reloc_count;
6311 for (rel = relocs; rel < rel_end; rel++)
6313 struct elf_link_hash_entry *h;
6314 unsigned long r_symndx;
6315 unsigned int r_type;
6316 bfd_reloc_code_real_type bfd_r_type;
6317 Elf_Internal_Sym *isym;
6319 r_symndx = ELFNN_R_SYM (rel->r_info);
6320 r_type = ELFNN_R_TYPE (rel->r_info);
6322 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
6324 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
6329 if (r_symndx < symtab_hdr->sh_info)
6331 /* A local symbol. */
6332 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
6337 /* Check relocation against local STT_GNU_IFUNC symbol. */
6338 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
6340 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel,
6345 /* Fake a STT_GNU_IFUNC symbol. */
6346 h->type = STT_GNU_IFUNC;
6349 h->forced_local = 1;
6350 h->root.type = bfd_link_hash_defined;
6357 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
6358 while (h->root.type == bfd_link_hash_indirect
6359 || h->root.type == bfd_link_hash_warning)
6360 h = (struct elf_link_hash_entry *) h->root.u.i.link;
6362 /* PR15323, ref flags aren't set for references in the same
6364 h->root.non_ir_ref = 1;
6367 /* Could be done earlier, if h were already available. */
6368 bfd_r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx);
6372 /* Create the ifunc sections for static executables. If we
6373 never see an indirect function symbol and are not building
6374 a static executable, those sections will be empty and
6375 won't appear in output. */
6381 case BFD_RELOC_AARCH64_ADD_LO12:
6382 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6383 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6384 case BFD_RELOC_AARCH64_CALL26:
6385 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6386 case BFD_RELOC_AARCH64_JUMP26:
6387 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
6388 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6389 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
6390 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6391 case BFD_RELOC_AARCH64_NN:
6392 if (htab->root.dynobj == NULL)
6393 htab->root.dynobj = abfd;
6394 if (!_bfd_elf_create_ifunc_sections (htab->root.dynobj, info))
6399 /* It is referenced by a non-shared object. */
6401 h->root.non_ir_ref = 1;
6406 case BFD_RELOC_AARCH64_NN:
6408 /* We don't need to handle relocs into sections not going into
6409 the "real" output. */
6410 if ((sec->flags & SEC_ALLOC) == 0)
6418 h->plt.refcount += 1;
6419 h->pointer_equality_needed = 1;
6422 /* No need to do anything if we're not creating a shared
6428 struct elf_dyn_relocs *p;
6429 struct elf_dyn_relocs **head;
6431 /* We must copy these reloc types into the output file.
6432 Create a reloc section in dynobj and make room for
6436 if (htab->root.dynobj == NULL)
6437 htab->root.dynobj = abfd;
6439 sreloc = _bfd_elf_make_dynamic_reloc_section
6440 (sec, htab->root.dynobj, LOG_FILE_ALIGN, abfd, /*rela? */ TRUE);
6446 /* If this is a global symbol, we count the number of
6447 relocations we need for this symbol. */
6450 struct elf_aarch64_link_hash_entry *eh;
6451 eh = (struct elf_aarch64_link_hash_entry *) h;
6452 head = &eh->dyn_relocs;
6456 /* Track dynamic relocs needed for local syms too.
6457 We really need local syms available to do this
6463 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
6468 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
6472 /* Beware of type punned pointers vs strict aliasing
6474 vpp = &(elf_section_data (s)->local_dynrel);
6475 head = (struct elf_dyn_relocs **) vpp;
6479 if (p == NULL || p->sec != sec)
6481 bfd_size_type amt = sizeof *p;
6482 p = ((struct elf_dyn_relocs *)
6483 bfd_zalloc (htab->root.dynobj, amt));
6496 /* RR: We probably want to keep a consistency check that
6497 there are no dangling GOT_PAGE relocs. */
6498 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6499 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6500 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
6501 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6502 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
6503 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6504 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6505 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6506 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6507 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6508 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6509 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6510 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6511 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6512 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6513 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6514 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6515 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6516 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6517 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
6518 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6519 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6520 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6521 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6522 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6523 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6524 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6525 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6528 unsigned old_got_type;
6530 got_type = aarch64_reloc_got_type (bfd_r_type);
6534 h->got.refcount += 1;
6535 old_got_type = elf_aarch64_hash_entry (h)->got_type;
6539 struct elf_aarch64_local_symbol *locals;
6541 if (!elfNN_aarch64_allocate_local_symbols
6542 (abfd, symtab_hdr->sh_info))
6545 locals = elf_aarch64_locals (abfd);
6546 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
6547 locals[r_symndx].got_refcount += 1;
6548 old_got_type = locals[r_symndx].got_type;
6551 /* If a variable is accessed with both general dynamic TLS
6552 methods, two slots may be created. */
6553 if (GOT_TLS_GD_ANY_P (old_got_type) && GOT_TLS_GD_ANY_P (got_type))
6554 got_type |= old_got_type;
6556 /* We will already have issued an error message if there
6557 is a TLS/non-TLS mismatch, based on the symbol type.
6558 So just combine any TLS types needed. */
6559 if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
6560 && got_type != GOT_NORMAL)
6561 got_type |= old_got_type;
6563 /* If the symbol is accessed by both IE and GD methods, we
6564 are able to relax. Turn off the GD flag, without
6565 messing up any other kind of TLS type that may be
6567 if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type))
6568 got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD);
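/* Worked example, illustrative only, assuming GOT_TLS_GD, GOT_TLS_IE
   and GOT_TLSDESC_GD are disjoint bit flags as defined earlier in
   this file: a symbol first seen through a TLS descriptor sequence
   has old_got_type == GOT_TLSDESC_GD; a later IE access contributes
   got_type == GOT_TLS_IE.  The combining step above ORs them into
   (GOT_TLSDESC_GD | GOT_TLS_IE) and the relaxation step then clears
   the GD bits, leaving GOT_TLS_IE -- a single IE-style GOT slot
   serves both access sequences.  */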
6570 if (old_got_type != got_type)
6573 elf_aarch64_hash_entry (h)->got_type = got_type;
6576 struct elf_aarch64_local_symbol *locals;
6577 locals = elf_aarch64_locals (abfd);
6578 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
6579 locals[r_symndx].got_type = got_type;
6583 if (htab->root.dynobj == NULL)
6584 htab->root.dynobj = abfd;
6585 if (! aarch64_elf_create_got_section (htab->root.dynobj, info))
6590 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6591 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6592 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6593 case BFD_RELOC_AARCH64_MOVW_G3:
6596 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
6597 (*_bfd_error_handler)
6598 (_("%B: relocation %s against `%s' can not be used when making "
6599 "a shared object; recompile with -fPIC"),
6600 abfd, elfNN_aarch64_howto_table[howto_index].name,
6601 (h) ? h->root.root.string : "a local symbol");
6602 bfd_set_error (bfd_error_bad_value);
6606 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6607 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6608 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6609 if (h != NULL && info->executable)
6611 /* If this reloc is in a read-only section, we might
6612 need a copy reloc. We can't check reliably at this
6613 stage whether the section is read-only, as input
6614 sections have not yet been mapped to output sections.
6615 Tentatively set the flag for now, and correct in
6616 adjust_dynamic_symbol. */
6618 h->plt.refcount += 1;
6619 h->pointer_equality_needed = 1;
6621 /* FIXME:: RR need to handle these in shared libraries
6622 and essentially bomb out as these being non-PIC
6623 relocations in shared libraries. */
6626 case BFD_RELOC_AARCH64_CALL26:
6627 case BFD_RELOC_AARCH64_JUMP26:
6628 /* If this is a local symbol then we resolve it
6629 directly without creating a PLT entry. */
6634 if (h->plt.refcount <= 0)
6635 h->plt.refcount = 1;
6637 h->plt.refcount += 1;
6648 /* Treat mapping symbols as special target symbols. */
6651 elfNN_aarch64_is_target_special_symbol (bfd *abfd ATTRIBUTE_UNUSED,
6654 return bfd_is_aarch64_special_symbol_name (sym->name,
6655 BFD_AARCH64_SPECIAL_SYM_TYPE_ANY);
6658 /* This is a copy of elf_find_function () from elf.c except that
6659 AArch64 mapping symbols are ignored when looking for function names. */
6662 aarch64_elf_find_function (bfd *abfd ATTRIBUTE_UNUSED,
6666 const char **filename_ptr,
6667 const char **functionname_ptr)
6669 const char *filename = NULL;
6670 asymbol *func = NULL;
6671 bfd_vma low_func = 0;
6674 for (p = symbols; *p != NULL; p++)
6678 q = (elf_symbol_type *) * p;
6680 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
6685 filename = bfd_asymbol_name (&q->symbol);
6689 /* Skip mapping symbols. */
6690 if ((q->symbol.flags & BSF_LOCAL)
6691 && (bfd_is_aarch64_special_symbol_name
6692 (q->symbol.name, BFD_AARCH64_SPECIAL_SYM_TYPE_ANY)))
6695 if (bfd_get_section (&q->symbol) == section
6696 && q->symbol.value >= low_func && q->symbol.value <= offset)
6698 func = (asymbol *) q;
6699 low_func = q->symbol.value;
6709 *filename_ptr = filename;
6710 if (functionname_ptr)
6711 *functionname_ptr = bfd_asymbol_name (func);
6717 /* Find the nearest line to a particular section and offset, for error
6718 reporting. This code is a duplicate of the code in elf.c, except
6719 that it uses aarch64_elf_find_function. */
6722 elfNN_aarch64_find_nearest_line (bfd *abfd,
6726 const char **filename_ptr,
6727 const char **functionname_ptr,
6728 unsigned int *line_ptr,
6729 unsigned int *discriminator_ptr)
6731 bfd_boolean found = FALSE;
6733 if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
6734 filename_ptr, functionname_ptr,
6735 line_ptr, discriminator_ptr,
6736 dwarf_debug_sections, 0,
6737 &elf_tdata (abfd)->dwarf2_find_line_info))
6739 if (!*functionname_ptr)
6740 aarch64_elf_find_function (abfd, symbols, section, offset,
6741 *filename_ptr ? NULL : filename_ptr,
6747 /* Skip _bfd_dwarf1_find_nearest_line since no known AArch64
6748 toolchain uses DWARF1. */
6750 if (!_bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
6751 &found, filename_ptr,
6752 functionname_ptr, line_ptr,
6753 &elf_tdata (abfd)->line_info))
6756 if (found && (*functionname_ptr || *line_ptr))
6759 if (symbols == NULL)
6762 if (!aarch64_elf_find_function (abfd, symbols, section, offset,
6763 filename_ptr, functionname_ptr))
6771 elfNN_aarch64_find_inliner_info (bfd *abfd,
6772 const char **filename_ptr,
6773 const char **functionname_ptr,
6774 unsigned int *line_ptr)
6777 found = _bfd_dwarf2_find_inliner_info
6778 (abfd, filename_ptr,
6779 functionname_ptr, line_ptr, &elf_tdata (abfd)->dwarf2_find_line_info);
6785 elfNN_aarch64_post_process_headers (bfd *abfd,
6786 struct bfd_link_info *link_info)
6788 Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form. */
6790 i_ehdrp = elf_elfheader (abfd);
6791 i_ehdrp->e_ident[EI_ABIVERSION] = AARCH64_ELF_ABI_VERSION;
6793 _bfd_elf_post_process_headers (abfd, link_info);
6796 static enum elf_reloc_type_class
6797 elfNN_aarch64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
6798 const asection *rel_sec ATTRIBUTE_UNUSED,
6799 const Elf_Internal_Rela *rela)
6801 switch ((int) ELFNN_R_TYPE (rela->r_info))
6803 case AARCH64_R (RELATIVE):
6804 return reloc_class_relative;
6805 case AARCH64_R (JUMP_SLOT):
6806 return reloc_class_plt;
6807 case AARCH64_R (COPY):
6808 return reloc_class_copy;
6810 return reloc_class_normal;
6814 /* Handle an AArch64 specific section when reading an object file. This is
6815 called when bfd_section_from_shdr finds a section with an unknown
6819 elfNN_aarch64_section_from_shdr (bfd *abfd,
6820 Elf_Internal_Shdr *hdr,
6821 const char *name, int shindex)
6823 /* There ought to be a place to keep ELF backend specific flags, but
6824 at the moment there isn't one. We just keep track of the
6825 sections by their name, instead. Fortunately, the ABI gives
6826 names for all the AArch64 specific sections, so we will probably get
6828 switch (hdr->sh_type)
6830 case SHT_AARCH64_ATTRIBUTES:
6837 if (!_bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
6843 /* A structure used to record a list of sections, independently
6844 of the next and prev fields in the asection structure. */
6845 typedef struct section_list
6848 struct section_list *next;
6849 struct section_list *prev;
6853 /* Unfortunately we need to keep a list of sections for which
6854 an _aarch64_elf_section_data structure has been allocated. This
6855 is because it is possible for functions like elfNN_aarch64_write_section
6856 to be called on a section which has had an elf_data_structure
6857 allocated for it (and so the used_by_bfd field is valid) but
6858 for which the AArch64 extended version of this structure - the
6859 _aarch64_elf_section_data structure - has not been allocated. */
6860 static section_list *sections_with_aarch64_elf_section_data = NULL;
6863 record_section_with_aarch64_elf_section_data (asection *sec)
6865 struct section_list *entry;
6867 entry = bfd_malloc (sizeof (*entry));
6871 entry->next = sections_with_aarch64_elf_section_data;
6873 if (entry->next != NULL)
6874 entry->next->prev = entry;
6875 sections_with_aarch64_elf_section_data = entry;
6878 static struct section_list *
6879 find_aarch64_elf_section_entry (asection *sec)
6881 struct section_list *entry;
6882 static struct section_list *last_entry = NULL;
6884 /* This is a short cut for the typical case where the sections are added
6885 to the sections_with_aarch64_elf_section_data list in forward order and
6886 then looked up here in backwards order. This makes a real difference
6887 to the ld-srec/sec64k.exp linker test. */
6888 entry = sections_with_aarch64_elf_section_data;
6889 if (last_entry != NULL)
6891 if (last_entry->sec == sec)
6893 else if (last_entry->next != NULL && last_entry->next->sec == sec)
6894 entry = last_entry->next;
6897 for (; entry; entry = entry->next)
6898 if (entry->sec == sec)
6902 /* Record the entry prior to this one - it is the entry we are
6903 most likely to want to locate next time. Also this way if we
6904 have been called from
6905 unrecord_section_with_aarch64_elf_section_data () we will not
6906 be caching a pointer that is about to be freed. */
6907 last_entry = entry->prev;
6913 unrecord_section_with_aarch64_elf_section_data (asection *sec)
6915 struct section_list *entry;
6917 entry = find_aarch64_elf_section_entry (sec);
6921 if (entry->prev != NULL)
6922 entry->prev->next = entry->next;
6923 if (entry->next != NULL)
6924 entry->next->prev = entry->prev;
6925 if (entry == sections_with_aarch64_elf_section_data)
6926 sections_with_aarch64_elf_section_data = entry->next;
6935 struct bfd_link_info *info;
6938 int (*func) (void *, const char *, Elf_Internal_Sym *,
6939 asection *, struct elf_link_hash_entry *);
6940 } output_arch_syminfo;
6942 enum map_symbol_type
6949 /* Output a single mapping symbol. */
6952 elfNN_aarch64_output_map_sym (output_arch_syminfo *osi,
6953 enum map_symbol_type type, bfd_vma offset)
6955 static const char *names[2] = { "$x", "$d" };
6956 Elf_Internal_Sym sym;
6958 sym.st_value = (osi->sec->output_section->vma
6959 + osi->sec->output_offset + offset);
6962 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
6963 sym.st_shndx = osi->sec_shndx;
6964 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
6969 /* Output mapping symbols for PLT entries associated with H. */
6972 elfNN_aarch64_output_plt_map (struct elf_link_hash_entry *h, void *inf)
6974 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
6977 if (h->root.type == bfd_link_hash_indirect)
6980 if (h->root.type == bfd_link_hash_warning)
6981 /* When warning symbols are created, they **replace** the "real"
6982 entry in the hash table, thus we never get to see the real
6983 symbol in a hash traversal. So look at it now. */
6984 h = (struct elf_link_hash_entry *) h->root.u.i.link;
6986 if (h->plt.offset == (bfd_vma) - 1)
6989 addr = h->plt.offset;
6992 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
6999 /* Output a single local symbol for a generated stub. */
7002 elfNN_aarch64_output_stub_sym (output_arch_syminfo *osi, const char *name,
7003 bfd_vma offset, bfd_vma size)
7005 Elf_Internal_Sym sym;
7007 sym.st_value = (osi->sec->output_section->vma
7008 + osi->sec->output_offset + offset);
7011 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7012 sym.st_shndx = osi->sec_shndx;
7013 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
7017 aarch64_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
7019 struct elf_aarch64_stub_hash_entry *stub_entry;
7023 output_arch_syminfo *osi;
7025 /* Massage our args to the form they really have. */
7026 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
7027 osi = (output_arch_syminfo *) in_arg;
7029 stub_sec = stub_entry->stub_sec;
7031 /* Ensure this stub is attached to the current section being
7033 if (stub_sec != osi->sec)
7036 addr = (bfd_vma) stub_entry->stub_offset;
7038 stub_name = stub_entry->output_name;
7040 switch (stub_entry->stub_type)
7042 case aarch64_stub_adrp_branch:
7043 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
7044 sizeof (aarch64_adrp_branch_stub)))
7046 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7049 case aarch64_stub_long_branch:
7050 if (!elfNN_aarch64_output_stub_sym
7051 (osi, stub_name, addr, sizeof (aarch64_long_branch_stub)))
7053 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7055 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16))
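/* Illustrative note: the $d symbol at offset 16 suggests the
   long-branch stub is 16 bytes of instructions followed by literal
   data (presumably the 64-bit target address); the $x/$d mapping
   symbols let disassemblers switch between code and data views.  */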
7058 case aarch64_stub_erratum_835769_veneer:
7059 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
7060 sizeof (aarch64_erratum_835769_stub)))
7062 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7065 case aarch64_stub_erratum_843419_veneer:
7066 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
7067 sizeof (aarch64_erratum_843419_stub)))
7069 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7080 /* Output mapping symbols for linker generated sections. */
7083 elfNN_aarch64_output_arch_local_syms (bfd *output_bfd,
7084 struct bfd_link_info *info,
7086 int (*func) (void *, const char *,
7089 struct elf_link_hash_entry
7092 output_arch_syminfo osi;
7093 struct elf_aarch64_link_hash_table *htab;
7095 htab = elf_aarch64_hash_table (info);
7101 /* Long calls stubs. */
7102 if (htab->stub_bfd && htab->stub_bfd->sections)
7106 for (stub_sec = htab->stub_bfd->sections;
7107 stub_sec != NULL; stub_sec = stub_sec->next)
7109 /* Ignore non-stub sections. */
7110 if (!strstr (stub_sec->name, STUB_SUFFIX))
7115 osi.sec_shndx = _bfd_elf_section_from_bfd_section
7116 (output_bfd, osi.sec->output_section);
7118 /* The first instruction in a stub is always a branch. */
7119 if (!elfNN_aarch64_output_map_sym (&osi, AARCH64_MAP_INSN, 0))
7122 bfd_hash_traverse (&htab->stub_hash_table, aarch64_map_one_stub,
7127 /* Finally, output mapping symbols for the PLT. */
7128 if (!htab->root.splt || htab->root.splt->size == 0)
7131 /* For now live without mapping symbols for the plt. */
7132 osi.sec_shndx = _bfd_elf_section_from_bfd_section
7133 (output_bfd, htab->root.splt->output_section);
7134 osi.sec = htab->root.splt;
7136 elf_link_hash_traverse (&htab->root, elfNN_aarch64_output_plt_map,
7143 /* Allocate target specific section data. */
7146 elfNN_aarch64_new_section_hook (bfd *abfd, asection *sec)
7148 if (!sec->used_by_bfd)
7150 _aarch64_elf_section_data *sdata;
7151 bfd_size_type amt = sizeof (*sdata);
7153 sdata = bfd_zalloc (abfd, amt);
7156 sec->used_by_bfd = sdata;
7159 record_section_with_aarch64_elf_section_data (sec);
7161 return _bfd_elf_new_section_hook (abfd, sec);
7166 unrecord_section_via_map_over_sections (bfd *abfd ATTRIBUTE_UNUSED,
7168 void *ignore ATTRIBUTE_UNUSED)
7170 unrecord_section_with_aarch64_elf_section_data (sec);
7174 elfNN_aarch64_close_and_cleanup (bfd *abfd)
7177 bfd_map_over_sections (abfd,
7178 unrecord_section_via_map_over_sections, NULL);
7180 return _bfd_elf_close_and_cleanup (abfd);
7184 elfNN_aarch64_bfd_free_cached_info (bfd *abfd)
7187 bfd_map_over_sections (abfd,
7188 unrecord_section_via_map_over_sections, NULL);
7190 return _bfd_free_cached_info (abfd);
7193 /* Create dynamic sections. This is different from the ARM backend in that
7194 the got, plt, gotplt and their relocation sections are all created in the
7195 standard part of the bfd elf backend. */
7198 elfNN_aarch64_create_dynamic_sections (bfd *dynobj,
7199 struct bfd_link_info *info)
7201 struct elf_aarch64_link_hash_table *htab;
7203 /* We need to create .got section. */
7204 if (!aarch64_elf_create_got_section (dynobj, info))
7207 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
7210 htab = elf_aarch64_hash_table (info);
7211 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
7213 htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss");
7215 if (!htab->sdynbss || (!info->shared && !htab->srelbss))
7222 /* Allocate space in .plt, .got and associated reloc sections for
7226 elfNN_aarch64_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
7228 struct bfd_link_info *info;
7229 struct elf_aarch64_link_hash_table *htab;
7230 struct elf_aarch64_link_hash_entry *eh;
7231 struct elf_dyn_relocs *p;
7233 /* An example of a bfd_link_hash_indirect symbol is versioned
7234 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
7235 -> __gxx_personality_v0(bfd_link_hash_defined)
7237 There is no need to process bfd_link_hash_indirect symbols here
7238 because we will also be presented with the concrete instance of
7239 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
7240 called to copy all relevant data from the generic to the concrete
7243 if (h->root.type == bfd_link_hash_indirect)
7246 if (h->root.type == bfd_link_hash_warning)
7247 h = (struct elf_link_hash_entry *) h->root.u.i.link;
7249 info = (struct bfd_link_info *) inf;
7250 htab = elf_aarch64_hash_table (info);
7252 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
7253 here if it is defined and referenced in a non-shared object. */
7254 if (h->type == STT_GNU_IFUNC
7257 else if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
7259 /* Make sure this symbol is output as a dynamic symbol.
7260 Undefined weak syms won't yet be marked as dynamic. */
7261 if (h->dynindx == -1 && !h->forced_local)
7263 if (!bfd_elf_link_record_dynamic_symbol (info, h))
7267 if (info->shared || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
7269 asection *s = htab->root.splt;
7271 /* If this is the first .plt entry, make room for the special
7274 s->size += htab->plt_header_size;
7276 h->plt.offset = s->size;
7278 /* If this symbol is not defined in a regular file, and we are
7279 not generating a shared library, then set the symbol to this
7280 location in the .plt. This is required to make function
7281 pointers compare as equal between the normal executable and
7282 the shared library. */
7283 if (!info->shared && !h->def_regular)
7285 h->root.u.def.section = s;
7286 h->root.u.def.value = h->plt.offset;
7289 /* Make room for this entry. For now we only create the
7290 small model PLT entries. We later need to find a way
7291 of relaxing into these from the large model PLT entries. */
7292 s->size += PLT_SMALL_ENTRY_SIZE;
7294 /* We also need to make an entry in the .got.plt section, which
7295 will be placed in the .got section by the linker script. */
7296 htab->root.sgotplt->size += GOT_ENTRY_SIZE;
7298 /* We also need to make an entry in the .rela.plt section. */
7299 htab->root.srelplt->size += RELOC_SIZE (htab);
7301 /* We need to ensure that all GOT entries that serve the PLT
7302 are consecutive with the special GOT slots [0] [1] and
7303 [2]. Any additional relocations, such as
7304 R_AARCH64_TLSDESC, must be placed after the PLT related
7305 entries. We abuse the reloc_count such that during
7306 sizing we adjust reloc_count to indicate the number of
7307 PLT related reserved entries. In subsequent phases when
7308 filling in the contents of the reloc entries, PLT related
7309 entries are placed by computing their PLT index (0
7310 .. reloc_count), while other non-PLT relocs are placed
7311 at the slot indicated by reloc_count and reloc_count is
7314 htab->root.srelplt->reloc_count++;
7318 h->plt.offset = (bfd_vma) - 1;
7324 h->plt.offset = (bfd_vma) - 1;
7328 eh = (struct elf_aarch64_link_hash_entry *) h;
7329 eh->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
7331 if (h->got.refcount > 0)
7334 unsigned got_type = elf_aarch64_hash_entry (h)->got_type;
7336 h->got.offset = (bfd_vma) - 1;
7338 dyn = htab->root.dynamic_sections_created;
7340 /* Make sure this symbol is output as a dynamic symbol.
7341 Undefined weak syms won't yet be marked as dynamic. */
7342 if (dyn && h->dynindx == -1 && !h->forced_local)
7344 if (!bfd_elf_link_record_dynamic_symbol (info, h))
7348 if (got_type == GOT_UNKNOWN)
7351 else if (got_type == GOT_NORMAL)
7353 h->got.offset = htab->root.sgot->size;
7354 htab->root.sgot->size += GOT_ENTRY_SIZE;
7355 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7356 || h->root.type != bfd_link_hash_undefweak)
7358 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
7360 htab->root.srelgot->size += RELOC_SIZE (htab);
7366 if (got_type & GOT_TLSDESC_GD)
7368 eh->tlsdesc_got_jump_table_offset =
7369 (htab->root.sgotplt->size
7370 - aarch64_compute_jump_table_size (htab));
7371 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
7372 h->got.offset = (bfd_vma) - 2;
7375 if (got_type & GOT_TLS_GD)
7377 h->got.offset = htab->root.sgot->size;
7378 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
7381 if (got_type & GOT_TLS_IE)
7383 h->got.offset = htab->root.sgot->size;
7384 htab->root.sgot->size += GOT_ENTRY_SIZE;
7387 indx = h && h->dynindx != -1 ? h->dynindx : 0;
7388 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7389 || h->root.type != bfd_link_hash_undefweak)
7392 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
7394 if (got_type & GOT_TLSDESC_GD)
7396 htab->root.srelplt->size += RELOC_SIZE (htab);
7397 /* Note reloc_count not incremented here! We have
7398 already adjusted reloc_count for this relocation
7401 /* TLSDESC PLT is now needed, but not yet determined. */
7402 htab->tlsdesc_plt = (bfd_vma) - 1;
7405 if (got_type & GOT_TLS_GD)
7406 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
7408 if (got_type & GOT_TLS_IE)
7409 htab->root.srelgot->size += RELOC_SIZE (htab);
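/* Illustrative note, not from this file: a GD-style entry presumably
   needs two dynamic relocations because it occupies two GOT words
   (one for the module, one for the offset within that module's TLS
   block), while an IE-style entry needs a single relocation for its
   one TP-relative offset word; the sizing above reflects that.  */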
7415 h->got.offset = (bfd_vma) - 1;
7418 if (eh->dyn_relocs == NULL)
7421 /* In the shared -Bsymbolic case, discard space allocated for
7422 dynamic pc-relative relocs against symbols which turn out to be
7423 defined in regular objects. For the normal shared case, discard
7424 space for pc-relative relocs that have become local due to symbol
7425 visibility changes. */
7429 /* Relocs that use pc_count are those that appear on a call
7430 insn, or certain REL relocs that can be generated via assembly.
7431 We want calls to protected symbols to resolve directly to the
7432 function rather than going via the plt. If people want
7433 function pointer comparisons to work as expected then they
7434 should avoid writing weird assembly. */
7435 if (SYMBOL_CALLS_LOCAL (info, h))
7437 struct elf_dyn_relocs **pp;
7439 for (pp = &eh->dyn_relocs; (p = *pp) != NULL;)
7441 p->count -= p->pc_count;
7450 /* Also discard relocs on undefined weak syms with non-default
7452 if (eh->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
7454 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
7455 eh->dyn_relocs = NULL;
7457 /* Make sure undefined weak symbols are output as a dynamic
7459 else if (h->dynindx == -1
7461 && !bfd_elf_link_record_dynamic_symbol (info, h))
7466 else if (ELIMINATE_COPY_RELOCS)
7468 /* For the non-shared case, discard space for relocs against
7469 symbols which turn out to need copy relocs or are not
7475 || (htab->root.dynamic_sections_created
7476 && (h->root.type == bfd_link_hash_undefweak
7477 || h->root.type == bfd_link_hash_undefined))))
7479 /* Make sure this symbol is output as a dynamic symbol.
7480 Undefined weak syms won't yet be marked as dynamic. */
7481 if (h->dynindx == -1
7483 && !bfd_elf_link_record_dynamic_symbol (info, h))
7486 /* If that succeeded, we know we'll be keeping all the
7488 if (h->dynindx != -1)
7492 eh->dyn_relocs = NULL;
7497 /* Finally, allocate space. */
7498 for (p = eh->dyn_relocs; p != NULL; p = p->next)
7502 sreloc = elf_section_data (p->sec)->sreloc;
7504 BFD_ASSERT (sreloc != NULL);
7506 sreloc->size += p->count * RELOC_SIZE (htab);
7512 /* Allocate space in .plt, .got and associated reloc sections for
7513 ifunc dynamic relocs. */
7516 elfNN_aarch64_allocate_ifunc_dynrelocs (struct elf_link_hash_entry *h,
7519 struct bfd_link_info *info;
7520 struct elf_aarch64_link_hash_table *htab;
7521 struct elf_aarch64_link_hash_entry *eh;
7523 /* An example of a bfd_link_hash_indirect symbol is versioned
7524 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
7525 -> __gxx_personality_v0(bfd_link_hash_defined)
7527 There is no need to process bfd_link_hash_indirect symbols here
7528 because we will also be presented with the concrete instance of
7529 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
7530 called to copy all relevant data from the generic to the concrete
7533 if (h->root.type == bfd_link_hash_indirect)
7536 if (h->root.type == bfd_link_hash_warning)
7537 h = (struct elf_link_hash_entry *) h->root.u.i.link;
7539 info = (struct bfd_link_info *) inf;
7540 htab = elf_aarch64_hash_table (info);
7542 eh = (struct elf_aarch64_link_hash_entry *) h;
7544 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
7545 here if it is defined and referenced in a non-shared object. */
7546 if (h->type == STT_GNU_IFUNC
7548 return _bfd_elf_allocate_ifunc_dyn_relocs (info, h,
7550 htab->plt_entry_size,
7551 htab->plt_header_size,
7556 /* Allocate space in .plt, .got and associated reloc sections for
7557 local dynamic relocs. */
7560 elfNN_aarch64_allocate_local_dynrelocs (void **slot, void *inf)
7562 struct elf_link_hash_entry *h
7563 = (struct elf_link_hash_entry *) *slot;
7565 if (h->type != STT_GNU_IFUNC
7569 || h->root.type != bfd_link_hash_defined)
7572 return elfNN_aarch64_allocate_dynrelocs (h, inf);
7575 /* Allocate space in .plt, .got and associated reloc sections for
7576 local ifunc dynamic relocs. */
7579 elfNN_aarch64_allocate_local_ifunc_dynrelocs (void **slot, void *inf)
7581 struct elf_link_hash_entry *h
7582 = (struct elf_link_hash_entry *) *slot;
7584 if (h->type != STT_GNU_IFUNC
7588 || h->root.type != bfd_link_hash_defined)
7591 return elfNN_aarch64_allocate_ifunc_dynrelocs (h, inf);
7594 /* Find any dynamic relocs that apply to read-only sections. */
7597 aarch64_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
7599 struct elf_aarch64_link_hash_entry * eh;
7600 struct elf_dyn_relocs * p;
7602 eh = (struct elf_aarch64_link_hash_entry *) h;
7603 for (p = eh->dyn_relocs; p != NULL; p = p->next)
7605 asection *s = p->sec;
7607 if (s != NULL && (s->flags & SEC_READONLY) != 0)
7609 struct bfd_link_info *info = (struct bfd_link_info *) inf;
7611 info->flags |= DF_TEXTREL;
7613 /* Not an error, just cut short the traversal. */
7620 /* This is the most important function of all.  Innocuously named
7623 elfNN_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
7624 struct bfd_link_info *info)
7626 struct elf_aarch64_link_hash_table *htab;
7632 htab = elf_aarch64_hash_table ((info));
7633 dynobj = htab->root.dynobj;
7635 BFD_ASSERT (dynobj != NULL);
7637 if (htab->root.dynamic_sections_created)
7639 if (info->executable)
7641 s = bfd_get_linker_section (dynobj, ".interp");
7644 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
7645 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
7649 /* Set up .got offsets for local syms, and space for local dynamic
7651 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
7653 struct elf_aarch64_local_symbol *locals = NULL;
7654 Elf_Internal_Shdr *symtab_hdr;
7658 if (!is_aarch64_elf (ibfd))
7661 for (s = ibfd->sections; s != NULL; s = s->next)
7663 struct elf_dyn_relocs *p;
7665 for (p = (struct elf_dyn_relocs *)
7666 (elf_section_data (s)->local_dynrel); p != NULL; p = p->next)
7668 if (!bfd_is_abs_section (p->sec)
7669 && bfd_is_abs_section (p->sec->output_section))
7671 /* Input section has been discarded, either because
7672 it is a copy of a linkonce section or due to
7673 linker script /DISCARD/, so we'll be discarding
7676 else if (p->count != 0)
7678 srel = elf_section_data (p->sec)->sreloc;
7679 srel->size += p->count * RELOC_SIZE (htab);
7680 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
7681 info->flags |= DF_TEXTREL;
7686 locals = elf_aarch64_locals (ibfd);
7690 symtab_hdr = &elf_symtab_hdr (ibfd);
7691 srel = htab->root.srelgot;
7692 for (i = 0; i < symtab_hdr->sh_info; i++)
7694 locals[i].got_offset = (bfd_vma) - 1;
7695 locals[i].tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
7696 if (locals[i].got_refcount > 0)
7698 unsigned got_type = locals[i].got_type;
7699 if (got_type & GOT_TLSDESC_GD)
7701 locals[i].tlsdesc_got_jump_table_offset =
7702 (htab->root.sgotplt->size
7703 - aarch64_compute_jump_table_size (htab));
7704 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
7705 locals[i].got_offset = (bfd_vma) - 2;
7708 if (got_type & GOT_TLS_GD)
7710 locals[i].got_offset = htab->root.sgot->size;
7711 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
7714 if (got_type & GOT_TLS_IE
7715 || got_type & GOT_NORMAL)
7717 locals[i].got_offset = htab->root.sgot->size;
7718 htab->root.sgot->size += GOT_ENTRY_SIZE;
7721 if (got_type == GOT_UNKNOWN)
7727 if (got_type & GOT_TLSDESC_GD)
7729 htab->root.srelplt->size += RELOC_SIZE (htab);
7730 /* Note RELOC_COUNT not incremented here! */
7731 htab->tlsdesc_plt = (bfd_vma) - 1;
7734 if (got_type & GOT_TLS_GD)
7735 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
7737 if (got_type & GOT_TLS_IE
7738 || got_type & GOT_NORMAL)
7739 htab->root.srelgot->size += RELOC_SIZE (htab);
7744 locals[i].got_refcount = (bfd_vma) - 1;
7750 /* Allocate global sym .plt and .got entries, and space for global
7751 sym dynamic relocs. */
7752 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_dynrelocs,
7755 /* Allocate global ifunc sym .plt and .got entries, and space for global
7756 ifunc sym dynamic relocs. */
7757 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_ifunc_dynrelocs,
7760 /* Allocate .plt and .got entries, and space for local symbols. */
7761 htab_traverse (htab->loc_hash_table,
7762 elfNN_aarch64_allocate_local_dynrelocs,
7765 /* Allocate .plt and .got entries, and space for local ifunc symbols. */
7766 htab_traverse (htab->loc_hash_table,
7767 elfNN_aarch64_allocate_local_ifunc_dynrelocs,
7770 /* For every jump slot reserved in the sgotplt, reloc_count is
7771 incremented. However, when we reserve space for TLS descriptors,
7772 it's not incremented, so in order to compute the space reserved
7773 for them, it suffices to multiply the reloc count by the jump
7776 if (htab->root.srelplt)
7777 htab->sgotplt_jump_table_size = aarch64_compute_jump_table_size (htab);
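/* Illustrative only: per the comment above, the jump-table size is
   presumably

     htab->root.srelplt->reloc_count * GOT_ENTRY_SIZE

   i.e. one .got.plt word per PLT reloc counted so far; the TLSDESC
   double slots reserved during allocation are not part of this
   region and keep the tlsdesc_got_jump_table_offset values recorded
   earlier.  */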
7779 if (htab->tlsdesc_plt)
7781 if (htab->root.splt->size == 0)
7782 htab->root.splt->size += PLT_ENTRY_SIZE;
7784 htab->tlsdesc_plt = htab->root.splt->size;
7785 htab->root.splt->size += PLT_TLSDESC_ENTRY_SIZE;
7787 /* If we're not using lazy TLS relocations, don't generate the
7788 GOT entry required. */
7789 if (!(info->flags & DF_BIND_NOW))
7791 htab->dt_tlsdesc_got = htab->root.sgot->size;
7792 htab->root.sgot->size += GOT_ENTRY_SIZE;
7796 /* Initialize mapping symbol information, used later to distinguish
7797 between code and data while scanning for errata. */
7798 if (htab->fix_erratum_835769 || htab->fix_erratum_843419)
7799 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
7801 if (!is_aarch64_elf (ibfd))
7803 bfd_elfNN_aarch64_init_maps (ibfd);
7806 /* We now have determined the sizes of the various dynamic sections.
7807 Allocate memory for them. */
7809 for (s = dynobj->sections; s != NULL; s = s->next)
7811 if ((s->flags & SEC_LINKER_CREATED) == 0)
7814 if (s == htab->root.splt
7815 || s == htab->root.sgot
7816 || s == htab->root.sgotplt
7817 || s == htab->root.iplt
7818 || s == htab->root.igotplt || s == htab->sdynbss)
7820 /* Strip this section if we don't need it; see the
7823 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
7825 if (s->size != 0 && s != htab->root.srelplt)
7828 /* We use the reloc_count field as a counter if we need
7829 to copy relocs into the output file. */
7830 if (s != htab->root.srelplt)
7835 /* It's not one of our sections, so don't allocate space. */
7841 /* If we don't need this section, strip it from the
7842 output file. This is mostly to handle .rela.bss and
7843 .rela.plt. We must create both sections in
7844 create_dynamic_sections, because they must be created
7845 before the linker maps input sections to output
7846 sections. The linker does that before
7847 adjust_dynamic_symbol is called, and it is that
7848 function which decides whether anything needs to go
7849 into these sections. */
7851 s->flags |= SEC_EXCLUDE;
7855 if ((s->flags & SEC_HAS_CONTENTS) == 0)
7858 /* Allocate memory for the section contents. We use bfd_zalloc
7859 here in case unused entries are not reclaimed before the
7860 section's contents are written out. This should not happen,
7861 but this way if it does, we get a R_AARCH64_NONE reloc instead
7863 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
7864 if (s->contents == NULL)
7868 if (htab->root.dynamic_sections_created)
7870 /* Add some entries to the .dynamic section. We fill in the
7871 values later, in elfNN_aarch64_finish_dynamic_sections, but we
7872 must add the entries now so that we get the correct size for
7873 the .dynamic section. The DT_DEBUG entry is filled in by the
7874 dynamic linker and used by the debugger. */
7875 #define add_dynamic_entry(TAG, VAL) \
7876 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
7878 if (info->executable)
7880 if (!add_dynamic_entry (DT_DEBUG, 0))
7884 if (htab->root.splt->size != 0)
7886 if (!add_dynamic_entry (DT_PLTGOT, 0)
7887 || !add_dynamic_entry (DT_PLTRELSZ, 0)
7888 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
7889 || !add_dynamic_entry (DT_JMPREL, 0))
7892 if (htab->tlsdesc_plt
7893 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
7894 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
7900 if (!add_dynamic_entry (DT_RELA, 0)
7901 || !add_dynamic_entry (DT_RELASZ, 0)
7902 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
7905 /* If any dynamic relocs apply to a read-only section,
7906 then we need a DT_TEXTREL entry. */
7907 if ((info->flags & DF_TEXTREL) == 0)
7908 elf_link_hash_traverse (& htab->root, aarch64_readonly_dynrelocs,
7911 if ((info->flags & DF_TEXTREL) != 0)
7913 if (!add_dynamic_entry (DT_TEXTREL, 0))
7918 #undef add_dynamic_entry
7924 elf_aarch64_update_plt_entry (bfd *output_bfd,
7925 bfd_reloc_code_real_type r_type,
7926 bfd_byte *plt_entry, bfd_vma value)
7928 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (r_type);
7930 _bfd_aarch64_elf_put_addend (output_bfd, plt_entry, r_type, howto, value);
7934 elfNN_aarch64_create_small_pltn_entry (struct elf_link_hash_entry *h,
7935 struct elf_aarch64_link_hash_table
7936 *htab, bfd *output_bfd,
7937 struct bfd_link_info *info)
7939 bfd_byte *plt_entry;
7942 bfd_vma gotplt_entry_address;
7943 bfd_vma plt_entry_address;
7944 Elf_Internal_Rela rela;
7946 asection *plt, *gotplt, *relplt;
7948 /* When building a static executable, use .iplt, .igot.plt and
7949 .rela.iplt sections for STT_GNU_IFUNC symbols. */
7950 if (htab->root.splt != NULL)
7952 plt = htab->root.splt;
7953 gotplt = htab->root.sgotplt;
7954 relplt = htab->root.srelplt;
7958 plt = htab->root.iplt;
7959 gotplt = htab->root.igotplt;
7960 relplt = htab->root.irelplt;
7963 /* Get the index in the procedure linkage table which
7964 corresponds to this symbol. This is the index of this symbol
7965 in all the symbols for which we are making plt entries. The
7966 first entry in the procedure linkage table is reserved.
7968 Get the offset into the .got table of the entry that
7969 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
7970 bytes. The first three are reserved for the dynamic linker.
7972 For static executables, we don't reserve anything. */
7974 if (plt == htab->root.splt)
7976 plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
7977 got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
7981 plt_index = h->plt.offset / htab->plt_entry_size;
7982 got_offset = plt_index * GOT_ENTRY_SIZE;
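/* Worked example with hypothetical numbers, illustrative only: in a
   dynamic link the PLT starts with a plt_header_size byte header, so
   for the third PLTn entry

     h->plt.offset == htab->plt_header_size + 2 * htab->plt_entry_size
     plt_index     == 2
     got_offset    == (2 + 3) * GOT_ENTRY_SIZE

   i.e. its .got.plt word follows the three reserved slots and the
   slots of the two earlier PLT entries.  */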
7985 plt_entry = plt->contents + h->plt.offset;
7986 plt_entry_address = plt->output_section->vma
7987 + plt->output_offset + h->plt.offset;
7988 gotplt_entry_address = gotplt->output_section->vma +
7989 gotplt->output_offset + got_offset;
7991 /* Copy in the boiler-plate for the PLTn entry. */
7992 memcpy (plt_entry, elfNN_aarch64_small_plt_entry, PLT_SMALL_ENTRY_SIZE);
7994 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
7995 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
7996 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
7998 PG (gotplt_entry_address) -
7999 PG (plt_entry_address));
8001 /* Fill in the lo12 bits for the load from the pltgot. */
8002 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
8004 PG_OFFSET (gotplt_entry_address));
8006 /* Fill in the lo12 bits for the add from the pltgot entry. */
8007 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
8009 PG_OFFSET (gotplt_entry_address));
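/* Illustrative only: the ADRP/LDR/ADD fills above rely on 4K page
   arithmetic.  Assuming PG (x) masks off the low 12 bits of x and
   PG_OFFSET (x) keeps them, so that x == PG (x) + PG_OFFSET (x),
   the ADRP immediate supplies the page delta and the two LO12
   immediates re-add the offset within the page:

     adrp x16, target                ; x16 = PG (target)
     ldr  x17, [x16, #:lo12:target]  ; PG (target) + PG_OFFSET (target)  */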
8011 /* All the GOTPLT entries are essentially initialized to PLT0. */
8012 bfd_put_NN (output_bfd,
8013 plt->output_section->vma + plt->output_offset,
8014 gotplt->contents + got_offset);
8016 rela.r_offset = gotplt_entry_address;
8018 if (h->dynindx == -1
8019 || ((info->executable
8020 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
8022 && h->type == STT_GNU_IFUNC))
8024 /* If an STT_GNU_IFUNC symbol is locally defined, generate
8025 R_AARCH64_IRELATIVE instead of R_AARCH64_JUMP_SLOT. */
8026 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
8027 rela.r_addend = (h->root.u.def.value
8028 + h->root.u.def.section->output_section->vma
8029 + h->root.u.def.section->output_offset);
8033 /* Fill in the entry in the .rela.plt section. */
8034 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (JUMP_SLOT));
8038 /* Compute the relocation entry to use based on the PLT index and do
8039 not adjust reloc_count. The reloc_count has already been adjusted
8040 to account for this entry. */
8041 loc = relplt->contents + plt_index * RELOC_SIZE (htab);
8042 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
8045 /* Size sections even though they're not dynamic. We use this to set up
8046 _TLS_MODULE_BASE_, if needed. */
8049 elfNN_aarch64_always_size_sections (bfd *output_bfd,
8050 struct bfd_link_info *info)
8054 if (info->relocatable)
8057 tls_sec = elf_hash_table (info)->tls_sec;
8061 struct elf_link_hash_entry *tlsbase;
8063 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
8064 "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
8068 struct bfd_link_hash_entry *h = NULL;
8069 const struct elf_backend_data *bed =
8070 get_elf_backend_data (output_bfd);
8072 if (!(_bfd_generic_link_add_one_symbol
8073 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
8074 tls_sec, 0, NULL, FALSE, bed->collect, &h)))
8077 tlsbase->type = STT_TLS;
8078 tlsbase = (struct elf_link_hash_entry *) h;
8079 tlsbase->def_regular = 1;
8080 tlsbase->other = STV_HIDDEN;
8081 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
8088 /* Finish up dynamic symbol handling. We set the contents of various
8089 dynamic sections here. */
8091 elfNN_aarch64_finish_dynamic_symbol (bfd *output_bfd,
8092 struct bfd_link_info *info,
8093 struct elf_link_hash_entry *h,
8094 Elf_Internal_Sym *sym)
8096 struct elf_aarch64_link_hash_table *htab;
8097 htab = elf_aarch64_hash_table (info);
8099 if (h->plt.offset != (bfd_vma) - 1)
8101 asection *plt, *gotplt, *relplt;
8103 /* This symbol has an entry in the procedure linkage table. Set
8106 /* When building a static executable, use .iplt, .igot.plt and
8107 .rela.iplt sections for STT_GNU_IFUNC symbols. */
8108 if (htab->root.splt != NULL)
8110 plt = htab->root.splt;
8111 gotplt = htab->root.sgotplt;
8112 relplt = htab->root.srelplt;
8116 plt = htab->root.iplt;
8117 gotplt = htab->root.igotplt;
8118 relplt = htab->root.irelplt;
8121 /* This symbol has an entry in the procedure linkage table. Set
8123 if ((h->dynindx == -1
8124 && !((h->forced_local || info->executable)
8126 && h->type == STT_GNU_IFUNC))
8132 elfNN_aarch64_create_small_pltn_entry (h, htab, output_bfd, info);
8133 if (!h->def_regular)
8135 /* Mark the symbol as undefined, rather than as defined in
8136 the .plt section. */
8137 sym->st_shndx = SHN_UNDEF;
8138 /* If the symbol is weak we need to clear the value.
8139 Otherwise, the PLT entry would provide a definition for
8140 the symbol even if the symbol wasn't defined anywhere,
8141 and so the symbol would never be NULL. Leave the value if
8142 there were any relocations where pointer equality matters
8143 (this is a clue for the dynamic linker, to make function
8144 pointer comparisons work between an application and shared
8146 if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
8151 if (h->got.offset != (bfd_vma) - 1
8152 && elf_aarch64_hash_entry (h)->got_type == GOT_NORMAL)
8154 Elf_Internal_Rela rela;
8157 /* This symbol has an entry in the global offset table. Set it
8159 if (htab->root.sgot == NULL || htab->root.srelgot == NULL)
8162 rela.r_offset = (htab->root.sgot->output_section->vma
8163 + htab->root.sgot->output_offset
8164 + (h->got.offset & ~(bfd_vma) 1));
8167 && h->type == STT_GNU_IFUNC)
8171 /* Generate R_AARCH64_GLOB_DAT. */
8178 if (!h->pointer_equality_needed)
8181 /* For non-shared object, we can't use .got.plt, which
8182 contains the real function address if we need pointer
8183 equality. We load the GOT entry with the PLT entry. */
8184 plt = htab->root.splt ? htab->root.splt : htab->root.iplt;
8185 bfd_put_NN (output_bfd, (plt->output_section->vma
8186 + plt->output_offset
8188 htab->root.sgot->contents
8189 + (h->got.offset & ~(bfd_vma) 1));
8193 else if (info->shared && SYMBOL_REFERENCES_LOCAL (info, h))
8195 if (!h->def_regular)
8198 BFD_ASSERT ((h->got.offset & 1) != 0);
8199 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
8200 rela.r_addend = (h->root.u.def.value
8201 + h->root.u.def.section->output_section->vma
8202 + h->root.u.def.section->output_offset);
8207 BFD_ASSERT ((h->got.offset & 1) == 0);
8208 bfd_put_NN (output_bfd, (bfd_vma) 0,
8209 htab->root.sgot->contents + h->got.offset);
8210 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (GLOB_DAT));
8214 loc = htab->root.srelgot->contents;
8215 loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
8216 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
8221 Elf_Internal_Rela rela;
8224 /* This symbol needs a copy reloc. Set it up. */
8226 if (h->dynindx == -1
8227 || (h->root.type != bfd_link_hash_defined
8228 && h->root.type != bfd_link_hash_defweak)
8229 || htab->srelbss == NULL)
8232 rela.r_offset = (h->root.u.def.value
8233 + h->root.u.def.section->output_section->vma
8234 + h->root.u.def.section->output_offset);
8235 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (COPY));
8237 loc = htab->srelbss->contents;
8238 loc += htab->srelbss->reloc_count++ * RELOC_SIZE (htab);
8239 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
8242 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. SYM may
8243 be NULL for local symbols. */
8245 && (h == elf_hash_table (info)->hdynamic
8246 || h == elf_hash_table (info)->hgot))
8247 sym->st_shndx = SHN_ABS;
8252 /* Finish up local dynamic symbol handling. We set the contents of
8253 various dynamic sections here. */
8256 elfNN_aarch64_finish_local_dynamic_symbol (void **slot, void *inf)
8258 struct elf_link_hash_entry *h
8259 = (struct elf_link_hash_entry *) *slot;
8260 struct bfd_link_info *info
8261 = (struct bfd_link_info *) inf;
8263 return elfNN_aarch64_finish_dynamic_symbol (info->output_bfd,
8268 elfNN_aarch64_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
8269 struct elf_aarch64_link_hash_table
8272 /* Fill in PLT0. Fixme:RR Note this doesn't distinguish between
8273 small and large PLTs and at the moment just generates
8276 /* PLT0 of the small PLT looks like this in ELF64 -
8277 stp x16, x30, [sp, #-16]! // Save the reloc and lr on stack.
8278 adrp x16, PLT_GOT + 16 // Get the page base of the GOTPLT
8279 ldr x17, [x16, #:lo12:PLT_GOT+16] // Load the address of the
8281 add x16, x16, #:lo12:PLT_GOT+16 // Load the lo12 bits of the
8282 // GOTPLT entry for this.
8284 PLT0 will be slightly different in ELF32 due to different got entry
8287 bfd_vma plt_got_2nd_ent; /* Address of GOT[2]. */
8291 memcpy (htab->root.splt->contents, elfNN_aarch64_small_plt0_entry,
8293 elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize =
8296 plt_got_2nd_ent = (htab->root.sgotplt->output_section->vma
8297 + htab->root.sgotplt->output_offset
8298 + GOT_ENTRY_SIZE * 2);
8300 plt_base = htab->root.splt->output_section->vma +
8301 htab->root.splt->output_offset;
8303 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
8304 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
8305 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
8306 htab->root.splt->contents + 4,
8307 PG (plt_got_2nd_ent) - PG (plt_base + 4));
8309 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
8310 htab->root.splt->contents + 8,
8311 PG_OFFSET (plt_got_2nd_ent));
8313 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
8314 htab->root.splt->contents + 12,
8315 PG_OFFSET (plt_got_2nd_ent));
8319 elfNN_aarch64_finish_dynamic_sections (bfd *output_bfd,
8320 struct bfd_link_info *info)
8322 struct elf_aarch64_link_hash_table *htab;
8326 htab = elf_aarch64_hash_table (info);
8327 dynobj = htab->root.dynobj;
8328 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
8330 if (htab->root.dynamic_sections_created)
8332 ElfNN_External_Dyn *dyncon, *dynconend;
8334 if (sdyn == NULL || htab->root.sgot == NULL)
8337 dyncon = (ElfNN_External_Dyn *) sdyn->contents;
8338 dynconend = (ElfNN_External_Dyn *) (sdyn->contents + sdyn->size);
8339 for (; dyncon < dynconend; dyncon++)
8341 Elf_Internal_Dyn dyn;
8344 bfd_elfNN_swap_dyn_in (dynobj, dyncon, &dyn);
8352 s = htab->root.sgotplt;
8353 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
8357 dyn.d_un.d_ptr = htab->root.srelplt->output_section->vma;
8361 s = htab->root.srelplt;
8362 dyn.d_un.d_val = s->size;
8366 /* The procedure linkage table relocs (DT_JMPREL) should
8367 not be included in the overall relocs (DT_RELA).
8368 Therefore, we override the DT_RELASZ entry here to
8369 make it not include the JMPREL relocs. Since the
8370 linker script arranges for .rela.plt to follow all
8371 other relocation sections, we don't have to worry
8372 about changing the DT_RELA entry. */
8373 if (htab->root.srelplt != NULL)
8375 s = htab->root.srelplt;
8376 dyn.d_un.d_val -= s->size;
8380 case DT_TLSDESC_PLT:
8381 s = htab->root.splt;
8382 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
8383 + htab->tlsdesc_plt;
8386 case DT_TLSDESC_GOT:
8387 s = htab->root.sgot;
8388 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
8389 + htab->dt_tlsdesc_got;
8393 bfd_elfNN_swap_dyn_out (output_bfd, &dyn, dyncon);
8398 /* Fill in the special first entry in the procedure linkage table. */
8399 if (htab->root.splt && htab->root.splt->size > 0)
8401 elfNN_aarch64_init_small_plt0_entry (output_bfd, htab);
8403 elf_section_data (htab->root.splt->output_section)->
8404 this_hdr.sh_entsize = htab->plt_entry_size;
8407 if (htab->tlsdesc_plt)
8409 bfd_put_NN (output_bfd, (bfd_vma) 0,
8410 htab->root.sgot->contents + htab->dt_tlsdesc_got);
8412 memcpy (htab->root.splt->contents + htab->tlsdesc_plt,
8413 elfNN_aarch64_tlsdesc_small_plt_entry,
8414 sizeof (elfNN_aarch64_tlsdesc_small_plt_entry));
8417 bfd_vma adrp1_addr =
8418 htab->root.splt->output_section->vma
8419 + htab->root.splt->output_offset + htab->tlsdesc_plt + 4;
8421 bfd_vma adrp2_addr = adrp1_addr + 4;
8424 htab->root.sgot->output_section->vma
8425 + htab->root.sgot->output_offset;
8427 bfd_vma pltgot_addr =
8428 htab->root.sgotplt->output_section->vma
8429 + htab->root.sgotplt->output_offset;
8431 bfd_vma dt_tlsdesc_got = got_addr + htab->dt_tlsdesc_got;
8433 bfd_byte *plt_entry =
8434 htab->root.splt->contents + htab->tlsdesc_plt;
8436 /* adrp x2, DT_TLSDESC_GOT */
8437 elf_aarch64_update_plt_entry (output_bfd,
8438 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
8440 (PG (dt_tlsdesc_got)
8441 - PG (adrp1_addr)));
8444 elf_aarch64_update_plt_entry (output_bfd,
8445 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
8448 - PG (adrp2_addr)));
8450 /* ldr x2, [x2, #0] */
8451 elf_aarch64_update_plt_entry (output_bfd,
8452 BFD_RELOC_AARCH64_LDSTNN_LO12,
8454 PG_OFFSET (dt_tlsdesc_got));
8457 elf_aarch64_update_plt_entry (output_bfd,
8458 BFD_RELOC_AARCH64_ADD_LO12,
8460 PG_OFFSET (pltgot_addr));
8465 if (htab->root.sgotplt)
8467 if (bfd_is_abs_section (htab->root.sgotplt->output_section))
8469 (*_bfd_error_handler)
8470 (_("discarded output section: `%A'"), htab->root.sgotplt);
8474 /* Fill in the first three entries in the global offset table. */
8475 if (htab->root.sgotplt->size > 0)
8477 bfd_put_NN (output_bfd, (bfd_vma) 0, htab->root.sgotplt->contents);
8479 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
8480 bfd_put_NN (output_bfd,
8482 htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
8483 bfd_put_NN (output_bfd,
8485 htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
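/* Illustrative note: the values written above are placeholders in
   the output file; at run time the dynamic linker conventionally
   stores its link-map pointer in .got.plt[1] and the address of its
   lazy resolver in .got.plt[2], the word that PLT0 (see
   elfNN_aarch64_init_small_plt0_entry above) loads and branches
   to.  */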
8488 if (htab->root.sgot)
8490 if (htab->root.sgot->size > 0)
8493 sdyn ? sdyn->output_section->vma + sdyn->output_offset : 0;
8494 bfd_put_NN (output_bfd, addr, htab->root.sgot->contents);
8498 elf_section_data (htab->root.sgotplt->output_section)->
8499 this_hdr.sh_entsize = GOT_ENTRY_SIZE;
8502 if (htab->root.sgot && htab->root.sgot->size > 0)
8503 elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
8506 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
8507 htab_traverse (htab->loc_hash_table,
8508 elfNN_aarch64_finish_local_dynamic_symbol,
8514 /* Return address for Ith PLT stub in section PLT, for relocation REL
8515 or (bfd_vma) -1 if it should not be included. */
8518 elfNN_aarch64_plt_sym_val (bfd_vma i, const asection *plt,
8519 const arelent *rel ATTRIBUTE_UNUSED)
8521 return plt->vma + PLT_ENTRY_SIZE + i * PLT_SMALL_ENTRY_SIZE;
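/* Illustrative note: the formula above implies PLT_ENTRY_SIZE covers
   the PLT0 header, so for i == 0 this returns the address of the
   first PLTn stub, immediately after that header.  */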
8525 /* We use this so we can override certain functions
8526 (though currently we don't). */
8528 const struct elf_size_info elfNN_aarch64_size_info =
8530 sizeof (ElfNN_External_Ehdr),
8531 sizeof (ElfNN_External_Phdr),
8532 sizeof (ElfNN_External_Shdr),
8533 sizeof (ElfNN_External_Rel),
8534 sizeof (ElfNN_External_Rela),
8535 sizeof (ElfNN_External_Sym),
8536 sizeof (ElfNN_External_Dyn),
8537 sizeof (Elf_External_Note),
8538 4, /* Hash table entry size. */
8539 1, /* Internal relocs per external relocs. */
8540 ARCH_SIZE, /* Arch size. */
8541 LOG_FILE_ALIGN, /* Log_file_align. */
8542 ELFCLASSNN, EV_CURRENT,
8543 bfd_elfNN_write_out_phdrs,
8544 bfd_elfNN_write_shdrs_and_ehdr,
8545 bfd_elfNN_checksum_contents,
8546 bfd_elfNN_write_relocs,
8547 bfd_elfNN_swap_symbol_in,
8548 bfd_elfNN_swap_symbol_out,
8549 bfd_elfNN_slurp_reloc_table,
8550 bfd_elfNN_slurp_symbol_table,
8551 bfd_elfNN_swap_dyn_in,
8552 bfd_elfNN_swap_dyn_out,
8553 bfd_elfNN_swap_reloc_in,
8554 bfd_elfNN_swap_reloc_out,
8555 bfd_elfNN_swap_reloca_in,
8556 bfd_elfNN_swap_reloca_out
8559 #define ELF_ARCH bfd_arch_aarch64
8560 #define ELF_MACHINE_CODE EM_AARCH64
8561 #define ELF_MAXPAGESIZE 0x10000
8562 #define ELF_MINPAGESIZE 0x1000
8563 #define ELF_COMMONPAGESIZE 0x1000
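/* A 64KiB maximum page size lets the same binaries be mapped correctly
   on kernels configured for either 4KiB or 64KiB pages, while 4KiB
   remains the minimum and the common case.  */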
8565 #define bfd_elfNN_close_and_cleanup \
8566 elfNN_aarch64_close_and_cleanup
8568 #define bfd_elfNN_bfd_free_cached_info \
8569 elfNN_aarch64_bfd_free_cached_info
8571 #define bfd_elfNN_bfd_is_target_special_symbol \
8572 elfNN_aarch64_is_target_special_symbol
8574 #define bfd_elfNN_bfd_link_hash_table_create \
8575 elfNN_aarch64_link_hash_table_create
8577 #define bfd_elfNN_bfd_merge_private_bfd_data \
8578 elfNN_aarch64_merge_private_bfd_data
8580 #define bfd_elfNN_bfd_print_private_bfd_data \
8581 elfNN_aarch64_print_private_bfd_data
8583 #define bfd_elfNN_bfd_reloc_type_lookup \
8584 elfNN_aarch64_reloc_type_lookup
8586 #define bfd_elfNN_bfd_reloc_name_lookup \
8587 elfNN_aarch64_reloc_name_lookup
8589 #define bfd_elfNN_bfd_set_private_flags \
8590 elfNN_aarch64_set_private_flags
8592 #define bfd_elfNN_find_inliner_info \
8593 elfNN_aarch64_find_inliner_info
8595 #define bfd_elfNN_find_nearest_line \
8596 elfNN_aarch64_find_nearest_line
8598 #define bfd_elfNN_mkobject \
8599 elfNN_aarch64_mkobject
8601 #define bfd_elfNN_new_section_hook \
8602 elfNN_aarch64_new_section_hook
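/* The elf_backend_* definitions that follow hook the AArch64-specific
   routines into the generic ELF linker framework; elfNN-target.h,
   included at the end of this file, collects them into the target
   vector.  */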
8604 #define elf_backend_adjust_dynamic_symbol \
8605 elfNN_aarch64_adjust_dynamic_symbol
8607 #define elf_backend_always_size_sections \
8608 elfNN_aarch64_always_size_sections
8610 #define elf_backend_check_relocs \
8611 elfNN_aarch64_check_relocs
8613 #define elf_backend_copy_indirect_symbol \
8614 elfNN_aarch64_copy_indirect_symbol
8616 /* Create .dynbss and .rela.bss sections in DYNOBJ, and set up shortcuts
8617 to them in our hash table.  */
8618 #define elf_backend_create_dynamic_sections \
8619 elfNN_aarch64_create_dynamic_sections
8621 #define elf_backend_init_index_section \
8622 _bfd_elf_init_2_index_sections
8624 #define elf_backend_finish_dynamic_sections \
8625 elfNN_aarch64_finish_dynamic_sections
8627 #define elf_backend_finish_dynamic_symbol \
8628 elfNN_aarch64_finish_dynamic_symbol
8630 #define elf_backend_gc_sweep_hook \
8631 elfNN_aarch64_gc_sweep_hook
8633 #define elf_backend_object_p \
8634 elfNN_aarch64_object_p
8636 #define elf_backend_output_arch_local_syms \
8637 elfNN_aarch64_output_arch_local_syms
8639 #define elf_backend_plt_sym_val \
8640 elfNN_aarch64_plt_sym_val
8642 #define elf_backend_post_process_headers \
8643 elfNN_aarch64_post_process_headers
8645 #define elf_backend_relocate_section \
8646 elfNN_aarch64_relocate_section
8648 #define elf_backend_reloc_type_class \
8649 elfNN_aarch64_reloc_type_class
8651 #define elf_backend_section_from_shdr \
8652 elfNN_aarch64_section_from_shdr
8654 #define elf_backend_size_dynamic_sections \
8655 elfNN_aarch64_size_dynamic_sections
8657 #define elf_backend_size_info \
8658 elfNN_aarch64_size_info
8660 #define elf_backend_write_section \
8661 elfNN_aarch64_write_section
8663 #define elf_backend_can_refcount 1
8664 #define elf_backend_can_gc_sections 1
8665 #define elf_backend_plt_readonly 1
8666 #define elf_backend_want_got_plt 1
8667 #define elf_backend_want_plt_sym 0
8668 #define elf_backend_may_use_rel_p 0
8669 #define elf_backend_may_use_rela_p 1
8670 #define elf_backend_default_use_rela_p 1
8671 #define elf_backend_rela_normal 1
8672 #define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)
8673 #define elf_backend_default_execstack 0
8674 #define elf_backend_extern_protected_data 1
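/* The backend uses RELA relocations exclusively, reserves the first
   three GOT entries for the header filled in above, keeps the PLT
   read-only, and defaults to a non-executable stack.  */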
8676 #undef elf_backend_obj_attrs_section
8677 #define elf_backend_obj_attrs_section ".ARM.attributes"
8679 #include "elfNN-target.h"